# -*- coding: utf-8 -*-
"""
This file combines all data loading methods into a central location.
Each type of data has a class that retrieves, processes, and checks it.
Each class has the following methods:
get - retrieves raw data from a source
adapt - transforms from the raw data to the common processed format
check - performs some format checking to see if the processed data looks right
process - does all the above
Additionally, each class then has source specific handlers.
E.g. there might be a get_url and a get_csv for a given class
and then an adapt_phe and an adapt_hps method to format the data
If pulled from an external source (e.g. url), the raw data can be stored
by setting the config['GenerateOutput']['storeInputs'] flag to be True.
These will be stored in the data/ folder
The processed output can be stored by setting the config['GenerateOutput']['storeProcessedInputs']
flag to be True, which will store the data in processed_data/
@authors: <NAME>, <NAME>
"""
import os
import sys
import yaml
import pandas as pd
import re
import requests
import io
import json
import zipfile
from http import HTTPStatus
from bs4 import BeautifulSoup
from collections import Counter
from datetime import datetime
import pickle
import h5py
import numpy as np
from covid import data as LancsData
# import model_spec
# DTYPE = model_spec.DTYPE
DTYPE = np.float64
def CovarData(config):
# Return data and covar data structs
data = {}
data['areas'] = AreaCodeData.process(config)
data['commute'] = InterLadCommuteData.process(config)
data['cases_tidy'] = CasesData.process(config)
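# pivot the tidy cases into a wide matrix: one row per lad19cd, one column per date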
data['cases_wide'] = data['cases_tidy'].pivot(index="lad19cd", columns="date", values="cases")
data['mobility'] = MobilityTrendData.process(config)
data['population'] = PopulationData.process(config)
data['tier'] = TierData.process(config)
# Check dimensions are consistent
check_aligned(data)
print('Data passes alignment check')
# put it into covar data form
covar_data = dict(
C=data['commute'].to_numpy().astype(DTYPE),
W=data['mobility'].to_numpy().astype(DTYPE),
N=data['population'].to_numpy().astype(DTYPE),
L=data['tier'].astype(DTYPE),
weekday=config['dates']['weekday'].astype(DTYPE),
)
return data, covar_data
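# Example usage (a sketch; "config.yaml" is a placeholder path and the config
# is assumed to follow the layout described in the module docstring):
#
#   with open("config.yaml") as f:
#       config = yaml.safe_load(f)
#   data, covar_data = CovarData(config)
#   covar_data["C"]   # inter-LAD commute matrix
#   covar_data["N"]   # population vector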
class TierData:
def get(config):
"""
Retrieve an xarray DataArray of the tier data
"""
settings = config['TierData']
if settings['input'] == 'csv':
df = TierData.getCSV(settings['address'])
else:
invalidInput(settings['input'])
return df
def getCSV(file):
"""
Read TierData CSV from file
"""
return pd.read_csv(file)
def check(xarray, config):
"""
Check the data format. Currently a placeholder that always passes.
"""
return True
def adapt(df, config):
"""
Adapt the dataframe to the desired format.
"""
global_settings = config["Global"]
settings = config["TierData"]
# this key might not be stored in the config file
# if it's not, we need to grab it using AreaCodeData
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
# Below is assuming inference_period dates
date_low, date_high = get_date_low_high(config)
if settings['format'].lower() == 'tidy':
xarray = TierData.adapt_xarray(df, date_low, date_high, areacodes, settings)
return xarray
def adapt_xarray(tiers, date_low, date_high, lads, settings):
"""
Adapt to a filtered xarray object
"""
tiers["date"] = pd.to_datetime(tiers["date"], format="%Y-%m-%d")
tiers["code"] = merge_lad_codes(tiers["code"])
# Separate out December tiers
date_mask = tiers["date"] > np.datetime64("2020-12-02")
tiers.loc[
date_mask & (tiers["tier"] == "three"),
"tier",
] = "dec_three"
tiers.loc[
date_mask & (tiers["tier"] == "two"),
"tier",
] = "dec_two"
tiers.loc[
date_mask & (tiers["tier"] == "one"),
"tier",
] = "dec_one"
# filter down to the lads
if len(lads) > 0:
tiers = tiers[tiers.code.isin(lads)]
# add in fake LADs to ensure all lockdown tiers are present for filtering
# xarray.loc does not like it when the values aren't present
# this seems to be the cleanest way
# we drop TESTLAD after filtering down
#lockdown_states = ["two", "three", "dec_two", "dec_three"]
lockdown_states = settings['lockdown_states']
for (i, t) in enumerate(lockdown_states):
tiers.loc[tiers.shape[0]+i+1] = ['TESTLAD','TEST','LAD',date_low,t]
index = pd.MultiIndex.from_frame(tiers[["date", "code", "tier"]])
index = index.sort_values()
index = index[~index.duplicated()]
ser = pd.Series(1.0, index=index, name="value")
ser = ser[date_low : (date_high - np.timedelta64(1, "D"))]
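# pivot the (date, code, tier) MultiIndex series into an xarray DataArray;
# missing combinations become NaN and are zeroed out below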
xarr = ser.to_xarray()
xarr.data[np.isnan(xarr.data)] = 0.0
xarr_filt = xarr.loc[..., lockdown_states]
xarr_filt = xarr_filt.drop_sel({'code':'TESTLAD'})
return xarr_filt
def process(config):
if config['TierData']['format'].lower()[0:5] == 'lancs':
xarray = TierData.process_lancs(config)
else:
df = TierData.get(config)
xarray = TierData.adapt(df, config)
if TierData.check(xarray, config):
return xarray
def process_lancs(config):
global_settings = config["Global"]
settings = config["TierData"]
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
date_low, date_high = get_date_low_high(config)
if config['TierData']['format'].lower() == 'lancs_raw':
return LancsData.read_tier_restriction_data(settings['address'], areacodes, date_low, date_high)
elif config['TierData']['format'].lower() == 'lancs_tidy':
return LancsData.read_challen_tier_restriction(settings['address'], date_low, date_high, areacodes)
else:
raise NotImplementedError(f'Format type {config["TierData"]["format"]} not implemented')
class CasesData:
def get(config):
"""
Retrieve a pandas DataFrame containing the cases/line list data.
"""
settings = config['CasesData']
if settings['input'] == 'url':
df = CasesData.getURL(settings['address'],config)
elif settings['input'] == 'csv':
print('Reading case data from local CSV file at',settings['address'])
df = CasesData.getCSV(settings['address'])
elif settings['input'] == 'processed':
print('Reading case data from preprocessed CSV at', settings['address'])
df = pd.read_csv(settings['address'],index_col=0)
else:
invalidInput(settings['input'])
return df
def getURL(url, config):
"""
Placeholder, in case we wish to interface with an API.
"""
pass
def getCSV(file):
"""
Format as per linelisting
"""
columns = ["pillar", "LTLA_code", "specimen_date", "lab_report_date"]
dfs = pd.read_csv(file, chunksize=50000, iterator=True, usecols=columns)
df = pd.concat(dfs)
return df
def check(df, config):
"""
Check that data format seems correct
"""
dims = df.shape
nareas = len(config["lad19cds"])
date_low, date_high = get_date_low_high(config)
dates = pd.date_range(start=date_low,end=date_high,closed="left")
days = len(dates)
entries = days * nareas
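# accept either a tidy frame (rows = days x areas, at least 3 columns)
# or a wide frame (rows = areas, columns = days)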
if not (((dims[1] >= 3) & (dims[0] == entries)) | ((dims[1] == days) & (dims[0] == nareas))):
raise ValueError("Incorrect CasesData dimensions")
if 'date' in df:
_df = df
elif df.columns.name == 'date':
_df = pd.DataFrame({"date":df.columns})
else:
raise ValueError("Cannot determine date axis")
check_date_bounds(df, date_low, date_high)
check_date_format(df)
check_lad19cd_format(df)
return True
def adapt(df, config):
"""
Adapt the line listing data to the desired dataframe format.
"""
# Extract the yaml config settings
global_settings = config["Global"]
output_settings = config['GenerateOutput']
date_low, date_high = get_date_low_high(config)
settings = config["CasesData"]
pillars = settings["pillars"]
measure = settings["measure"].casefold()
output = settings["output"]
# this key might not be stored in the config file
# if it's not, we need to grab it using AreaCodeData
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
if settings['input'] == 'processed':
return df
if settings['format'].lower() == 'phe':
df = CasesData.adapt_phe(df, date_low, date_high, pillars, measure, areacodes, output)
if output_settings['storeProcessedInputs'] and output != "None":
output = format_output_filename(output,config)
df.to_csv(output, index=True)
return df
def adapt_phe(df, date_low, date_high, pillars, measure, areacodes, output):
"""
Adapt the line listing data to the desired dataframe format.
"""
# Clean missing values
df.dropna(inplace=True)
df = df.rename(columns = {"LTLA_code":"lad19cd"})
# Clean time formats
df["specimen_date"] = pd.to_datetime(df["specimen_date"], dayfirst=True)
df["lab_report_date"] = pd.to_datetime(df["lab_report_date"], dayfirst=True)
df["lad19cd"] = merge_lad_codes(df["lad19cd"])
# filters for pillars, date ranges, and areacodes if given
filters = df["pillar"].isin(pillars)
filters &= df["lad19cd"].isin(areacodes)
if measure == "specimen":
filters &= (date_low <= df["specimen_date"]) & (df["specimen_date"] < date_high)
else:
filters &= (date_low <= df["lab_report_date"]) & (df["lab_report_date"] < date_high)
df = df[filters]
df = df.drop(columns="pillar") # No longer need pillar column
# Aggregate counts
if measure == "specimen":
df = df.groupby(["specimen_date", "lad19cd"]).count()
df = df.rename(columns = {"lab_report_date":"cases"})
else:
df = df.groupby(["lab_report_date", "lad19cd"]).count()
df = df.rename(columns = {"specimen_date":"cases"})
df.index.names = ["date", "lad19cd"]
# Fill in all dates, and add 0s for empty counts
dates = pd.date_range(date_low, date_high, closed="left")
indexes = [(date, lad19) for date in dates for lad19 in areacodes]
multi_indexes = pd.MultiIndex.from_tuples(indexes, names=["date", "lad19cd"])
results = pd.DataFrame(0, index=multi_indexes, columns=["cases"])
results = results.add(df, axis=0, fill_value=0)
results = results.reset_index()
return results
def process(config):
if config["CasesData"]["format"].lower() == "lancs":
df = CasesData.process_lancs(config)
else:
df = CasesData.get(config)
df = CasesData.adapt(df, config)
if CasesData.check(df, config):
return df
def process_lancs(config):
global_settings = config["Global"]
settings = config["CasesData"]
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
inference_period = [np.datetime64(x) for x in global_settings["inference_period"]]
date_low = inference_period[0]
date_high = inference_period[1]
if ("Pillar 1" in settings["pillars"]) and ("Pillar 2" in settings["pillars"]):
pillars = "both"
elif ("Pillar 1" in settings["pillars"]):
pillars = "1"
elif ("Pillar 2" in settings["pillars"]):
pillars = "2"
dtype = settings["measure"]
df = LancsData.read_phe_cases(settings['address'], date_low, date_high,
pillar=pillars, date_type=dtype, ltlas = areacodes)
return df.reset_index().melt(['lad19cd']).rename(columns={"value":"cases"})
class MobilityTrendData:
"""
This is the transport data. The fraction of travel compared to normal levels.
"""
def get(config):
"""
Retrieve a response containing the .ods transport data as content.
"""
settings = config['MobilityTrendData']
if settings['input'] == 'url':
df = MobilityTrendData.getURL(settings['address'],config)
elif settings['input'] == 'ods':
print('Reading Transport data from local ODS file at',settings['address'])
df = MobilityTrendData.getODS(settings['address'])
elif settings['input'] == 'processed':
print('Reading Transport data from preprocessed CSV at', settings['address'])
df = pd.read_csv(settings['address'],index_col=0)
df.date = pd.to_datetime(df.date)
else:
invalidInput(settings['input'])
return df
def getURL(url,config):
"""
Utility to extract the URL to the DFT transport .ods data.
"""
settings = config['MobilityTrendData']
response = requests.get(url)
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
if settings['format'].lower() == 'dft':
print("Retrieving transport data from the DfT")
soup = BeautifulSoup(response.text, "html.parser")
href = soup.find("a", {"href":re.compile("COVID-19-transport-use-statistics.ods")}).get("href")
response = requests.get(href, timeout=5)
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
data = io.BytesIO(response.content)
# store the base data
if config['GenerateOutput']['storeInputs']:
fn = format_output_filename(config['GenerateOutput']['scrapedDataDir'] + '/MobilityTrendData_DFT.ods',config)
with open(fn,'wb') as f:
f.write(data.getvalue())
df = MobilityTrendData.getODS(data)
return df
def getODS(file):
"""
Read DfT ODS file
"""
return pd.read_excel(file, sheet_name='Transport_use_(GB)', header=6, engine='odf',
converters={"All motor vehicles2": MobilityTrendData.clean})
def check(df, config):
"""
Check that data format seems correct
Return True if passes
Error if not
"""
dims = df.shape
date_low, date_high = get_date_low_high(config)
dates = pd.date_range(start=date_low,end=date_high,closed="left")
days = len(dates)
if not ((dims[1] >= 1) & (dims[0] == days)): # number of entries
raise ValueError("Incorrect MobilityData dimensions")
# our dates are stored in the index column
# create a new df with just the dates to see
df_date = pd.DataFrame(df.index)
check_date_bounds(df_date, date_low, date_high)
check_date_format(df_date)
return True
def clean(x):
"""
Utility to clean formatting from the table where data has been revised.
"""
if type(x) == str:
return float(x.strip("r%"))/100
else:
return x
def adapt(df, config):
"""
Adapt the transport data to the desired dataframe format.
"""
global_settings = config["Global"]
output_settings = config['GenerateOutput']
date_low, date_high = get_date_low_high(config)
settings = config["MobilityTrendData"]
output = settings["output"]
if settings['input'] == 'processed':
return df
if settings['format'].lower() == 'dft':
df = MobilityTrendData.adapt_dft(df,date_low,date_high,output,config)
if output_settings['storeProcessedInputs'] and output != "None":
output = format_output_filename(output,config)
df.to_csv(output, index=True)
return df
def adapt_dft(df,date_low,date_high,output,config):
"""
Adapt the Department for Transport (DfT) data format to a clean DataFrame
"""
columns = [
"Date1(weekends and bank holidays in grey)",
"All motor vehicles2"
]
colnames = ["date", "percent"]
df = df[columns]
df = df.dropna(axis=0)
df.columns = colnames
df["date"] = df["date"].apply(lambda x: pd.to_datetime(x, dayfirst=True))
mask = (df["date"] >= date_low) & (df["date"] < date_high)
df = df.loc[mask]
# change the index
df.set_index('date',inplace=True)
# set dtype
df.percent = pd.to_numeric(df.percent)
return df
def process(config):
if config['MobilityTrendData']['format'].lower() == "lancs":
df = MobilityTrendData.process_lancs(config)
else:
df = MobilityTrendData.get(config)
df = MobilityTrendData.adapt(df, config)
if MobilityTrendData.check(df, config):
return df
def process_lancs(config):
date_low, date_high = get_date_low_high(config)
return LancsData.read_traffic_flow(
config['MobilityTrendData']['address'],
date_low, date_high)
class PopulationData:
def get(config):
"""
Retrieve a response containing the population data from the ONS.
"""
settings = config['PopulationData']
if settings['input'] == 'url':
df = PopulationData.getURL(settings['address'],config)
elif settings['input'] == 'xls':
print('Reading Pop. data from local XLS file at',settings['address'])
df = PopulationData.getXLS(settings['address'])
elif settings['input'] == 'processed':
print('Reading Pop. data from preprocessed CSV at', settings['address'])
df = pd.read_csv(settings['address'],index_col=0)
else:
invalidInput(settings['input'])
return df
def getURL(url, config):
"""
Utility to extract the URL to the ONS population .xls data.
"""
settings = config['PopulationData']
response = requests.get(url, timeout=5)
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
if settings['format'] == 'ons':
print("Retrieving population data from the ONS")
data = io.BytesIO(response.content)
# store the base data
if config['GenerateOutput']['storeInputs']:
fn = format_output_filename(config['GenerateOutput']['scrapedDataDir'] + '/PopulationData_ONS.xls',config)
with open(fn,'wb') as f:
f.write(data.getvalue())
df = PopulationData.getXLS(data)
return df
def getXLS(file):
"""
Read ONS XLS file
"""
return pd.read_excel(file, sheet_name='MYE2 - Persons', header=4)
def check(df, config):
"""
Check that data format seems correct
"""
dims = df.shape
nareas = len(config["lad19cds"])
if not ((dims[1] >= 1) & (dims[0] == nareas)): # number of entries
raise ValueError("PopData: Incorrect dimensions")
check_lad19cd_format(df)
return True
def adapt(df, config):
"""
Adapt the population data to the desired dataframe format.
"""
output_settings = config['GenerateOutput']
settings = config["PopulationData"]
output = settings["output"]
if settings['input'] == 'processed':
return df
if settings['format'].lower() == 'ons':
df = PopulationData.adapt_ons(df,output,config)
if output_settings['storeProcessedInputs'] and output != "None":
output = format_output_filename(output,config)
df.to_csv(output, index=True)
return df
def adapt_ons(df, output, config):
"""
Adapt the ONS data format to a clean Pandas DataFrame
"""
lads = [
'Metropolitan District',
'Non-metropolitan District',
'Unitary Authority',
'London Borough',
'Council Area',
'Local Government District'
]
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
columns = ["Code", "All ages"]
colnames = ["lad19cd", "n"]
df = df[df.Geography1.isin(lads)]
df = df[columns]
df.columns = colnames
df["lad19cd"] = merge_lad_codes(df["lad19cd"])
df = merge_lad_values(df)
filters = df["lad19cd"].isin(areacodes)
df = df[filters]
df.set_index('lad19cd',inplace=True)
return df
def process(config):
if config['PopulationData']['format'].lower() == "lancs":
df = PopulationData.process_lancs(config)
else:
df = PopulationData.get(config)
df = PopulationData.adapt(df, config)
try:
PopulationData.check(df, config)
return df
except Exception as e:
print('Population data failed check:', e)
return df
def process_lancs(config):
return LancsData.read_population(config['PopulationData']['address'])
class InterLadCommuteData:
def get(config):
"""
Retrieve a response containing the commuting data from Nomisweb.
"""
settings = config['InterLadCommuteData']
if settings['input'] == 'url':
df = InterLadCommuteData.getURL(settings['address'],config)
elif settings['input'] == 'csv':
print('Reading Commute data from local CSV file at',settings['address'])
df = InterLadCommuteData.getCSV(settings['address'])
elif settings['input'] == 'processed':
print('Reading Commute data from preprocessed CSV at', settings['address'])
df = pd.read_csv(settings['address'],index_col=0)
else:
invalidInput(settings['input'])
return df
def getURL(url, config):
"""
Utility to extract the URL to the Nomis commuting csv (zipped) data.
"""
settings = config['InterLadCommuteData']
response = requests.get(url, timeout=5)
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
if settings['format'].lower() == 'nomis':
print("Retrieving commute data from NomisWeb")
with zipfile.ZipFile(io.BytesIO(response.content)) as csvzip:
with csvzip.open(csvzip.namelist()[0]) as csv:
data = io.BytesIO(csv.read())
if config['GenerateOutput']['storeInputs']:
fn = format_output_filename(config['GenerateOutput']['scrapedDataDir'] + '/InterLadCommuteData_Nomis.csv',config)
with open(fn,'wb') as f:
f.write(data.getvalue())
df = InterLadCommuteData.getCSV(data)
return df
def getCSV(file):
"""
Read Nomisweb CSV file
"""
return pd.read_csv(file)
'''
Created on May 16, 2018
@author: cef
significant scripts for calculating damage within the ABMRI framework
for secondary data loader scripts, see fdmg.datos.py
'''
#===============================================================================
# IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref, random, sys
import pandas as pd
import numpy as np
import scipy.integrate
#===============================================================================
# shortcuts
#===============================================================================
from collections import OrderedDict
from hlpr.exceptions import Error
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from model.sofda.hp.basic import OrderedSet
from model.sofda.hp.pd import view
idx = pd.IndexSlice
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
#import hp.plot
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.data as hp_data
import model.sofda.hp.dyno as hp_dyno
import model.sofda.hp.sel as hp_sel
import model.sofda.fdmg.datos_fdmg as datos
#import matplotlib.pyplot as plt
#import matplotlib
#import matplotlib.animation as animation #load the animation module (with the new search path)
#===============================================================================
# custom shortcuts
#===============================================================================
from model.sofda.fdmg.house import House
#from model.sofda.fdmg.dfunc import Dfunc
from model.sofda.fdmg.dmgfeat import Dmg_feat
# logger setup -----------------------------------------------------------------------
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initialized')
#===============================================================================
#module level defaults ------------------------------------------------------
#===============================================================================
#datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab
datafile_types_list = ['.csv', '.xls']
class Fdmg( #flood damage model
hp_sel.Sel_controller, #no init
hp_dyno.Dyno_wrap, #add some empty containers
#hp.plot.Plot_o, #build the label
hp_sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes
hp_oop.Trunk_o, #no init
#Parent_cmplx: attach empty kids_sd
#Parent: set some defaults
hp_oop.Child):
"""
#===========================================================================
# INPUTS
#===========================================================================
pars_path ==> pars_file.xls
main external parameter spreadsheet.
See description in file for each column
dataset parameters
tab = 'data'. expected columns: datapars_cols
session parameters
tab = 'gen'. expected rows: sessionpars_rows
"""
#===========================================================================
# program parameters
#===========================================================================
name = 'fdmg'
#list of attribute names to try and inherit from the session
try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \
'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \
'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\
'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \
'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \
'area_egrd00', 'area_egrd01', 'area_egrd02',
'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price',
'write_fdmg_sum_fly',
])
fld_aep_spcl = 100 #special flood to try and include in db runs
bsmt_egrd = 'wet' #default value for bsmt_egrd
legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indices rather than column labels)
gis_area_max = 3500
acode_sec_d = dict() #available acodes with dfunc data loaded (to check against binv request) {acode:asector}
'consider allowing the user control of these'
gis_area_min = 5
gis_area_max = 5000
write_fdmg_sum_fly = False
write_dmg_fly_first = True #start off to signify first run
#===========================================================================
# debuggers
#===========================================================================
write_beg_hist = True #whether to write the beg history or not
beg_hist_df = None
#===========================================================================
# user provided values
#===========================================================================
#legacy pars
floor_ht = 0.0
mind = '' #column to match between data sets and name the house objects
#EAD calc
ca_ltail ='flat'
ca_rtail =2 #aep at which zero value is assumed. 'none' uses lowest aep in flood set
#Floodo controllers
gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off)
dbg_fld_cnt = '0' #for slicing the number of floods we want to evaluate
#area exposure
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#Dfunc controllers
place_codes = None
dmg_types = None
flood_tbl_nm = None #name of the flood table to use
#timeline deltas
'just keeping this on the fdmg for simplicity... no need for flood level heterogeneity'
wsl_delta = 0.0
fprob_mult = 1.0 #needs to be a float for type matching
dmg_rat_f = False
#Fdmg.House pars
joist_space = 0.3
G_anchor_ht = 0.6
bsmt_egrd_code = 'plpm'
damp_func_code = 'seep'
bsmt_opn_ht_code = '*min(2.0)'
hse_skip_depth = -4 #depth to skip house damage calc
fhr_nm = ''
cont_val_scale = .25
write_fdmg_sum = True
dfeat_xclud_price = 0.0
#===========================================================================
# calculation parameters
#===========================================================================
res_fancy = None
gpwr_f = True #placeholder for __init__ calcs
fld_aep_l = None
dmg_dx_base = None #results frame for writing
plotr_d = None #dictionary of EAD plot workers
dfeats_d = dict() #{tag:dfeats}. see raise_all_dfeats()
fld_pwr_cnt = 0
seq = 0
#damage results/stats
dmgs_df = None
dmgs_df_wtail = None #damage summaries with damages for the tail logic included
ead_tot = 0
dmg_tot = 0
#===========================================================================
# calculation data holders
#===========================================================================
dmg_dx = None #container for full run results
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self,*vars, **kwargs):
logger = mod_logger.getChild('Fdmg')
#=======================================================================
# initialize cascade
#=======================================================================
super(Fdmg, self).__init__(*vars, **kwargs) #initialize the base class
#=======================================================================
# object updates
#=======================================================================
self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\
'wsl_delta':0}) #update the reset attributes
#=======================================================================
# defaults
#=======================================================================
if not self.session._write_data:
self.write_fdmg_sum = False
if not self.dbg_fld_cnt == 'all':
self.dbg_fld_cnt = int(float(self.dbg_fld_cnt))
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
#model assignment
if not self.model.__repr__() == self.__repr__():
raise IOError
#check we have all the datos we want
dname_exp = np.array(('rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl'))
boolar = np.invert(np.isin(dname_exp, self.session.pars_df_d['datos']))
if np.any(boolar):
"""allowing this?"""
logger.warning('missing %i expected datos: %s'%(boolar.sum(), dname_exp[boolar]))
#=======================================================================
#setup functions
#=======================================================================
#par cleaners/ special loaders
logger.debug("load_hse_geo() \n")
self.load_hse_geo()
logger.info('load and clean dfunc data \n')
self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table
logger.debug('\n')
self.setup_dmg_dx_cols()
logger.debug('load_submodels() \n')
self.load_submodels()
logger.debug('init_dyno() \n')
self.init_dyno()
#outputting setup
if self.write_fdmg_sum_fly:
self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag)
logger.info('Fdmg model initialized as \'%s\' \n'%(self.name))
return
#===========================================================================
# def xxxcheck_pars(self): #check your data pars
# #pull the datas frame
# df_raw = self.session.pars_df_d['datos']
#
# #=======================================================================
# # check mandatory data objects
# #=======================================================================
# if not 'binv' in df_raw['name'].tolist():
# raise Error('missing \'binv\'!')
#
# #=======================================================================
# # check optional data objects
# #=======================================================================
# fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl']
# boolidx = df_raw['name'].isin(fdmg_tab_nl)
#
# if not np.all(boolidx):
# raise IOError #passed some unexpected data names
#
# return
#===========================================================================
def load_submodels(self):
logger = self.logger.getChild('load_submodels')
self.state = 'load'
#=======================================================================
# data objects
#=======================================================================
'this is the main loader that builds all the children as specified on the data tab'
logger.info('loading dat objects from \'fdmg\' tab')
logger.debug('\n \n')
#build datos from the data tab
'todo: hard code these class types (rather than reading from the control file)'
self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on
kid_class = None) #should raise according to df entry
self.session.prof(state='load.fdmg.datos')
'WARNING: fdmgo_d is not set until after ALL the children on this tab are raised'
#attach special children
self.binv = self.fdmgo_d['binv']
"""NO! this wont hold resetting updates
self.binv_df = self.binv.childmeta_df"""
#=======================================================================
# flood tables
#=======================================================================
self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on
kid_class = datos.Flood_tbl) #should raise according to df entry
#make sure the one we are looking for is in there
if not self.session.flood_tbl_nm in list(self.ftblos_d.keys()):
raise Error('requested flood table name \'%s\' not found in loaded sets'%self.session.flood_tbl_nm)
'initial call which only updates the binv_df'
self.set_area_prot_lvl()
if 'fhr_tbl' in list(self.fdmgo_d.keys()):
self.set_fhr()
#=======================================================================
# dfeats
#======================================================================
if self.session.load_dfeats_first_f & self.session.wdfeats_f:
logger.debug('raise_all_dfeats() \n')
self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats()
#=======================================================================
# raise houses
#=======================================================================
#check we have all the acodes
self.check_acodes()
logger.info('raising houses')
logger.debug('\n')
self.binv.raise_houses()
self.session.prof(state='load.fdmg.houses')
'calling this here so all of the other datos are raised'
#self.rfda_curve = self.fdmgo_d['rfda_curve']
"""No! we need to get this in before the binv.reset_d['childmeta_df'] is set
self.set_area_prot_lvl() #apply the area protection from the named flood table"""
logger.info('loading floods')
logger.debug('\n \n')
self.load_floods()
self.session.prof(state='load.fdmg.floods')
logger.debug("finished with %i kids\n"%len(self.kids_d))
return
def setup_dmg_dx_cols(self): #get the columns to use for fdmg results
"""
This is setup to generate a unique set of ordered column names with this logic
take the damage types
add mandatory fields
add user provided fields
"""
logger = self.logger.getChild('setup_dmg_dx_cols')
#=======================================================================
#build the basic list of column headers
#=======================================================================
#damage types at the head
col_os = OrderedSet(self.dmg_types) #put damage types at the head
#basic add ons
_ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el'])
#=======================================================================
# special logic
#=======================================================================
if self.dmg_rat_f:
for dmg_type in self.dmg_types:
_ = col_os.add('%s_rat'%dmg_type)
if not self.wsl_delta==0:
col_os.add('wsl_raw')
"""This doesnt handle runs where we start with a delta of zero and then add some later
for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat"""
#ground water damage
if 'dmg_gw' in self.session.outpars_d['Flood']:
col_os.add('gw_f')
#add the dem if necessary
if 'gw_f' in col_os:
col_os.add('dem_el')
#=======================================================================
# set pars based on user provided
#=======================================================================
#s = self.session.outpars_d[self.__class__.__name__]
#extra columns for damage results frame
if self.db_f or self.session.write_fdmg_fancy:
logger.debug('including extra columns in outputs')
#clean the extra cols
'todo: move this to a helper'
if hasattr(self.session, 'xtra_cols'):
try:
dc_l = eval(self.session.xtra_cols) #convert to a list
except:
logger.error('failed to convert \'xtra_cols\' to a list. check formatting')
raise IOError
else:
dc_l = ['wsl_raw', 'gis_area', 'acode_s', 'B_f_height', 'BS_ints','gw_f']
if not isinstance(dc_l, list): raise IOError
col_os.update(dc_l) #add these
self.dmg_df_cols = col_os
logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols)
return
def load_pars_dfunc(self,
df_raw=None): #build a df from the dfunc tab
"""
20190512: upgraded to handle nores and mres types
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_pars_dfunc')
#list of columns to expect
exp_colns = np.array(['acode','asector','place_code','dmg_code','dfunc_type','anchor_ht_code'])
if df_raw is None:
df_raw = self.session.pars_df_d['dfunc']
logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw))
#=======================================================================
# clean
#=======================================================================
df1 = df_raw.dropna(axis='columns', how='all').dropna(axis='index', how='all') #drop rows with all na
df1 = df1.drop(columns=['note', 'rank'], errors='ignore') #drop some columns we dont need
#=======================================================================
# checking
#=======================================================================
#expected columns
boolar = np.invert(np.isin(exp_colns, df1.columns))
if np.any(boolar):
raise Error('missing %i expected columns\n %s'%(boolar.sum(), exp_colns[boolar]))
#rfda garage logic
boolidx = np.logical_and(df1['place_code'] == 'G', df1['dfunc_type'] == 'rfda')
if np.any(boolidx):
raise Error('got dfunc_type = rfda for a garage curve (no such thing)')
#=======================================================================
# calculated columns
#=======================================================================
df2 = df1.copy()
df2['dmg_type'] = df2['place_code'] + df2['dmg_code']
"""as acode whill change, we want to keep the name static
df2['name'] = df2['acode'] + df2['dmg_type']"""
df2['name'] = df2['dmg_type']
#=======================================================================
# data loading
#=======================================================================
if 'tailpath' in df2.columns:
boolidx = ~pd.isnull(df2['tailpath']) #get dfuncs with data requests
self.load_raw_dfunc(df2[boolidx])
df2 = df2.drop(['headpath', 'tailpath'], axis = 1, errors='ignore') #drop these columns
#=======================================================================
# get special lists
#=======================================================================
#find total for exclusion
boolidx = np.invert((df2['place_code']=='total').astype(bool))
"""Im not using the total dfunc any more..."""
if not np.all(boolidx):
raise Error('I think this has been disabled')
self.dmg_types = tuple(df2.loc[boolidx,'dmg_type'].dropna().unique().tolist())
self.dmg_codes = tuple(df2.loc[boolidx, 'dmg_code'].dropna().unique().tolist())
self.place_codes = tuple(df2.loc[boolidx,'place_code'].dropna().unique().tolist())
#=======================================================================
# #handle nulls
#=======================================================================
df3 = df2.copy()
for coln in ['dmg_type', 'name']:
df3.loc[:,coln] = df3[coln].replace(to_replace=np.nan, value='none')
#=======================================================================
# set this
#=======================================================================
self.session.pars_df_d['dfunc'] = df3
logger.debug('dfunc_df with %s'%str(df3.shape))
#=======================================================================
# get slice for houses
#=======================================================================
self.dfunc_mstr_df = df3[boolidx] #get this trim
return
"""
view(df3)
"""
def load_hse_geo(self): #special loader for hse_geo dxcol (from tab hse_geo)
logger = self.logger.getChild('load_hse_geo')
#=======================================================================
# load and clean the pars
#=======================================================================
df_raw = hp_pd.load_xls_df(self.session.parspath,
sheetname = 'hse_geo', header = [0,1], logger = logger)
df = df_raw.dropna(how='all', axis = 'index') #drop any rows with all nulls
self.session.pars_df_d['hse_geo'] = df
#=======================================================================
# build a blank starter for each house to fill
#=======================================================================
omdex = df.columns #get the original mdex
'probably a cleaner way of doing this'
lvl0_values = omdex.get_level_values(0).unique().tolist()
lvl1_values = omdex.get_level_values(1).unique().tolist()
lvl1_values.append('t')
newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['place_code','finish_code'])
"""id prefer to use a shortend type (Float32)
but this makes all the type checking very difficult"""
geo_dxcol = pd.DataFrame(index = df.index, columns = newcols, dtype='Float32') #make the frame
self.geo_dxcol_blank = geo_dxcol
if self.db_f:
if np.any(pd.isnull(df)):
raise Error('got %i nulls in the hse_geo tab'%df.isna().sum().sum())
l = geo_dxcol.index.tolist()
if not l == ['area', 'height', 'per', 'inta']:
raise IOError
return
def load_raw_dfunc(self, meta_df_raw): #load raw data for dfuncs
logger = self.logger.getChild('load_raw_dfunc')
logger.debug('with df \'%s\''%(str(meta_df_raw.shape)))
d = dict() #empty container
meta_df = meta_df_raw.copy()
#=======================================================================
# loop through each row and load the data
#=======================================================================
for indx, row in meta_df.iterrows():
inpath = os.path.join(row['headpath'], row['tailpath'])
df = hp_pd.load_smart_df(inpath,
index_col =None,
logger = logger)
d[row['name']] = df.dropna(how = 'all', axis = 'index') #store this into the dictionary
logger.info('finished loading raw dcurve data on %i dcurves: %s'%(len(d), list(d.keys())))
self.dfunc_raw_d = d
return
def load_floods(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_floods')
logger.debug('setting floods df \n')
self.set_floods_df()
df = self.floods_df
logger.debug('raising floods \n')
d = self.raise_children_df(df, #build flood children
kid_class = Flood,
dup_sibs_f= True,
container = OrderedDict) #pass attributes from one to the next
#=======================================================================
# ordered by aep
#=======================================================================
fld_aep_od = OrderedDict()
for childname, childo in d.items():
if hasattr(childo, 'ari'):
fld_aep_od[childo.ari] = childo
else: raise IOError
logger.info('raised and bundled %i floods by aep'%len(fld_aep_od))
self.fld_aep_od = fld_aep_od
return
def set_floods_df(self): #build the flood meta data
logger = self.logger.getChild('set_floods_df')
df_raw = self.session.pars_df_d['floods']
df1 = df_raw.sort_values('ari').reset_index(drop=True)
df1['ari'] = df1['ari'].astype(int)
#=======================================================================
# slice for debug set
#=======================================================================
if self.db_f & (not self.dbg_fld_cnt == 'all'):
"""this would be much better with explicit typesetting"""
#check that we even have enough to do the slicing
if len(df1) < 2:
logger.error('too few floods for debug slicing. pass dbg_fld_cnt == all')
raise IOError
df2 = pd.DataFrame(columns = df1.columns) #make blank starter frame
dbg_fld_cnt = int(float(self.dbg_fld_cnt))
logger.info('db_f=TRUE. selecting %i (of %i) floods'%(dbg_fld_cnt, len(df1)))
#===================================================================
# try to pull out and add the 100yr
#===================================================================
try:
boolidx = df1.loc[:,'ari'] == self.fld_aep_spcl
if not boolidx.sum() == 1:
logger.debug('failed to locate 1 flood')
raise IOError
df2 = df2.append(df1[boolidx]) #add this row to the end
df1 = df1[~boolidx] #slice out this row
dbg_fld_cnt = max(0, dbg_fld_cnt - 1) #reduce the loop count by 1
dbg_fld_cnt = min(dbg_fld_cnt, len(df1)) #double check in case we are given a very short set
logger.debug('added the %s year flood to the list with dbg_fld_cnt %i'%(self.fld_aep_spcl, dbg_fld_cnt))
except:
logger.debug('failed to extract the special %i flood'%self.fld_aep_spcl)
df2 = df1.copy()
#===================================================================
# build list of extreme (low/high) floods
#===================================================================
evn_cnt = 0
odd_cnt = 0
for cnt in range(0, dbg_fld_cnt, 1):
if cnt % 2 == 0: #evens. pull from front
idxr = evn_cnt
evn_cnt += 1
else: #odds. pull from end
idxr = len(df1) - odd_cnt - 1
odd_cnt += 1
logger.debug('pulling flood with indexer %i'%(idxr))
ser = df1.iloc[idxr, :] #make this slice
df2 = df2.append(ser) #append this to the end
#clean up
df = df2.drop_duplicates().sort_values('ari').reset_index(drop=True)
logger.debug('built extremes flood df with %i aeps: %s'%(len(df), df.loc[:,'ari'].values.tolist()))
if not len(df) == int(self.dbg_fld_cnt):
raise IOError
else:
df = df1.copy()
if not len(df) > 0: raise IOError
self.floods_df = df
return
def set_area_prot_lvl(self): #assign the area_prot_lvl to the binv based on your tab
#logger = self.logger.getChild('set_area_prot_lvl')
"""
TODO: Consider moving this onto the binv and making the binv dynamic...
Calls:
handles for flood_tbl_nm
"""
logger = self.logger.getChild('set_area_prot_lvl')
logger.debug('assigning \'area_prot_lvl\' for \'%s\''%self.flood_tbl_nm)
#=======================================================================
# get data
#=======================================================================
ftbl_o = self.ftblos_d[self.flood_tbl_nm] #get the activated flood table object
ftbl_o.apply_on_binv('aprot_df', 'area_prot_lvl')
return True
def set_fhr(self): #assign the fhz bfe and zone from the fhr_tbl data
logger = self.logger.getChild('set_fhr')
logger.debug('assigning for \'fhz\' and \'bfe\'')
#get the data for this fhr set
fhr_tbl_o = self.fdmgo_d['fhr_tbl']
try:
df = fhr_tbl_o.d[self.fhr_nm]
except:
if not self.fhr_nm in list(fhr_tbl_o.d.keys()):
logger.error('could not find selected fhr_nm \'%s\' in the loaded rule sets: \n %s'
%(self.fhr_nm, list(fhr_tbl_o.d.keys())))
raise IOError
#=======================================================================
# loop through each series and apply
#=======================================================================
"""
not the most generic way of handling this...
todo:
add generic method to the binv
can take ser or df
updates the childmeta_df if before init
updates the children if after init
"""
for hse_attn in ['fhz', 'bfe']:
ser = df[hse_attn]
if not self.session.state == 'init':
#=======================================================================
# tell teh binv to update its houses
#=======================================================================
self.binv.set_all_hse_atts(hse_attn, ser = ser)
else:
logger.debug('set column \'%s\' onto the binv_df'%hse_attn)
self.binv.childmeta_df.loc[:,hse_attn] = ser #set this column in teh binvdf
"""I dont like this
fhr_tbl_o.apply_on_binv('fhz_df', 'fhz', coln = self.fhr_nm)
fhr_tbl_o.apply_on_binv('bfe_df', 'bfe', coln = self.fhr_nm)"""
return True
def get_all_aeps_classic(self): #get the list of flood aeps from the classic flood table format
'kept this special syntax reader separate in case we want to change the format of the flood tables'
flood_pars_df = self.session.pars_df_d['floods'] #load the data from the flood table
fld_aep_l = flood_pars_df.loc[:, 'ari'].values #drop the 2 values and convert to a list
return fld_aep_l
def run(self, **kwargs): #placeholder for simulation runs
logger = self.logger.getChild('run')
logger.debug('on run_cnt %i'%self.run_cnt)
self.run_cnt += 1
self.state='run'
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not isinstance(self.outpath, str):
raise IOError
logger.info('\n fdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmg')
logger.info('for run_cnt %i'%self.run_cnt)
self.calc_fld_set(**kwargs)
return
def setup_res_dxcol(self, #setup the results frame
fld_aep_l = None,
#dmg_type_list = 'all',
bid_l = None):
#=======================================================================
# defaults
#=======================================================================
if bid_l is None: bid_l = self.binv.bid_l
if fld_aep_l is None: fld_aep_l = list(self.fld_aep_od.keys()) #just get all the keys from the dictionary
#if dmg_type_list=='all': dmg_type_list = self.dmg_types
#=======================================================================
# setup the dxind for writing
#=======================================================================
lvl0_values = fld_aep_l
lvl1_values = self.dmg_df_cols #include extra reporting columns
#fold these into a mdex (each flood_aep has all dmg_types)
columns = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['flood_aep','hse_atts'])
dmg_dx = pd.DataFrame(index = bid_l, columns = columns).sort_index() #make the frame
self.dmg_dx_base = dmg_dx.copy()
if self.db_f:
logger = self.logger.getChild('setup_res_dxcol')
if self.write_beg_hist:
fld_aep_l.sort()
columns = pd.MultiIndex.from_product([fld_aep_l, ['egrd', 'cond']],
names=['flood_aep','egrd'])
self.beg_hist_df = pd.DataFrame(index=bid_l, columns = columns)
logger.info('recording bsmt_egrd history with %s'%str(self.beg_hist_df.shape))
else:
self.beg_hist_df = None
"""
dmg_dx.columns
"""
return
def calc_fld_set(self, #calc flood damage for the flood set
fld_aep_l = None, #list of flood aeps to calcluate
#dmg_type_list = 'all', #list of damage types to calculate
bid_l = None, #list of building names ot calculate
wsl_delta = None, #delta value to add to all wsl
wtf = None, #optinonal flag to control writing of dmg_dx (otherwise session.write_fdmg_set_dx is used)
**run_fld): #kwargs to send to run_fld
'we could separate the object creation and the damage calculation'
"""
#=======================================================================
# INPUTS
#=======================================================================
fld_aep_l: list of floods to calc
this can be a custom list built by the user
extracted from the flood table (see session.get_ftbl_aeps)
loaded from the legacy rfda pars (session.rfda_pars.fld_aep_l)\
bid_l: list of ids (matching the mind variable set under Fdmg)
#=======================================================================
# OUTPUTS
#=======================================================================
dmg_dx: dxcol of flood damage across all dmg_types and floods
mdex
lvl0: flood aep
lvl1: dmg_type + extra cols
I wanted to have this flexible, so the dfunc could pass up extra headers
couldn't get it to work. Instead used a global list and a check.
new headers must be added to the global list and Dfunc.
index
bldg_id
#=======================================================================
# TODO:
#=======================================================================
setup to calc across binvs as well
"""
#=======================================================================
# defaults
#=======================================================================
start = time.time()
logger = self.logger.getChild('calc_fld_set')
if wtf is None: wtf = self.session.write_fdmg_set_dx
if wsl_delta is None: wsl_delta= self.wsl_delta
#=======================================================================
# setup and load the results frame
#=======================================================================
#check to see that all of these conditions pass
if not np.all([bid_l is None, fld_aep_l is None]):
logger.debug('non default run. rebuild the dmg_dx_base')
#non default run. rebuild the frame
self.setup_res_dxcol( fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l)
elif self.dmg_dx_base is None: #probably the first run
if not self.run_cnt == 1: raise IOError
logger.debug('self.dmg_dx_base is None. rebuilding')
self.setup_res_dxcol(fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l) #set it up with the defaults
dmg_dx = self.dmg_dx_base.copy() #just start witha copy of the base
#=======================================================================
# finish defaults
#=======================================================================
'these are all mostly for reporting'
if fld_aep_l is None: fld_aep_l = list(self.fld_aep_od.keys()) #just get all the keys from the dictionary
""" leaving these as empty kwargs and letting floods handle
if bid_l == None: bid_l = binv_dato.bid_l
if dmg_type_list=='all': dmg_type_list = self.dmg_types """
"""
lvl0_values = dmg_dx.columns.get_level_values(0).unique().tolist()
lvl1_values = dmg_dx.columns.get_level_values(1).unique().tolist()"""
logger.info('calc flood damage (%i) floods w/ wsl_delta = %.2f'%(len(fld_aep_l), wsl_delta))
logger.debug('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \n')
#=======================================================================
# loop and calc each flood
#=======================================================================
fcnt = 0
first = True
for flood_aep in fld_aep_l: #loop through and build each flood
#self.session.prof(state='%s.fdmg.calc_fld_set.%i'%(self.get_id(), fcnt)) #memory profiling
self.state = flood_aep
'useful for keeping track of what the model is doing'
#get the flood
flood_dato = self.fld_aep_od[flood_aep] #pull this from the dictionary
logger.debug('getting dmg_df for %s'%flood_dato.name)
#===================================================================
# run sequence
#===================================================================
#get damage for these depths
dmg_df = flood_dato.run_fld(**run_fld) #add the damage df to this slice
if dmg_df is None: continue #skip this one
#===================================================================
# wrap up
#===================================================================
dmg_dx[flood_aep] = dmg_df #store into the frame
fcnt += 1
logger.debug('for flood_aep \'%s\' on fcnt %i got dmg_df %s \n'%(flood_aep, fcnt, str(dmg_df.shape)))
#===================================================================
# checking
#===================================================================
if self.db_f:
#check that the floods are increasing
if first:
first = False
last_aep = None
else:
if not flood_aep > last_aep:
raise IOError
last_aep = flood_aep
#=======================================================================
# wrap up
#=======================================================================
self.state = 'na'
if wtf:
filetail = '%s %s %s %s res_fld'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_to_file(filepath, dmg_dx, overwrite=True, index=True) #send for writing
self.dmg_dx = dmg_dx
stop = time.time()
logger.info('in %.4f secs calcd damage on %i of %i floods'%(stop - start, fcnt, len(fld_aep_l)))
return
def get_results(self): #called by Timestep.run_dt()
self.state='wrap'
logger = self.logger.getChild('get_results')
#=======================================================================
# optionals
#=======================================================================
s = self.session.outpars_d[self.__class__.__name__]
if (self.session.write_fdmg_fancy) or (self.session.write_fdmg_sum):
logger.debug("calc_summaries \n")
dmgs_df = self.calc_summaries()
self.dmgs_df = dmgs_df.copy()
else: dmgs_df = None
if ('ead_tot' in s) or ('dmg_df' in s):
logger.debug('\n')
self.calc_annulized(dmgs_df = dmgs_df, plot_f = False)
"this will also run calc_summaries if it hasn't happened yet"
if 'dmg_tot' in s:
#get a cross section of the 'total' column across all flood_aeps and sum for all entries
self.dmg_tot = self.dmg_dx.xs('total', axis=1, level=1).sum().sum()
if ('bwet_cnt' in s) or ('bdamp_cnt' in s) or ('bdry_cnt' in s):
logger.debug('get_fld_begrd_cnt')
self.get_fld_begrd_cnt()
if 'fld_pwr_cnt' in s:
logger.debug('calc_fld_pwr_cnt \n')
cnt = 0
for aep, obj in self.fld_aep_od.items():
if obj.gpwr_f: cnt +=1
self.fld_pwr_cnt = cnt
self.binv.calc_binv_stats()
if self.session.write_fdmg_fancy:
self.write_res_fancy()
if self.write_fdmg_sum_fly: #write the results after each run
self.write_dmg_fly()
#update the bdmg_dx
if not self.session.bdmg_dx is None:
#add the timestep
bdmg_dx = pd.concat([self.dmg_dx],
keys=[self.tstep_o.name],
names=['tstep'],
axis=1,verify_integrity=True,copy=False)
bdmg_dx.index.name = self.mind
"""trying this as a column so we can append
#add the sim
bdmg_dx = pd.concat([bdmg_dx],
keys=[self.simu_o.name],
names=['simu'],
axis=1,verify_integrity=True,copy=False)"""
#join to the big
if len(self.session.bdmg_dx) == 0:
self.session.bdmg_dx = bdmg_dx.copy()
else:
self.session.bdmg_dx = self.session.bdmg_dx.join(bdmg_dx)
"""
view(self.session.bdmg_dx.join(bdmg_dx))
view(bdmg_dx)
view(self.session.bdmg_dx)
"""
#=======================================================================
# checks
#=======================================================================
if self.db_f:
self.check_dmg_dx()
logger.debug('finished \n')
def calc_summaries(self, #annualize the damages
fsts_l = ['gpwr_f', 'dmg_sw', 'dmg_gw'], #list of additional flood attributes to report in the summary
dmg_dx=None,
plot=False, #flag to execute plot_dmgs() at the end. better to do this explicitly with an outputr
wtf=None):
"""
basically dropping dimensions on the outputs and adding annualized damages
#=======================================================================
# OUTPUTS
#=======================================================================
DROP BINV DIMENSION
dmgs_df: df with
columns: raw damage types, and annualized damage types
index: each flood
entries: total damage for binv
DROP FLOODS DIMENSION
aad_sum_ser
DROP ALL DIMENSIONS
ead_tot
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_summaries')
if dmg_dx is None: dmg_dx = self.dmg_dx.copy()
if plot is None: plot = self.session._write_figs
if wtf is None: wtf = self.write_fdmg_sum
#=======================================================================
# #setup frame
#=======================================================================
#get the columns
dmg_types = list(self.dmg_types) + ['total']
#=======================================================================
# #build the annualized damage type names
#=======================================================================
admg_types = []
for entry in dmg_types: admg_types.append(entry+'_a')
cols = dmg_types + ['prob', 'prob_raw'] + admg_types + fsts_l
dmgs_df = pd.DataFrame(columns = cols)
dmgs_df['ari'] = dmg_dx.columns.get_level_values(0).unique()
dmgs_df = dmgs_df.sort_values('ari').reset_index(drop=True)
#=======================================================================
# loop through and fill out the data
#=======================================================================
for index, row in dmgs_df.iterrows(): #loop through and fill out
dmg_df = dmg_dx[row['ari']] #get the fdmg for this aep
#sum all the damage types
for dmg_type in dmg_types:
row[dmg_type] = dmg_df[dmg_type].sum() #sum them all up
#calc the probability
row['prob_raw'] = 1/float(row['ari']) #inverse of aep
row['prob'] = row['prob_raw'] * self.fprob_mult #apply the multiplier
#calculate the annualized damages
for admg_type in admg_types:
dmg_type = admg_type[:-2] #drop the '_a' suffix
row[admg_type] = row[dmg_type] * row['prob']
#===================================================================
# get stats from the floodo
#===================================================================
floodo = self.fld_aep_od[row['ari']]
for attn in fsts_l:
row[attn] = getattr(floodo, attn)
#===================================================================
# #add this row back into the frame
#===================================================================
dmgs_df.loc[index,:] = row
#=======================================================================
# get series totals
#=======================================================================
dmgs_df = dmgs_df.sort_values('prob').reset_index(drop=True)
#=======================================================================
# closeout
#=======================================================================
logger.debug('annualized %i damage types for %i floods'%(len(dmg_types), len(dmgs_df)))
if wtf:
filetail = '%s dmg_sumry'%(self.session.state)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_to_file(filepath, dmgs_df, overwrite=True, index=False) #send for writing
logger.debug('set data with %s and cols: %s'%(str(dmgs_df.shape), dmgs_df.columns.tolist()))
if plot:
self.plot_dmgs(wtf=wtf)
#=======================================================================
# post check
#=======================================================================
if self.db_f:
#check for sort logic
if not dmgs_df.loc[:,'prob'].is_monotonic:
raise IOError
if not dmgs_df['total'].iloc[::-1].is_monotonic: #flip the order
logger.warning('bigger floods are not causing more damage')
'some of the flood tables seem bad...'
#raise IOError
#all probabilities should be larger than zero
if not np.all(dmgs_df.loc[:,'prob'] > 0):
raise IOError
return dmgs_df
def calc_annulized(self, dmgs_df = None,
ltail = None, rtail = None, plot_f=None,
dx = 0.001): #get the area under the damage curve
"""
#=======================================================================
# INPUTS
#=======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
'none': don't extend the tail
rtail: right trail treatment (high prob low damage)
'none': don't extend
'2year': extend to zero damage at the 2 year aep
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_annulized')
if ltail is None: ltail = self.ca_ltail
if rtail is None: rtail = self.ca_rtail
'plotter ignores passed kwargs here'
if plot_f is None: plot_f= self.session._write_figs
#=======================================================================
# get data
#=======================================================================
if dmgs_df is None:
dmgs_df = self.calc_summaries()
#df_raw = self.data.loc[:,('total', 'prob', 'ari')].copy().reset_index(drop=True)
'only slicing columns for testing'
df = dmgs_df.copy().reset_index(drop=True)
#=======================================================================
# shortcuts
#=======================================================================
if len(df) <2 :
logger.warning('not enough floods to calculate EAD')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
if df['total'].sum() < 1:
logger.warning('calculated zero damages!')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
logger.debug("with ltail = \'%s\', rtail = \'%s\' and df %s"%(ltail, rtail, str(df.shape)))
#=======================================================================
# left tail treatment
#=======================================================================
if ltail == 'flat':
#zero probability
'assume 1000yr flood is the max damage'
max_dmg = df['total'].max()*1.0001
df.loc[-1, 'prob'] = 0
df.loc[-1, 'ari'] = 999999
df.loc[-1, 'total'] = max_dmg
logger.debug('ltail == flat. duplicated damage %.2f at prob 0'%max_dmg)
elif ltail == 'none':
pass
else: raise IOError
'todo: add option for value multiplier'
#=======================================================================
# right tail
#=======================================================================
if rtail == 'none':
pass
elif hp_basic.isnum(rtail):
rtail_yr = float(rtail)
rtail_p = 1.0 / rtail_yr
max_p = df['prob'].max()
#floor check
if rtail_p < max_p:
logger.error('rtail_p (%.2f) < max_p (%.2f)'%(rtail_p, max_p))
raise IOError
#same
elif rtail_p == max_p:
logger.debug("rtail_p == min(xl. no changes made")
else:
logger.debug("adding zero damage for aep = %.1f"%rtail_yr)
#zero damage
'assume no damage occurs at the passed rtail_yr'
loc = len(df)
df.loc[loc, 'prob'] = rtail_p
df.loc[loc, 'ari'] = 1.0/rtail_p
df.loc[loc, 'total'] = 0
"""
hp_pd.view_web_df(self.data)
"""
else: raise IOError
#=======================================================================
# clean up
#=======================================================================
df = df.sort_index() #resort the index
if self.db_f:
'these should still hold'
if not df.loc[:,'prob'].is_monotonic:
raise IOError
"""see above
if not df['total'].iloc[::-1].is_monotonic:
raise IOError"""
x, y = df['prob'].values.tolist(), df['total'].values.tolist()
#=======================================================================
# find area under curve
#=======================================================================
try:
#ead_tot = scipy.integrate.simps(y, x, dx = dx, even = 'avg')
'this was giving some weird results'
ead_tot = scipy.integrate.trapz(y, x, dx = dx)
except:
raise Error('scipy.integrate.trapz failed')
logger.info('found ead_tot = %.2f $/yr from %i points with tail_codes: \'%s\' and \'%s\''
%(ead_tot, len(y), ltail, rtail))
self.ead_tot = ead_tot
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if pd.isnull(ead_tot):
raise IOError
if not isinstance(ead_tot, float):
raise IOError
if ead_tot <=0:
"""
view(df)
"""
raise Error('got negative damage! %.2f'%ead_tot)
#=======================================================================
# update data with tails
#=======================================================================
self.dmgs_df_wtail = df.sort_index().reset_index(drop=True)
#=======================================================================
# generate plot
#=======================================================================
if plot_f:
self.plot_dmgs(right_nm = None, xaxis = 'prob', logx = False)
return
def get_fld_begrd_cnt(self): #tabulate the bsmt_egrd counts from each flood
logger = self.logger.getChild('get_fld_begrd_cnt')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
#lvl1_values = dmg_dx.columns.get_level_values(0).unique().tolist()
#get all the basement exposure grade (bsmt_egrd) types
df1 = dmg_dx.loc[:,idx[:, 'bsmt_egrd']] #get a slice by level 2 values
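#'idx' is assumed to be a module-level pd.IndexSlice alias defined elsewhere in this file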
#get occurances by value
d = hp_pd.sum_occurances(df1, logger=logger)
#=======================================================================
# loop and calc
#=======================================================================
logger.debug('looping through %i bsmt_egrds: %s'%(len(d), list(d.keys())))
for bsmt_egrd, cnt in d.items():
attn = 'b'+bsmt_egrd +'_cnt'
logger.debug('for \'%s\' got %i'%(attn, cnt))
setattr(self, attn, cnt)
logger.debug('finished \n')
def check_dmg_dx(self): #check logical consistency of the damage results
logger = self.logger.getChild('check_dmg_dx')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
mdex = dmg_dx.columns
aep_l = mdex.get_level_values(0).astype(int).unique().values.tolist()
aep_l.sort()
#=======================================================================
# check that each flood increases in damage
#=======================================================================
total = None
aep_last = None
for aep in aep_l:
#get this slice
df = dmg_dx[aep]
if total is None:
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
total = df.loc[:,boolcol].sum().sum()
if not aep == min(aep_l):
raise IOError
else:
newtot = df.loc[:,boolcol].sum().sum()
if not newtot >= total:
logger.warning('aep %s tot %.2f < aep %s %.2f'%(aep, newtot, aep_last, total))
#raise IOError
#print 'new tot %.2f > oldtot %.2f'%(newtot, total)
total = newtot
aep_last = aep
return
def check_acodes(self, #check you have curves for all the acodes
ac_sec_d = None, #dict of loaded acodes {acode: asector}
ac_req_l = None, #set of requested acodes
dfunc_df = None, #control file page for the dfunc parameters
):
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('check_acodes')
if ac_sec_d is None: ac_sec_d = self.acode_sec_d
if ac_req_l is None: ac_req_l = self.binv.acode_l #pull from the binv
if dfunc_df is None: dfunc_df = self.session.pars_df_d['dfunc']
log.debug('checking acodes requested by binv against %i available'%len(ac_sec_d))
"""
for k, v in ac_sec_d.items():
print(k, v)
"""
#=======================================================================
# conversions
#=======================================================================
ava_ar = np.array(list(ac_sec_d.keys())) #convert the available acodes to an array
req_ar = np.array(ac_req_l)
#get the pars set
pars_ar_raw = dfunc_df['acode'].dropna().unique()
pars_ar = pars_ar_raw[pars_ar_raw!='none'] #drop the nones
#=======================================================================
# check we loaded everything we requested in the pars
#=======================================================================
boolar = np.invert(np.isin(pars_ar, ava_ar))
if np.any(boolar):
raise Error('%i acodes requested by the pars were not loaded: \n %s'
%(boolar.sum(), pars_ar[boolar]))
#=======================================================================
# check the binv doesn't have anything we don't have pars for
#=======================================================================
boolar = np.invert(np.isin(req_ar, pars_ar))
if np.any(boolar):
raise Error('%i binv acodes not found on the \'dfunc\' tab: \n %s'
%(boolar.sum(), req_ar[boolar]))
return
def wrap_up(self):
#=======================================================================
# update asset containers
#=======================================================================
"""
#building inventory
'should be flagged for updating during House.notify()'
if self.binv.upd_kid_f:
self.binv.update()"""
"""dont think we need this here any more.. only on udev.
keeping it just to be save"""
self.last_tstep = copy.copy(self.time)
self.state='close'
def write_res_fancy(self, #for saving results in xls per tab. called as a special outputr
dmg_dx=None,
include_ins = False,
include_raw = False,
include_begh = True):
"""
#=======================================================================
# INPUTS
#=======================================================================
include_ins: whether to add inputs as tabs.
I've left this separate from the 'copy_inputs' flag as it is not a true file copy of the inputs
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('write_res_fancy')
if dmg_dx is None: dmg_dx = self.dmg_dx
if dmg_dx is None:
logger.warning('got no dmg_dx. skipping')
return
#=======================================================================
# setup
#=======================================================================
od = OrderedDict()
#=======================================================================
# add the parameters
#=======================================================================
#get the blank frame
df = pd.DataFrame(columns = ['par','value'] )
df['par'] = list(self.try_inherit_anl)
for indx, row in df.iterrows():
df.iloc[indx, 1] = getattr(self, row['par']) #set this value
od['pars'] = df
#=======================================================================
# try and add damage summary
#=======================================================================
if not self.dmgs_df is None:
od['dmg summary'] = self.dmgs_df
#=======================================================================
# #get the dmg_dx decomposed
#=======================================================================
od.update(hp_pd.dxcol_to_df_set(dmg_dx, logger=self.logger))
#=======================================================================
# #add dmg_dx as a raw tab
#=======================================================================
if include_raw:
od['raw_res'] = dmg_dx
#=======================================================================
# add inputs
#=======================================================================
if include_ins:
for dataname, dato in self.kids_d.items():
if hasattr(dato, 'data') and hp_pd.isdf(dato.data):
od[dataname] = dato.data
#=======================================================================
# add debuggers
#=======================================================================
if include_begh:
if not self.beg_hist_df is None:
od['beg_hist'] = self.beg_hist_df
#=======================================================================
# #write to excel
#=======================================================================
filetail = '%s %s %s %s fancy_res'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_dfset_excel(od, filepath, engine='xlsxwriter', logger=self.logger)
return
def write_dmg_fly(self): #write damage results after each run
logger = self.logger.getChild('write_dmg_fly')
dxcol = self.dmg_dx #results
#=======================================================================
# build the results summary series
#=======================================================================
#get all the flood aeps
lvl0vals = dxcol.columns.get_level_values(0).unique().astype(int).tolist()
#blank holder
res_ser = pd.Series(index = lvl0vals)
#loop and calc sums for each flood
for aep in lvl0vals:
res_ser[aep] = dxcol.loc[:,(aep,'total')].sum()
#add extras
if not self.ead_tot is None:
res_ser['ead_tot'] = self.ead_tot
res_ser['dt'] = self.tstep_o.year
res_ser['sim'] = self.simu_o.ind
lindex = '%s.%s'%(self.simu_o.name, self.tstep_o.name)
hp_pd.write_fly_df(self.fly_res_fpath,res_ser, lindex = lindex,
first = self.write_dmg_fly_first, tag = 'fdmg totals',
db_f = self.db_f, logger=logger) #write results on the fly
self.write_dmg_fly_first = False
return
def get_plot_kids(self): #raise kids for plotting the damage summaries
logger = self.logger.getChild('get_plot_kids')
#=======================================================================
# get slice of aad_fmt_df matching the aad cols
#=======================================================================
aad_fmt_df = self.session.pars_df_d['dmg_sumry_plot'] #pull the formatter pars from the tab
dmgs_df = self.dmgs_df
self.data = dmgs_df
boolidx = aad_fmt_df.loc[:,'name'].isin(dmgs_df.columns) #get just those formatters with data in the aad
aad_fmt_df_slice = aad_fmt_df[boolidx] #get this slice
"""
hp_pd.view_web_df(self.data)
hp_pd.view_web_df(df)
hp_pd.view_web_df(aad_fmt_df_slice)
aad_fmt_df_slice.columns
"""
#=======================================================================
# formatter kids setup
#=======================================================================
"""need to run this every time so the data is updated
TODO: allow some updating here so we don't have to rebuild each time
if self.plotter_kids_dict is None:"""
self.plotr_d = self.raise_children_df(aad_fmt_df_slice, kid_class = hp_data.Data_o)
logger.debug('finished \n')
#===============================================================================
# def plot_dmgs(self, wtf=None, right_nm = None, xaxis = 'ari', logx = True,
# ylims = None, #tuple of min/max values for the y-axis
# ): #plot curve of aad
# """
# see tab 'aad_fmt' to control what is plotted and formatting
# """
# #=======================================================================
# # defaults
# #=======================================================================
# logger = self.logger.getChild('plot_dmgs')
# if wtf == None: wtf = self.session._write_figs
#
# #=======================================================================
# # prechecks
# #=======================================================================
# if self.db_f:
# if self.dmgs_df is None:
# raise IOError
#
#
# #=======================================================================
# # setup
# #=======================================================================
# if not ylims is None:
# try:
# ylims = eval(ylims)
# except:
# pass
#
# #get the plot workers
# if self.plotr_d is None:
# self.get_plot_kids()
#
# kids_d = self.plotr_d
#
# title = '%s-%s-%s EAD-ARI plot on %i objs'%(self.session.tag, self.simu_o.name, self.name, len(self.binv.childmeta_df))
# logger.debug('with \'%s\''%title)
#
# if not self.tstep_o is None:
# title = title + ' for %s'%self.tstep_o.name
#
# #=======================================================================
# # update plotters
# #=======================================================================
# logger.debug('updating plotters with my data')
#
# #get data
# data_og = self.data.copy() #store this for later
#
# if self.dmgs_df_wtail is None:
# df = self.dmgs_df.copy()
# else:
# df = self.dmgs_df_wtail.copy()
#
# df = df.sort_values(xaxis, ascending=True)
#
# #reformat data
# df.set_index(xaxis, inplace = True)
#
# #re set
# self.data = df
#
# #tell kids to refresh their data from here
# for gid, obj in kids_d.items(): obj.data = obj.loadr_vir()
#
# self.data = data_og #reset the data
#
# #=======================================================================
# # get annotation
# #=======================================================================
# val_str = '$' + "{:,.2f}".format(self.ead_tot/1e6)
# #val_str = "{:,.2f}".format(self.ead_tot)
# """
# txt = 'total aad: $%s \n tail kwargs: \'%s\' and \'%s\' \n'%(val_str, self.ca_ltail, self.ca_rtail) +\
# 'binv.cnt = %i, floods.cnt = %i \n'%(self.binv.cnt, len(self.fld_aep_od))"""
#
#
# txt = 'total EAD = %s'%val_str
#
#
# #=======================================================================
# #plot the workers
# #=======================================================================
# #twinx
# if not right_nm is None:
# logger.debug('twinning axis with name \'%s\''%right_nm)
# title = title + '_twin'
# # sort children into left/right buckets by name to plot on each axis
# right_pdb_d, left_pdb_d = self.sort_buckets(kids_d, right_nm)
#
# if self.db_f:
# if len (right_pdb_d) <1: raise IOError
#
# #=======================================================================
# # #send for plotting
# #=======================================================================
# 'this plots both bundles by their data indexes'
# ax1, ax2 = self.plot_twinx(left_pdb_d, right_pdb_d,
# logx=logx, xlab = xaxis, title=title, annot = txt,
# wtf=False)
# 'cant figure out why the annot is plotting twice'
#
# ax2.set_ylim(0, 1) #prob limits
# legon = False
# else:
# logger.debug('single axis')
#
# try:
# del kids_d['prob']
# except:
# pass
#
# pdb = self.get_pdb_dict(list(kids_d.values()))
#
# ax1 = self.plot_bundles(pdb,
# logx=logx, xlab = 'ARI', ylab = 'damage ($ 10^6)', title=title, annot = txt,
# wtf=False)
#
# legon=True
#
# #hatch
# #=======================================================================
# # post formatting
# #=======================================================================
# #set axis limits
# if xaxis == 'ari': ax1.set_xlim(1, 1000) #aep limits
# elif xaxis == 'prob': ax1.set_xlim(0, .6)
#
# if not ylims is None:
# ax1.set_ylim(ylims[0], ylims[1])
#
#
# #ax1.set_ylim(0, ax1.get_ylim()[1]) #$ limits
#
#
# #=======================================================================
# # format y axis labels
# #=======================================================================
# old_tick_l = ax1.get_yticks() #get the old labels
#
# # build the new ticks
# l = []
#
# for value in old_tick_l:
# new_v = '$' + "{:,.0f}".format(value/1e6)
# l.append(new_v)
#
# #apply the new labels
# ax1.set_yticklabels(l)
#
# """
# #add thousands comma
# ax1.get_yaxis().set_major_formatter(
# #matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.2f}".format(x/1e6)))
#
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))"""
#
# if xaxis == 'ari':
# ax1.get_xaxis().set_major_formatter(
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
#
#
# if wtf:
# fig = ax1.figure
# savepath_raw = os.path.join(self.outpath,title)
# flag = hp.plot.save_fig(self, fig, savepath_raw=savepath_raw, dpi = self.dpi, legon=legon)
# if not flag: raise IOError
#
#
# #plt.close()
# return
#===============================================================================
class Flood(
hp_dyno.Dyno_wrap,
hp_sim.Sim_o,
hp_oop.Parent, #flood object worker
hp_oop.Child):
#===========================================================================
# program pars
#===========================================================================
gpwr_f = False #grid power flag placeholder
#===========================================================================
# user defineid pars
#===========================================================================
ari = None
#loaded from flood table
#area exposure grade. controls the area's depth decision algorithm based on the performance of macro structures (e.g. dykes).
area_egrd00 = ''
area_egrd01 = ''
area_egrd02 = ''
area_egrd00_code = None
area_egrd01_code = None
area_egrd02_code = None
#===========================================================================
# calculated pars
#===========================================================================
hdep_avg = 0 #average house depth
#damage properties
total = 0
BS = 0
BC = 0
MS = 0
MC = 0
dmg_gw = 0
dmg_sw = 0
dmg_df_blank =None
wsl_avg = 0
#===========================================================================
# data containers
#===========================================================================
hdmg_cnt = 0
dmg_df = None
dmg_res_df = None
#bsmt_egrd counters. see get_begrd_cnt()
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self, parent, *vars, **kwargs):
logger = mod_logger.getChild('Flood')
logger.debug('start _init_')
#=======================================================================
# #attach custom vars
#=======================================================================
self.inherit_parent_ans=set(['mind', 'dmg_types'])
#=======================================================================
# initialize cascade
#=======================================================================
super(Flood, self).__init__(parent, *vars, **kwargs) #initialize the baseclass
#=======================================================================
# common setup
#=======================================================================
if self.sib_cnt == 0:
#update the resets
pass
#=======================================================================
# unique setup
#=======================================================================
""" handled by the outputr
self.reset_d.update({'hdmg_cnt':0})"""
self.ari = int(self.ari)
self.dmg_res_df = pd.DataFrame() #set as an empty frame for output handling
#=======================================================================
# setup functions
#=======================================================================
self.set_gpwr_f()
logger.debug('set_dmg_df_blank()')
self.set_dmg_df_blank()
logger.debug('get your water levels from the selected wsl table \n')
self.set_wsl_frm_tbl()
logger.debug('set_area_egrd()')
self.set_area_egrd()
logger.debug('get_info_from_binv()')
df = self.get_info_from_binv() #initial run to set blank frame
self.set_wsl_from_egrd(df)
""" moved into set_wsl_frm_tbl()
logger.debug('\n')
self.setup_dmg_df()"""
self.init_dyno()
self.logger.debug('__init___ finished \n')
def set_dmg_df_blank(self):
logger = self.logger.getChild('set_dmg_df_blank')
binv_df = self.model.binv.childmeta_df
colns = OrderedSet(self.model.dmg_df_cols.tolist() + ['wsl', 'area_prot_lvl'])
'wsl should be redundant'
#get boolean
self.binvboolcol = binv_df.columns.isin(colns) #store this for get_info_from_binv()
#get the blank frame
self.dmg_df_blank = pd.DataFrame(columns = colns, index = binv_df.index) #get the blank frame
'this still needs the wsl levels attached based on your area exposure grade'
logger.debug('set dmg_df_blank with %s'%(str(self.dmg_df_blank.shape)))
return
def set_gpwr_f(self): #set your power flag
if self.is_frozen('gpwr_f'): return True #shortcut for frozen
logger = self.logger.getChild('set_gpwr_f')
#=======================================================================
# get based on aep
#=======================================================================
min_aep = int(self.model.gpwr_aep)
if self.ari < min_aep: gpwr_f = True
else: gpwr_f = False
logger.debug('for min_aep = %i, set gpwr_f = %s'%(min_aep, gpwr_f))
#update handler
self.handle_upd('gpwr_f', gpwr_f, proxy(self), call_func = 'set_gpwr_f')
return True
def set_wsl_frm_tbl(self, #build the raw wsl data from the passed flood table
flood_tbl_nm = None, #name of flood table to pull raw data from
#bid_l=None,
):
"""
here we get the raw values
these are later modified by the area_egrd with self.set_wsl_from_egrd()
#=======================================================================
# INPUTS
#=======================================================================
flood_tbl_df_raw: raw df of the classic flood table
columns: count, aep, aep, aep, aep....
real_columns: bldg_id, bid, depth, depth, depth, etc...
index: unique arbitrary
wsl_ser: series of wsl for this flood on each bldg_id
#=======================================================================
# calls
#=======================================================================
dynp handles Fdmg.flood_tbl_nm
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_frm_tbl')
if flood_tbl_nm is None: flood_tbl_nm = self.model.flood_tbl_nm
#=======================================================================
# get data
#=======================================================================
#pull the raw flood tables
ftbl_o = self.model.ftblos_d[flood_tbl_nm]
wsl_d = ftbl_o.wsl_d
df = pd.DataFrame(index = list(wsl_d.values())[0].index) #blank frame from the first entry
#=======================================================================
# loop and apply for each flood type
#=======================================================================
for ftype, df1 in wsl_d.items():
#=======================================================================
# data checks
#=======================================================================
if self.db_f:
if not ftype in ['wet', 'dry', 'damp']:
raise IOError
df_raw =df1.copy()
if not self.ari in df_raw.columns:
logger.error('the flood provided on the \'floods\' tab (\'%s\') does not have a match in the flood table: \n %s'%
(self.ari, self.model.ftblos_d[flood_tbl_nm].filepath))
raise IOError
#=======================================================================
# slice for this flood
#=======================================================================
boolcol = df1.columns == self.ari #slice for this aep
#get the series for this
wsl_ser = df1.loc[:, boolcol].iloc[:,0].astype(float)
#wsl_ser = wsl_ser.rename(ftype) #rename with the aep
'binv slicing moved to Flood_tbl.clean_data()'
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if len(wsl_ser) <1:
raise IOError
""" allowing
#check for nuls
if np.any(pd.isnull(wsl_ser2)):
raise IOError"""
#=======================================================================
# wrap up report and attach
#=======================================================================
df[ftype] = wsl_ser
logger.debug('from \'%s\' for \'%s\' got wsl_ser %s for aep: %i'
%(flood_tbl_nm, ftype, str(wsl_ser.shape), self.ari))
self.wsl_df = df #set this
'not using dynps'
if self.session.state == 'init':
self.reset_d['wsl_df'] = df.copy()
return True
def set_area_egrd(self): #pull your area exposure grade from somewhere
"""
#=======================================================================
# calls
#=======================================================================
self.__init__()
dynp handles: Fdmg.flood_tbl_nm (just in case we are pulling from there)
"""
#=======================================================================
# dependency check
#=======================================================================
if not self.session.state=='init':
dep_l = [([self.model], ['set_area_prot_lvl'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_area_egrd'):
return False
logger = self.logger.getChild('set_area_egrd')
#=======================================================================
# pull the egrd from another table if asked
#=======================================================================
for cnt in range(0,3,1): #loop through each one
attn = 'area_egrd%02d'%cnt
area_egrd_code = getattr(self, attn + '_code')
if area_egrd_code in ['dry', 'damp', 'wet']:
area_egrd = area_egrd_code
#===================================================================
# pull from the flood table
#===================================================================
elif area_egrd_code == '*ftbl':
ftbl_o = self.model.ftblos_d[self.model.flood_tbl_nm] #get the flood table object
area_egrd = getattr(ftbl_o, attn) #get from the table
#===================================================================
# pull from the model
#===================================================================
elif area_egrd_code == '*model':
area_egrd = getattr(self.model, attn) #get from the model
else:
logger.error('for \'%s\' got unrecognized area_egrd_code: \'%s\''%(attn, area_egrd_code))
raise IOError
#===================================================================
# set these
#===================================================================
self.handle_upd(attn, area_egrd, weakref.proxy(self), call_func = 'set_area_egrd')
'this should trigger generating a new wsl set for the blank dmg_df'
logger.debug('set \'%s\' from \'%s\' as \'%s\''
%(attn, area_egrd_code,area_egrd))
if self.db_f:
if not area_egrd in ['dry', 'damp', 'wet']:
raise IOError
return True
def set_wsl_from_egrd(self, #calculate the wsl based on the area_egrd
df = None):
"""
This is a partial results retrieval for non-damage-function results
TODO:
consider checking for dependency on House.area_prot_lvl
#=======================================================================
# calls
#=======================================================================
self.__init__
dynp handles for:
Flood.area_egrd##
Fdmg.flood_tbl_nm
if area_egrd_code == *model, this loop isn't really necessary
"""
#=======================================================================
# check dependencies and frozen
#=======================================================================
if not self.session.state=='init':
dep_l = [([self], ['set_area_egrd', 'set_wsl_frm_tbl'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_wsl_from_egrd'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_from_egrd')
#if wsl_delta is None: wsl_delta = self.model.wsl_delta
#=======================================================================
# get data
#=======================================================================
if df is None: df = self.get_info_from_binv()
'need to have updated area_prot_lvls'
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if not isinstance(df, pd.DataFrame): raise IOError
if not len(df) > 0: raise IOError
#=======================================================================
# add the wsl for each area_egrd
#=======================================================================
for prot_lvl in range(0,3,1): #loop through each one
#get your grade for this prot_lvl
attn = 'area_egrd%02d'%prot_lvl
area_egrd = getattr(self, attn)
#identify the houses for this protection level
boolidx = df.loc[:,'area_prot_lvl'] == prot_lvl
if boolidx.sum() == 0: continue
#give them the wsl corresponding to this grade
df.loc[boolidx, 'wsl'] = self.wsl_df.loc[boolidx,area_egrd]
#set a tag for the area_egrd
if 'area_egrd' in df.columns:
df.loc[boolidx, 'area_egrd'] = area_egrd
logger.debug('for prot_lvl %i, set %i wsl from \'%s\''%(prot_lvl, boolidx.sum(), area_egrd))
#=======================================================================
# set this
#=======================================================================
self.dmg_df_blank = df
#=======================================================================
# post check
#=======================================================================
logger.debug('set dmg_df_blank with %s'%str(df.shape))
if self.session.state=='init':
self.reset_d['dmg_df_blank'] = df.copy()
if self.db_f:
if np.any(pd.isnull(df['wsl'])):
raise Error('got some wsl nulls')
return True
"""
hp_pd.v(df)
hp_pd.v(self.dmg_df_blank)
"""
def run_fld(self, **kwargs): #shortcut to collect all the functions for a simulation run
self.run_cnt += 1
dmg_df_blank = self.get_info_from_binv()
"""
view(dmg_df_blank)
"""
dmg_df = self.get_dmg_set(dmg_df_blank, **kwargs)
if self.db_f:
self.check_dmg_df(dmg_df)
'leaving this here for simplicity'
self.calc_statres_flood(dmg_df)
return dmg_df
def get_info_from_binv(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_info_from_binv')
binv_df = self.model.binv.childmeta_df
#pull static values
binvboolcol = self.binvboolcol
df = self.dmg_df_blank.copy()
'this should have wsl added to it from set_wsl_from_egrd()'
if self.db_f:
if not len(binvboolcol) == len(binv_df.columns):
logger.warning('got length mismatch between binvboolcol (%i) and the binv_df columns (%i)'%
(len(binvboolcol), len(binv_df.columns)))
'pandas will handle this mismatch.. it just ignores the end'
#=======================================================================
# #update with values from the binv
#=======================================================================
df.update(binv_df.loc[:,binvboolcol], overwrite=True) #update from all the values in the binv
logger.debug('retrieved %i values from the binv_df on: %s'
%(binv_df.loc[:,binvboolcol].count().count(), binv_df.loc[:,binvboolcol].columns.tolist()))
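#DataFrame.update aligns on index and columns and only overwrites the matching cells with non-NA binv values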
#=======================================================================
# macro calcs
#=======================================================================
if 'hse_depth' in df.columns:
df['hse_depth'] = df['wsl'] - df['anchor_el']
#ground water damage flag
if 'gw_f' in df.columns:
df.loc[:,'gw_f'] = df['dem_el'] > df['wsl'] #water is below grade
if self.db_f:
if 'bsmt_egrd' in binv_df.columns:
raise IOError
return df
def get_dmg_set(self, #calcluate the damage for each house
dmg_df, #pre-filled frame for calculating damage results onto
#dmg_type_list='all',
#bid_l = None,
#wsl_delta = None,
dmg_rat_f =None, #include the damage ratio in results
):
"""
20190521:
I don't really like how this is structured, with one mega for-loop trying to grab everything.
Instead, everything should be handled by Fdmg (which really should be wrapped back into the Session)
Each calculation/value (e.g. damage, static values, etc.) should be calculated in a dedicated loop
then we can control logic based on each value type
the controller can collect all of these results during wrap up
rather than trying to pass everything to each loop
#=======================================================================
# INPUTS
#=======================================================================
depth_ser: series of depths (for this flood) with index = bldg_id
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_dmg_set(%s)'%self.get_id())
if dmg_rat_f is None: dmg_rat_f = self.model.dmg_rat_f
hse_od = self.model.binv.hse_od #ordered dictionary keyed by bid: hse_dato
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
if not isinstance(dmg_df, pd.DataFrame):
raise IOError
boolidx = dmg_df.index.isin(list(hse_od.keys()))
if not np.all(boolidx):
logger.error('some of the bldg_ids in the wsl_ser were not found in the binv: \n %s'
%dmg_df.index[~boolidx])
raise IOError
#check the damage columns are empty
boolcol = np.isin(dmg_df.columns, ['MS', 'MC', 'BS', 'BC', 'GS', 'total']) #identify damage columns
if not np.all(pd.isnull(dmg_df.loc[:,boolcol])):
raise IOError
#=======================================================================
# frame setup
#=======================================================================
#identify columns containing damage results
dmgbool = np.logical_or(
dmg_df.columns.isin(self.model.dmg_types), #damages
|
pd.Series(dmg_df.columns)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
@created on: 4/28/20,
@author: <NAME>,
@version: v0.0.1
@system name: badgod
Description:
..todo::
"""
import pandas as pd
|
pd.set_option('display.max_columns', 100000)
|
pandas.set_option
|
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
# ----------------------------------------------------------------------------------------------------- Level-1 Figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the first level-1 overview figure
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '问题数量占比'])
# problem category, problem-record count, total-record count
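# each value below is a pair of SQL strings: [count of rows with a missing timestamp, count of all rows]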
bus_dic = {
# '患者基本信息': ['select count(distinct caseid) as num from overall where in_time is null or out_time is null','select count(distinct caseid) as num from overall'],
'入院时间': ['select count(distinct caseid) as num from overall where in_time is null ',
'select count(distinct caseid) as num from overall'],
'出院时间': ['select count(distinct caseid) as num from overall where out_time is null',
'select count(distinct caseid) as num from overall'],
'手术': ['select count(1) as num from oper2 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from oper2 '],
'给药': ['select count(1) as num from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ','select count(1) as num from ANTIBIOTICS '],
'入出转': ['select count(1) as num from DEPARTMENT where BEGINTIME is null or ENDTIME is null ','select count(1) as num from DEPARTMENT '],
'菌检出': ['select count(1) as num from BACTERIA where REQUESTTIME is null ','select count(1) as num from BACTERIA '],
'体温': ['select count(1) as num from TEMPERATURE where RECORDDATE is null ','select count(1) as num from TEMPERATURE '],
'药敏': ['select count(1) as num from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from DRUGSUSCEPTIBILITY '],
'检查': ['select count(1) as num from EXAM where EXAM_DATE is null ','select count(1) as num from EXAM '],
'生化': ['select count(1) as num from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from ROUTINE2 '],
'三管': ['select count(1) as num from TREATMENT1 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from TREATMENT1 '],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图一',bus)
return res_数据时间缺失及汇总
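# note: 问题数量占比 is returned as a percentage (ratio * 100); rows of -1 mark queries that raised an error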
# Update level-1 figure 1
@app.callback(
Output('first_level_first_fig','figure'),
Output('general_situation_first_level_first_fig_data','data'),
Input('general_situation_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(general_situation_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
if general_situation_first_level_first_fig_data is None:
general_situation_first_level_first_fig_data = {}
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
general_situation_first_level_first_fig_data = json.loads(general_situation_first_level_first_fig_data)
if db_con_url['hosname'] != general_situation_first_level_first_fig_data['hosname']:
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
first_level_first_fig_data = pd.read_json(general_situation_first_level_first_fig_data['first_level_first_fig_data'], orient='split')
general_situation_first_level_first_fig_data = dash.no_update
#
fig_概览一级_时间缺失 = make_subplots(specs=[[{"secondary_y": True}]])
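# dual-axis layout: the bar trace (问题数) uses the primary y-axis, the scatter trace (问题数量占比) the secondary one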
res_数据时间缺失及汇总 = first_level_first_fig_data.sort_values(['问题数'], ascending=False)
# missing count per business type -- bar chart
fig_概览一级_时间缺失.add_trace(
go.Bar(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数'], name="问题数量",
marker_color=px.colors.qualitative.Dark24, ),
secondary_y=False,
)
# missing-count percentage per business type -- line chart
fig_概览一级_时间缺失.add_trace(
go.Scatter(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数量占比'], name="问题数量占比", ),
secondary_y=True,
)
# set the X-axis title
fig_概览一级_时间缺失.update_xaxes(tickangle=45,title_text="业务指标")
# set the Y-axis titles
fig_概览一级_时间缺失.update_yaxes(title_text="缺失数量", secondary_y=False)
fig_概览一级_时间缺失.update_yaxes(title_text="缺失占比(%)", secondary_y=True)
# use a horizontal legend and set its position
fig_概览一级_时间缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
# set the figure margins
fig_概览一级_时间缺失.update_layout(margin=dict(l=20, r=20, t=20, b=20), )
return fig_概览一级_时间缺失,general_situation_first_level_first_fig_data
# Download the detail data behind level-1 figure 1
@app.callback(
Output('first_level_first_fig_data_detail', 'data'),
Input('first_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
prevent_initial_call=True,
)
def download_first_level_first_fig_data_detail(n_clicks,db_con_url):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
engine = create_engine(db_con_url['db'])
bus_dic = {
'入院时间': 'select * from overall where in_time is null ',
'出院时间': 'select * from overall where out_time is null',
'手术': 'select * from oper2 where BEGINTIME is null or ENDTIME is null ',
'给药': 'select * from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ',
'入出转': 'select * from DEPARTMENT where BEGINTIME is null or ENDTIME is null ',
'菌检出': 'select * from BACTERIA where REQUESTTIME is null ',
'药敏': 'select * from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ',
'检查': 'select * from EXAM where EXAM_DATE is null',
'生化': 'select * from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ',
'三管': 'select * from TREATMENT1 where BEGINTIME is null or ENDTIME is null ',
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
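# one worksheet per business type; if a query fails, a one-column error sheet is written instead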
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key],con=engine)
if temp.shape[0]>0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'],columns=[key])
error_df.to_excel(writer, sheet_name = key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务时间缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-1 Figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the second level-1 overview figure
def get_first_lev_second_fig_date(engine,btime,etime):
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '关键字缺失占比'])
bus_dic = {'用药目的': [f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (GOAL is null or replace(GOAL,' ','') is null)",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'药敏结果': [f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null",
f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'手术名称': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (OPER_NAME is null or replace(OPER_NAME,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术切口等级': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'出入院科室': [f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' and ( IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null )",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( DEPT is null or replace(DEPT,' ','') is null)",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "]
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图二', bus)
return res_数据关键字缺失及汇总
# Update level-1 figure 2
@app.callback(
Output('first_level_second_fig','figure'),
Output('general_situation_first_level_second_fig_data','data'),
Input('general_situation_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_second_fig(general_situation_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
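# btime/etime are truncated to 'YYYY-MM' so the substr(..., 1, 7) comparisons in the SQL match on month prefixes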
if general_situation_first_level_second_fig_data is None:
general_situation_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
general_situation_first_level_second_fig_data = json.loads(general_situation_first_level_second_fig_data)
if db_con_url['hosname'] != general_situation_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps( general_situation_first_level_second_fig_data)
else:
if general_situation_first_level_second_fig_data['btime'] != btime or general_situation_first_level_second_fig_data['etime'] != etime:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(general_situation_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
general_situation_first_level_second_fig_data = dash.no_update
print("一级第二张图数据:")
print(first_level_second_fig_data)
fig_概览一级_关键字缺失 = make_subplots()
res_数据关键字缺失及汇总 = first_level_second_fig_data.sort_values(['关键字缺失占比'], ascending=False)
fig_概览一级_关键字缺失.add_trace(
go.Bar(x=res_数据关键字缺失及汇总['业务类型'], y=res_数据关键字缺失及汇总['关键字缺失占比'], marker_color=px.colors.qualitative.Dark24, )
)
fig_概览一级_关键字缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig_概览一级_关键字缺失.update_yaxes(title_text="关键字缺失占比(%)")
fig_概览一级_关键字缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_关键字缺失,general_situation_first_level_second_fig_data
# Download the detail data behind level-1 figure 2
@app.callback(
Output('first_level_second_fig_data_detail', 'data'),
Input('first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_second_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'用药目的': f"select * from ANTIBIOTICS where (GOAL is null or replace(GOAL,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'药敏结果': f"select * from drugsusceptibility where (SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null) and REQUESTTIME is not null and substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}' ",
'手术名称': f"select * from oper2 where (OPER_NAME is null or replace(OPER_NAME,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}'",
'手术切口等级': f"select * from oper2 where (WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'出入院科室': f" select * from overall where (IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null) and in_time is not null and substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}' ",
'入出转科室': f"select * from department where (DEPT is null or replace(DEPT,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务关键字缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-1 Figure 3 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the third level-1 overview figure
def get_first_lev_third_fig_date(engine,btime,etime):
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '科室信息映射问题占比'])
bus_dic = {'入院科室': [f" select count(1) as num from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.in_dept = t2.code) and t1.in_dept is not null and (substr(t1.IN_TIME,1,7)>='{btime}' and substr(t1.IN_TIME,1,7)<='{etime}') ",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'出院科室': [
f" select count(1) as num from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.out_dept = t2.code) and t1.out_dept is not null and (substr(t1.IN_TIME,1,7)>='{btime}' and substr(t1.IN_TIME,1,7)<='{etime}') ",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [
f" select count(1) as num from department t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'抗菌药物医嘱科室': [
f" select count(1) as num from ANTIBIOTICS t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术科室': [
f" select count(1) as num from OPER2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from OPER2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'菌检出送检科室': [
f" select count(1) as num from BACTERIA t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from BACTERIA where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'药敏送检科室': [
f" select count(1) as num from DRUGSUSCEPTIBILITY t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from DRUGSUSCEPTIBILITY where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'体温科室': [
f" select count(1) as num from TEMPERATURE t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}') ",
f"select count(1) as num from TEMPERATURE where substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' "],
'治疗科室': [
f" select count(1) as num from TREATMENT1 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from TREATMENT1 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'常规科室': [
f" select count(1) as num from ROUTINE2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from ROUTINE2 where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0], con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1], con=engine)['num'][0]
res_数据科室信息缺失及汇总.loc[res_数据科室信息缺失及汇总.shape[0]] = [bus, count_时间为空, count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据科室信息缺失及汇总.loc[res_数据科室信息缺失及汇总.shape[0]] = [bus, -1, -1, -1]
return res_数据科室信息缺失及汇总
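# Illustrative call of the helper above (a sketch, not wired into any callback): it expects a
# SQLAlchemy engine plus 'YYYY-MM' month strings, because every filter compares substr(<time>,1,7).
# The DSN below is a hypothetical placeholder.
def _example_first_level_third_fig_data(db_url='oracle+cx_oracle://user:pwd@host:1521/?service_name=orcl'):
    engine = create_engine(db_url)
    return get_first_lev_third_fig_date(engine, '2021-01', '2021-12')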
# Update level-1 figure 3
@app.callback(
Output('first_level_third_fig','figure'),
Output('general_situation_first_level_third_fig_data','data'),
Input('general_situation_first_level_third_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(general_situation_first_level_third_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_first_level_third_fig_data is None:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data={}
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_third_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
general_situation_first_level_third_fig_data = json.loads(general_situation_first_level_third_fig_data)
if db_con_url['hosname'] != general_situation_first_level_third_fig_data['hosname']:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_third_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
if general_situation_first_level_third_fig_data['btime'] != btime or general_situation_first_level_third_fig_data['etime'] != etime:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
first_level_third_fig_data = pd.read_json(general_situation_first_level_third_fig_data['first_level_third_fig_data'], orient='split')
general_situation_first_level_third_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
res_数据科室信息缺失及汇总 = first_level_third_fig_data.sort_values(['科室信息映射问题占比'], ascending=False)
fig_概览一级_科室映射缺失.add_trace(
go.Bar(x=res_数据科室信息缺失及汇总['业务类型'], y=res_数据科室信息缺失及汇总['科室信息映射问题占比'], marker_color=px.colors.qualitative.Dark24 )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="科室信息映射问题占比(%)")
fig_概览一级_科室映射缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_科室映射缺失,general_situation_first_level_third_fig_data
# Download the detail data behind level-1 figure 3
@app.callback(
Output('first_level_third_fig_data_detail', 'data'),
Input('first_level_third_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'入院科室': f" select * from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.in_dept = t2.code) and t1.in_dept is not null and substr(t1.IN_TIME,1,10)>='{btime}' and substr(t1.IN_TIME,1,10)<='{etime}' ",
'出院科室': f" select * from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.out_dept = t2.code) and t1.out_dept is not null and substr(t1.IN_TIME,1,10)>='{btime}' and substr(t1.IN_TIME,1,10)<='{etime}' ",
'入出转科室': f" select * from department t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and substr(t1.BEGINTIME,1,10) >='{btime}' and substr(t1.BEGINTIME,1,10) <='{etime}' ",
'抗菌药物医嘱科室': f" select * from ANTIBIOTICS t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'手术科室': f" select * from OPER2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'菌检出送检科室': f" select * from BACTERIA t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
'药敏送检科室': f" select * from DRUGSUSCEPTIBILITY t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
'体温科室': " select * from TEMPERATURE t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.RECORDDATE,1,10)>='{btime}' and substr(t1.RECORDDATE,1,10)<='{etime}') ",
'治疗科室': f" select * from TREATMENT1 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'常规科室': f" select * from ROUTINE2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}科室映射缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get the level-2 overview data on business-logic problems
def get_second_level_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.DataFrame(columns=['问题数据数量', '问题'])
ques_dic = {
'出院时间小于等于入院时间' : f""" select count(1) from overall where in_time is not null and out_time is not null and in_time >= out_time and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}')""",
'存在测试患者数据' : f""" select count(1) from overall where (pname like '%测试%' or pname like '%test%') and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}') """,
'存在住院时长超四个月患者' : f""" select count(1) from overall where (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)) and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}')
""",
'存在住院天数不足一天患者' : f""" select count(1) from overall where (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 ) and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}') """,
'转科时间在出入院时间之外' : f""" select count(1) from department t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'转入时间大于等于转出时间' : f""" select count(1) from department where BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') """,
'治疗开始时间大于等于结束时间' : f""" select count(1) from TREATMENT1 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') """,
'治疗时间在出入院时间之外' : f""" select count(1) from TREATMENT1 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'医嘱开始时间大于结束时间' : f""" select count(1) from ANTIBIOTICS where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}')""",
'医嘱时间在出入院时间之外' : f""" select count(1) from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'送检时间大于等于报告时间' : f""" select count(1) from BACTERIA where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}')""",
'送检时间在出入院时间之外' : f""" select count(1) from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
'药敏送检时间大于等于报告时间' : f""" select count(1) from DRUGSUSCEPTIBILITY where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and ( substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' )""",
'药敏送检时间在出入院时间之外' : f""" select count(1) from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
'手术开始时间大于结束时间' : f""" select count(1) from OPER2 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and ( substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' )""",
'手术时间在出入院时间之外' : f""" select count(1) from OPER2 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'OPERID重复' : f""" select count(1) from oper2 where operid in (select operid from oper2 group by operid having count(operid)>1) and ( substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' ) order by operid """,
'体温值异常' : f""" select count(1) from TEMPERATURE where (VALUE > 46 or VALUE < 34 or VALUE is null) and ( substr(RECORDDATE,1,7) >='{btime}' and substr(RECORDDATE,1,7) <='{etime}') """,
'体温测量时间在出入院时间之外' : f""" select count(1) from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and ( substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
""",
'入出转入科时间重复': f""" select count(1) from department t1,
(select caseid ,begintime from department where substr(begintime,1,7)>='{btime}' and substr(begintime,1,7)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
}
for ques in ques_dic:
try:
ques_df =
|
pd.read_sql(ques_dic[ques], con=engine)
|
pandas.read_sql
|
import rba
import numpy
import pandas
from .rba_Session import RBA_Session
from scipy.stats.mstats import gmean
class RBA_Calibrator(object):
def __init__(self, xml_dir):
self.rbaSession = RBA_Session(xml_dir)
def estimate_specific_Kapps(self, proteomicsData, flux_bounds, mu, biomass_function=None, target_biomass_function=True):
"""
Parameters
----------
proteomicsData : pandas.DataFrame (in mmol/gDW)
flux_bounds : pandas.DataFrame (in mmol/(gDW*h))
mu : float (in 1/h)
        biomass_function : str
            Biomass reaction used as FBA objective when target_biomass_function is False.
        target_biomass_function : bool
            If True, use the RBA target-derived biomass reaction ('R_BIOMASS_targetsRBA') as the
            objective, with maintenance added to the biomass function.
"""
Avogadro_constant = 6.022e23
self.rbaSession.addExchangeReactions()
self.rbaSession.setMu(mu)
if target_biomass_function:
self.rbaSession.buildFBA(objective='targets', maintenanceToBM=True)
BMfunction = 'R_BIOMASS_targetsRBA'
else:
self.rbaSession.buildFBA(objective='classic', maintenanceToBM=False)
BMfunction = biomass_function
for j in [i for i in self.rbaSession.Medium.keys() if self.rbaSession.Medium[i] == 0]:
Exrxn = 'R_EX_'+j.split('M_')[-1]+'_e'
self.rbaSession.FBA.setUB({Exrxn: 0})
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.rbaSession.FBA.setLB(rxn_LBs)
self.rbaSession.FBA.setUB(rxn_UBs)
self.rbaSession.FBA.clearObjective()
self.rbaSession.FBA.setObjectiveCoefficients({BMfunction: -1})
self.rbaSession.FBA.solveLP(feasibleStatuses=[1, 2, 3, 5, 6])
BMfluxOld = self.rbaSession.FBA.SolutionValues[BMfunction]
self.rbaSession.FBA.parsimonise()
self.rbaSession.FBA.setLB(rxn_LBs)
self.rbaSession.FBA.setUB(rxn_UBs)
self.rbaSession.FBA.setLB({BMfunction: BMfluxOld})
self.rbaSession.FBA.setUB({BMfunction: BMfluxOld})
self.rbaSession.FBA.solveLP(feasibleStatuses=[1, 2, 3, 5, 6])
FluxDistribution = pandas.DataFrame(index=list(
self.rbaSession.FBA.SolutionValues.keys()), columns=['FluxValues'])
FluxDistribution['FluxValues'] = list(self.rbaSession.FBA.SolutionValues.values())
BMfluxNew = self.rbaSession.FBA.SolutionValues[BMfunction]
ProtoIDmap = {}
for i in self.rbaSession.ModelStructure.ProteinInfo.Elements.keys():
ProtoID = self.rbaSession.ModelStructure.ProteinInfo.Elements[i]['ProtoID']
if ProtoID in list(proteomicsData.index):
if not
|
pandas.isna(proteomicsData.loc[ProtoID, 'copy_number'])
|
pandas.isna
|
from tqdm import tqdm
import pandas as pd
import numpy as np
from collections import Counter
import itertools
from db import get_annotation_db
from utils import *
from journal import *
phase_labels = ['pretreatment', 'treatment', 'end_of_life', 'cured']
phase_labels_with_unknown = phase_labels + ['unknown']
valid_transitions=[('pretreatment', 'treatment'),
('treatment', 'end_of_life'),
('treatment', 'cured'),
('cured', 'treatment')]
# in addition, self-loops are always allowed
valid_transitions.extend([(phase, phase) for phase in phase_labels])
DEFAULT_TREATMENT_SITE_IDS = [714287, 623581]
def sort_phase_list(phase_list):
return sorted(phase_list, key=lambda phase: phase_labels_with_unknown.index(phase))
def get_labels_from_phase_string(phase, remove_unknown=False):
"""
:param phase: The phase string, as stored in the annotation database.
(Assumes that phases are defined as particular strings and that they are separated by '|'.)
:param remove_unknown: If the 'unknown' label should be removed from the returned list
:return: A list of the phase labels extracted from the given phase string
"""
if phase == "":
return []
labels = phase.split('|')
if remove_unknown and 'unknown' in labels:
labels.remove('unknown')
if 'screening' in labels or 'info_seeking' in labels:
if 'screening' in labels:
labels.remove('screening')
if 'info_seeking' in labels:
labels.remove('info_seeking')
labels.insert(0, 'pretreatment')
# we sort the phase list to ensure they will always appear in a standard order
# will throw an exception if an invalid phase occurred in the string
labels = sort_phase_list(labels)
# a given journal should never have more than 2 phases tagged + unknown
if len(labels) > 3:
raise ValueError(f"Phase string tags an impossible {len(labels)} phases simultaneously.")
return labels
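# Small illustrative checks for the parser above (they assume the phase vocabulary defined at the
# top of this module): 'screening' folds into 'pretreatment' and labels come back in canonical order.
def _example_get_labels_from_phase_string():
    assert get_labels_from_phase_string("treatment|screening|unknown") == [
        "pretreatment", "treatment", "unknown"]
    assert get_labels_from_phase_string("treatment|unknown", remove_unknown=True) == ["treatment"]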
def combine_phase_annotations(annotation1, annotation2):
"""
Note: This function ASSUMES that annotation1 != annotation2
"""
phases1 = get_labels_from_phase_string(annotation1)
phases2 = get_labels_from_phase_string(annotation2)
combo = set(phases1) | set(phases2)
if 'unknown' in combo:
combo.remove('unknown')
transition_validity_list = [(phase1, phase2) in valid_transitions or (phase2, phase1) in valid_transitions
for phase1, phase2 in itertools.combinations(combo, 2)]
if False in transition_validity_list:
# This created an illegal combination!
return ['unknown']
combo.add('unknown') # we always add unknown in this case, since there was disagreement between the two annotations
return sort_phase_list(list(combo))
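# Illustrative behaviour of the combination rule above: a valid transition pair is merged (with
# 'unknown' appended to flag the disagreement), while an invalid pair collapses to 'unknown'.
def _example_combine_phase_annotations():
    assert combine_phase_annotations("pretreatment", "treatment") == [
        "pretreatment", "treatment", "unknown"]
    assert combine_phase_annotations("pretreatment", "end_of_life") == ["unknown"]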
def get_phase_annotations(conflict_resolution_strategy="combine"):
try:
db = get_annotation_db()
cursor = db.execute("""
SELECT a.site_id, a.journal_oid, a.data, a.username, c.correct_username
FROM journalAnnotation a LEFT JOIN journalAnnotationConflictResolution c
ON a.site_id = c.site_id AND a.journal_oid = c.journal_oid AND a.annotation_type = c.annotation_type
WHERE a.annotation_type = "journal_journey_phase" AND a.data <> ""
GROUP BY a.site_id, a.journal_oid, a.username
ORDER BY a.id DESC
""")
phase_annotations = []
ambiguous_phase_annotation_count = 0
# Sort the returned annotations so that we can group by the individual journals
def group_by_journal_function(row):
return row['site_id'], row['journal_oid']
# group by the journals, writing a single line for each journal in the dataset
all_rows = cursor.fetchall()
all_rows.sort(key=group_by_journal_function)
for key, group in itertools.groupby(all_rows, group_by_journal_function):
rows = list(group)
site_id, journal_oid = key
# We are considering all of the annotations for a single journal here
data = None
phases = None
if len(rows) == 1:
assert rows[0]['correct_username'] is None or rows[0]['correct_username'] == ""
annotator_usernames = rows[0]['username']
conflict_status = "SINGLE USER"
data = rows[0]['data']
else: # 2 or more annotators
# get the list of annotator names
annotator_usernames = "|".join(sorted([row['username'] for row in rows]))
if rows[0]['correct_username'] is not None and rows[0]['correct_username'] != "":
# this annotation was corrected!
correct_username = rows[0]['correct_username']
conflict_status = "RESOLVED"
data = None
for row in rows:
if row['username'] == correct_username:
data = row['data']
if data is None:
raise ValueError("Annotation unexpectedly lacks data.")
if data is None:
# this condition implies an invalid correction in the database
print(f"WARNING: {correct_username} not found in {annotator_usernames}. Replacing with 'unknown'.")
data = 'unknown'
else: # no correction for this journal
phases, conflict_status = \
resolve_phase_annotation_conflicts(rows, resolution_strategy=conflict_resolution_strategy)
if data is None and phases is None:
raise ValueError("Unexpected and unhandled conflicts between a journal's phase annotations.")
if phases is None:
phases = get_labels_from_phase_string(data)
phase_annotation_data = {'site_id': site_id,
'journal_oid': journal_oid,
'conflict_status': conflict_status,
'phases': phases}
phase_annotations.append(phase_annotation_data)
return phase_annotations
finally:
db.close()
def resolve_phase_annotation_conflicts(rows, allow_majority_agreement=True, resolution_strategy="combine"):
"""
:param rows: Rows of annotations containing 'data'
    :param allow_majority_agreement: If True, annotation sets in which at least one pair of annotators agrees
                                     completely are treated as (minimal-conflict) agreement rather than a conflict
:param resolution_strategy: View the source for the exact implementation of each approach.
Valid values are: 'unknown', 'none', 'and', 'or', 'safe_or', 'combine' (default)
:return: The phases, as resolved from any conflict, and a string describing the status of conflict resolution
"""
combinations = [(combo[0]['data'] == combo[1]['data'], combo[0]['data'])
for combo in itertools.combinations(rows, 2)]
agreements = [data for is_match, data in combinations if is_match is True]
phases = None # this function resolves conflicts, assigning this phase list
if len(agreements) == len(combinations): # all annotators agree
conflict_status = "NO CONFLICT"
data = agreements[0]
elif allow_majority_agreement and len(agreements) >= 1: # at least one pair of annotators agree
# note that this isn't the same as majority agreement if > 3 annotators have annotated a single journal
# but at the time of implementation that will never happen
conflict_status = "MINIMAL CONFLICT"
data = agreements[0]
else: # no agreements between any of the annotators!
# this annotation was not corrected and there is no absolute agreement
# In this situation, we'll resolve based on the resolution strategy
conflict_status = "CONFLICT"
if resolution_strategy == "unknown":
data = 'unknown'
phases = ['unknown']
elif resolution_strategy == "none":
data = ""
phases = []
elif resolution_strategy == "and":
# includes all phases selected by all annotators
phase_sets = [set(get_labels_from_phase_string(row['data'])) for row in rows]
phases = sort_phase_list(list(set.intersection(*phase_sets)))
elif resolution_strategy == "or" or resolution_strategy == "all_checked":
# includes all phases present among the annotations
            # this strategy may be dangerous to use because the union can yield an invalid phase combination (see 'safe_or' below)
phase_sets = [set(get_labels_from_phase_string(row['data'])) for row in rows]
phases = sort_phase_list(list(set.union(*phase_sets)))
elif resolution_strategy == "safe_or":
# includes all phases present among the annotations
# but, if an illegal phase combination is created, will reset to 'unknown'
phase_sets = [set(get_labels_from_phase_string(row['data'])) for row in rows]
phases = sort_phase_list(list(set.union(*phase_sets)))
if 'unknown' in phases:
phases.remove('unknown')
transition_validity_list = [(phase1, phase2) in valid_transitions or (phase2, phase1) in valid_transitions
for phase1, phase2 in itertools.combinations(phases, 2)]
if False in transition_validity_list:
# This created an illegal combination of annotations!
phases = ['unknown']
elif resolution_strategy == "combine":
# In this situation, we'll try to combine the annotations if there are two annotators,
# otherwise just returning 'unknown'
data = "unknown"
if len(rows) > 2:
print("WARNING: 3+ annotators, but no agreement. Setting 'unknown'.",
[r['username'] for r in rows])
phases = ['unknown']
else:
phases = combine_phase_annotations(rows[0]['data'], rows[1]['data'])
else:
raise ValueError(f"Resolution strategy f{resolution_strategy} unknown.")
if phases is None:
phases = get_labels_from_phase_string(data)
return phases, conflict_status
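# Illustrative use of the resolver above with two hypothetical annotator rows that disagree; under
# the default 'combine' strategy the disagreement is merged and flagged with 'unknown'.
def _example_resolve_phase_annotation_conflicts():
    rows = [{"username": "annotator_a", "data": "pretreatment"},
            {"username": "annotator_b", "data": "treatment"}]
    phases, conflict_status = resolve_phase_annotation_conflicts(rows)
    assert conflict_status == "CONFLICT"
    assert phases == ["pretreatment", "treatment", "unknown"]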
def get_phase_annotations_by_username():
try:
db = get_annotation_db()
cursor = db.execute("""
SELECT a.site_id, a.journal_oid, a.data, a.username, c.correct_username
FROM journalAnnotation a LEFT JOIN journalAnnotationConflictResolution c
ON a.site_id = c.site_id AND a.journal_oid = c.journal_oid AND a.annotation_type = c.annotation_type
WHERE a.annotation_type = "journal_journey_phase" AND a.data <> ""
GROUP BY a.site_id, a.journal_oid, a.username
ORDER BY a.id DESC
""")
phase_annotations = []
rows = cursor.fetchall()
for row in rows:
site_id, journal_oid = row['site_id'], row['journal_oid']
username = row['username']
is_corrected = row['correct_username'] is not None and row['correct_username'] != ""
phases = get_labels_from_phase_string(row['data'])
phase_annotation_data = {'site_id': site_id,
'journal_oid': journal_oid,
'username': username,
'phases': phases,
'is_corrected': is_corrected}
phase_annotations.append(phase_annotation_data)
return phase_annotations
finally:
db.close()
def trim_phase_annotations_by_valid_sites(phase_annotations, valid_sites, print_report=True):
"""
:param phase_annotations: Iterable that can be keyed on 'site_id'
    :param valid_sites: List of site_ids to keep; annotations from any other site are excluded
:param print_report: If a report about the excluded sites should be printed to stdout
:return:
"""
site_ids_not_in_candidate_sites = []
phase_annotations_filtered = []
for a in phase_annotations:
site_id = a['site_id']
if site_id not in valid_sites:
site_ids_not_in_candidate_sites.append(site_id)
else:
phase_annotations_filtered.append(a)
if print_report:
excluded_site_counts = Counter(site_ids_not_in_candidate_sites).most_common()
for site_id, count in excluded_site_counts:
print('Not valid site excluded:', count, site_id, get_webclient_url(site_id))
return phase_annotations_filtered
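# Illustrative filtering with two hypothetical annotations; print_report=False avoids the
# webclient URL lookup for the excluded site.
def _example_trim_phase_annotations():
    annotations = [{"site_id": 1, "journal_oid": "a", "phases": ["treatment"]},
                   {"site_id": 2, "journal_oid": "b", "phases": ["cured"]}]
    kept = trim_phase_annotations_by_valid_sites(annotations, valid_sites=[1], print_report=False)
    assert [a["site_id"] for a in kept] == [1]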
def group_by_site_id(phase_annotations):
"""
This is the function that groups by site_id and actually retrieves the journal data.
This function does 3 things:
1) Retrieves text data for all journals on a site with annotations
2) Filters out annotations for sites that aren't completely or near-completely annotated
3) Returns a dataframe of the annotations with phases and text together
"""
site_id_count, total_journal_count = 0, 0
annotated_journal_list = [] # a list of dictionaries, so that it can be turned into a Pandas dataframe
def sort_by_site_id(tup):
return tup['site_id']
phase_annotations = sorted(phase_annotations, key=sort_by_site_id)
for k, g in tqdm(itertools.groupby(phase_annotations, sort_by_site_id)):
site_id = k
site_annotations = list(g)
if len(site_annotations) < 5:
continue # only want complete sites, and sites with less than five definitely aren't complete
journals = get_journal_info(site_id)
assert len(site_annotations) <= len(journals)
journals = [j for j in journals if j['text_length'] >= 50]
unannotated_journals = len(journals) - len(site_annotations)
if unannotated_journals > 1:
print("\nSite %d lacks %d journals with phase annotations; %s" % (
site_id, unannotated_journals, get_webclient_url(site_id)))
print("It will be excluded unless the whole site is coded for phases!")
continue # not every non-trivial journal on this site is coded
annotations_added = 0
unmatched_count = 0
annotation_dict = {a['journal_oid']: a['phases'] for a in site_annotations}
for journal in journals:
if journal['journal_oid'] not in annotation_dict:
unmatched_count += 1
continue
journal_phases = annotation_dict[journal['journal_oid']]
journal['phases'] = "|".join(journal_phases)
journal_text = get_journal_text_representation(journal)
# FIXME Make this more robust
if len(annotated_journal_list) > 0:
prev_journal_text = annotated_journal_list[-1]['journal_text']
else:
prev_journal_text = ""
if len(annotated_journal_list) > 1:
prev_prev_journal_text = annotated_journal_list[-2]['journal_text']
else:
prev_prev_journal_text = ""
journal['journal_text'] = journal_text
# journal['text'] = prev_prev_journal_text + " " + FLD + " 2 " + prev_journal_text + " " + FLD + " 3 " + journal_text
annotated_journal_list.append(journal)
annotations_added += 1
total_journal_count += annotations_added
site_id_count += 1
    columns = ['site_id', 'journal_oid', 'journal_index', 'journal_text', 'phases']
df = pd.DataFrame(annotated_journal_list, columns=columns)
print("Sites:", site_id_count, "; total annotated journals:", total_journal_count, "; df len:", len(df))
return df
def get_annotated_phase_df(conflict_score_cost=0.1, unknown_score_cost=0.2):
"""
:return: Pandas DataFrame with the following columns:
site_id, journal_oid, journal_index, is_annotated, conflict_status, phases, {phase_name}_score, journal_text
"""
phase_annotations = get_phase_annotations()
df_entries = []
def sort_by_site_id(tup):
return tup['site_id']
phase_annotations = sorted(phase_annotations, key=sort_by_site_id)
for k, g in tqdm(itertools.groupby(phase_annotations, sort_by_site_id)):
site_id = k
site_annotations = list(g)
if len(site_annotations) < 5:
continue # only want complete sites, and sites with less than five definitely aren't complete
journals = get_journal_info(site_id)
assert len(site_annotations) <= len(journals)
annotation_dict = {a['journal_oid']: a for a in site_annotations}
journals = [j for j in journals if j['text_length'] >= 50]
unannotated_journals = len(journals) - len(site_annotations)
if unannotated_journals > 1:
if site_id in DEFAULT_TREATMENT_SITE_IDS:
# This is an exception created originally for two sites coded by Saumik
# The sites had too many journals, so to save time the unannotated journals should just be treatment
                # We construct new annotations and inject them into the annotation_dict
for journal in journals:
if journal['journal_oid'] not in annotation_dict:
new_annotation = {'phases': ['treatment'], 'conflict_status': 'NO CONFLICT'}
annotation_dict[journal['journal_oid']] = new_annotation
else:
print("\nSite %d lacks %d journals with phase annotations; %s" % (
site_id, unannotated_journals, get_webclient_url(site_id)))
print("It will be excluded unless the whole site is coded for phases!")
continue # not every non-trivial journal on this site is coded
unmatched_count = 0
journal_index = 0
for journal in journals:
site_id, journal_oid = journal['site_id'], journal['journal_oid']
journal_text = get_journal_text_representation(journal)
if journal_text is None:
continue
if journal_oid not in annotation_dict:
unmatched_count += 1
continue
annotation = annotation_dict[journal_oid]
journal_phases = annotation['phases']
conflict_status = annotation['conflict_status']
new_entry = {'site_id': site_id,
'journal_oid': journal_oid,
'journal_index': journal_index,
'created_at': journal['created_at'],
'is_annotated': True,
'conflict_status': conflict_status,
'phases': journal_phases,
'journal_text': journal_text}
journal_index += 1
            unknown_score_penalty = unknown_score_cost if 'unknown' in journal_phases else 0
for phase_label in phase_labels:
score = int(phase_label in journal_phases)
if score >= 0.5:
score -= unknown_score_penalty
if conflict_status == "CONFLICT":
score -= conflict_score_cost
new_entry[phase_label + "_score"] = score
df_entries.append(new_entry)
if unmatched_count > 0:
print(f"{unmatched_count} non-trivial journals did not have annotations on site {site_id} and were skipped.")
columns = ['site_id', 'journal_oid', 'journal_index', 'created_at',
'is_annotated', 'conflict_status', 'phases', 'journal_text']
columns += [phase_label + "_score" for phase_label in phase_labels]
df = pd.DataFrame(df_entries, columns=columns)
return df
def add_sites_to_phase_df(df, new_entries, disallow_duplicate_sites=True):
"""
Add the given [unannotated] sites to the given df.
:param df: A df generated by get_annotated_phase_df()
:param new_entries: A list of integer site_ids
:param disallow_duplicate_sites: If true, journals will only be added from sites not already in the df.
:return: The new dataframe containing all journals in the sites indicated by new_entries
"""
existing_site_ids = set(df.site_id)
skipped_sites_count = 0
df_entries = []
for site_id in tqdm(new_entries):
if disallow_duplicate_sites and site_id in existing_site_ids:
skipped_sites_count += 1
continue
journals = get_journal_info(site_id)
journal_index = 0
for journal in journals:
journal_text = get_journal_text_representation(journal)
if journal_text is None:
continue
new_entry = {'site_id': site_id,
'journal_oid': journal['journal_oid'],
'journal_index': journal_index,
'created_at': journal['created_at'],
'is_annotated': False,
'conflict_status': None,
'phases': [],
'journal_text': journal_text}
journal_index += 1
df_entries.append(new_entry)
unannotated_df =
|
pd.DataFrame(df_entries)
|
pandas.DataFrame
|
"""Methods to find information in the different pipelines of Clinica."""
import os
from glob import glob
from os import path
import pandas as pd
def pet_volume_pipeline(
caps_dir,
df,
group_selection=None,
volume_atlas_selection=None,
pvc_restriction=None,
tracers_selection=None,
**kwargs,
):
"""Merge the data of the PET-Volume pipeline to the merged file containing the BIDS information.
Args:
caps_dir: the path to the CAPS directory
df: the DataFrame containing the BIDS information
group_selection: allows to choose the DARTEL groups to merge. If None all groups are selected.
volume_atlas_selection: allows to choose the atlas to merge (default = 'all')
pvc_restriction: gives the restriction on the inclusion or not of the file with the label 'pvc-rbv'
1 --> only the atlases containing the label will be used
0 --> the atlases containing the label won't be used
None --> all the atlases will be used
tracers_selection: allows to choose the PET tracer to merge (default = 'all')
Returns:
final_df: a DataFrame containing the information of the bids and the pipeline
"""
pet_path = path.join("pet", "preprocessing")
return volume_pipeline(
caps_dir,
df,
pet_path,
group_selection=group_selection,
atlas_selection=volume_atlas_selection,
pvc_restriction=pvc_restriction,
pipeline_name="pet-volume",
tracers_selection=tracers_selection,
)
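# Illustrative call of the PET-Volume merge above (a sketch with hypothetical arguments): the CAPS
# path, DARTEL group label and tracer label are placeholders, and bids_df stands for the merged
# BIDS dataframe described in the docstring.
def _example_pet_volume_merge(caps_dir="/path/to/caps", bids_df=None):
    final_df, summary_df = pet_volume_pipeline(
        caps_dir,
        bids_df,
        group_selection=["AD-CN"],    # hypothetical DARTEL group label
        tracers_selection=["fdg"],    # hypothetical tracer label as it appears in the statistics filenames
        pvc_restriction=1,            # keep only the 'pvc-rbv' statistics
    )
    return final_df, summary_df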
def t1_freesurfer_pipeline(caps_dir, df, freesurfer_atlas_selection=None, **kwargs):
"""Merge the data of the PET-Volume pipeline to the merged file containing the BIDS information.
Args:
caps_dir: the path to the CAPS directory
df: the DataFrame containing the BIDS information
freesurfer_atlas_selection: allows to choose the atlas to merge (default = 'all')
Returns:
final_df: a DataFrame containing the information of the bids and the pipeline
"""
from clinica.iotools.converters.adni_to_bids.adni_utils import (
replace_sequence_chars,
)
from clinica.utils.stream import cprint
# Ensures that df is correctly indexed
if "participant_id" in df.columns.values:
df.set_index(["participant_id", "session_id"], inplace=True, drop=True)
subjects_dir = path.join(caps_dir, "subjects")
pipeline_df = pd.DataFrame()
for participant_id, session_id in df.index.values:
ses_path = path.join(subjects_dir, participant_id, session_id)
mod_path = path.join(
ses_path, "t1", "freesurfer_cross_sectional", "regional_measures"
)
ses_df = pd.DataFrame(
[[participant_id, session_id]], columns=["participant_id", "session_id"]
)
ses_df.set_index(["participant_id", "session_id"], inplace=True, drop=True)
if os.path.exists(mod_path):
# Looking for atlases
atlas_paths = glob(
path.join(mod_path, f"{participant_id}_{session_id}_*thickness.tsv")
)
for atlas_path in atlas_paths:
atlas_name = atlas_path.split("_parcellation-")[1].split("_")[0]
if path.exists(atlas_path) and (
not freesurfer_atlas_selection
or (
freesurfer_atlas_selection
and atlas_name in freesurfer_atlas_selection
)
):
atlas_df = pd.read_csv(atlas_path, sep="\t")
label_list = [
f"t1-freesurfer_atlas-{atlas_name}_ROI-{replace_sequence_chars(roi_name)}_thickness"
for roi_name in atlas_df.label_name.values
]
ses_df[label_list] = atlas_df["label_value"].to_numpy()
# Always retrieve subcortical volumes
atlas_path = path.join(
mod_path, f"{participant_id}_{session_id}_segmentationVolumes.tsv"
)
atlas_df = pd.read_csv(atlas_path, sep="\t")
label_list = [
f"t1-freesurfer_segmentation-volumes_ROI-{replace_sequence_chars(roi_name)}_volume"
for roi_name in atlas_df.label_name.values
]
ses_df[label_list] = atlas_df["label_value"].to_numpy()
pipeline_df = pipeline_df.append(ses_df)
summary_df = generate_summary(pipeline_df, "t1-freesurfer", ignore_groups=True)
final_df = pd.concat([df, pipeline_df], axis=1)
final_df.reset_index(inplace=True)
return final_df, summary_df
def t1_volume_pipeline(
caps_dir, df, group_selection=None, volume_atlas_selection=None, **kwargs
):
"""Merge data of the t1-volume pipeline to the merged file containing the BIDS information.
Args:
caps_dir: the path to the CAPS directory
df: the DataFrame containing the BIDS information
group_selection: allows to choose the DARTEL groups to merge. If None all groups are selected.
volume_atlas_selection: allows to choose the atlas to merge. If None all atlases are selected.
Returns:
final_df: a DataFrame containing the information of the bids and the pipeline
"""
t1_spm_path = path.join("t1", "spm", "dartel")
return volume_pipeline(
caps_dir,
df,
t1_spm_path,
group_selection=group_selection,
atlas_selection=volume_atlas_selection,
pvc_restriction=None,
pipeline_name="t1-volume",
)
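# Illustrative call of the t1-volume merge above (hypothetical CAPS directory, group label and
# atlas name): restrict the merge to a single DARTEL group and atlas.
def _example_t1_volume_merge(caps_dir="/path/to/caps", bids_df=None):
    final_df, summary_df = t1_volume_pipeline(
        caps_dir,
        bids_df,
        group_selection=["AD-CN"],          # hypothetical group label
        volume_atlas_selection=["AAL2"],    # hypothetical atlas name
    )
    return final_df, summary_df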
def volume_pipeline(
caps_dir,
df,
pipeline_path,
pipeline_name,
group_selection=None,
atlas_selection=None,
pvc_restriction=None,
tracers_selection=None,
):
"""Merge data of the t1-volume and pet-volume pipelines to the merged file containing the BIDS information.
Args:
caps_dir: the path to the CAPS directory
df: the DataFrame containing the BIDS information
pipeline_path: path between the session folder and the group folder
pipeline_name: name of the pipeline
group_selection: allows to choose the DARTEL groups to merge. If None all groups are selected.
atlas_selection: allows to choose the atlas to merge. If None all atlases are selected.
pvc_restriction: gives the restriction on the inclusion or not of the file with the label 'pvc-rbv'
1 --> only the atlases containing the label will be used
0 --> the atlases containing the label won't be used
None --> all the atlases will be used
tracers_selection: allows to choose the PET tracer to merge (default = 'all')
Returns:
final_df: a DataFrame containing the information of the bids and the pipeline
"""
from clinica.iotools.converters.adni_to_bids.adni_utils import (
replace_sequence_chars,
)
# Ensures that df is correctly indexed
if "participant_id" in df.columns.values:
df.set_index(["participant_id", "session_id"], inplace=True, drop=True)
if not group_selection:
try:
group_selection = os.listdir(path.join(caps_dir, "groups"))
except FileNotFoundError:
return df, None
else:
group_selection = [f"group-{group}" for group in group_selection]
subjects_dir = path.join(caps_dir, "subjects")
pipeline_df = pd.DataFrame()
for participant_id, session_id in df.index.values:
ses_path = path.join(subjects_dir, participant_id, session_id)
mod_path = path.join(ses_path, pipeline_path)
ses_df = pd.DataFrame(
[[participant_id, session_id]], columns=["participant_id", "session_id"]
)
ses_df.set_index(["participant_id", "session_id"], inplace=True, drop=True)
if os.path.exists(mod_path):
# Looking for groups
for group in group_selection:
group_path = path.join(mod_path, group)
if os.path.exists(group_path):
# Looking for atlases
if not atlas_selection:
atlas_paths = glob(
path.join(
group_path,
"atlas_statistics",
f"{participant_id}_{session_id}_*_statistics.tsv",
)
)
else:
atlas_paths = list()
for atlas in atlas_selection:
atlas_paths += glob(
path.join(
group_path,
"atlas_statistics",
f"{participant_id}_{session_id}_*{atlas}*_statistics.tsv",
)
)
# Filter pvc_restriction
                    if pvc_restriction is not None:
if pvc_restriction == 1:
atlas_paths = [
atlas_path
for atlas_path in atlas_paths
if "pvc-rbv" in atlas_path
]
else:
atlas_paths = [
atlas_path
for atlas_path in atlas_paths
if "pvc-rbv" not in atlas_path
]
# Filter tracers
if tracers_selection:
atlas_paths = [
atlas_path
for atlas_path in atlas_paths
for tracer in tracers_selection
if tracer in atlas_path
]
for atlas_path in atlas_paths:
atlas_name = atlas_path.split("_space-")[-1].split("_")[0]
if path.exists(atlas_path):
atlas_df = pd.read_csv(atlas_path, sep="\t")
additional_desc = ""
if "acq" in atlas_path:
tracer = atlas_path.split("_acq-")[1].split("_")[0]
additional_desc += f"_acq-{tracer}"
if "pvc-rbv" in atlas_path:
additional_desc += f"_pvc-rbv"
label_list = [
f"{pipeline_name}_{group}_atlas-{atlas_name}{additional_desc}_ROI-{replace_sequence_chars(roi_name)}_intensity"
for roi_name in atlas_df.label_name.values
]
ses_df[label_list] = atlas_df["mean_scalar"].to_numpy()
pipeline_df = pipeline_df.append(ses_df)
summary_df = generate_summary(pipeline_df, pipeline_name)
final_df = pd.concat([df, pipeline_df], axis=1)
final_df.reset_index(inplace=True)
return final_df, summary_df
def generate_summary(pipeline_df, pipeline_name, ignore_groups=False):
columns = [
"pipeline_name",
"group_id",
"atlas_id",
"tracer",
"pvc",
"regions_number",
"first_column_name",
"last_column_name",
]
summary_df =
|
pd.DataFrame(columns=columns)
|
pandas.DataFrame
|
from mdstudio.deferred.chainable import chainable
from mdstudio.component.session import ComponentSession
from mdstudio.runner import main
from os.path import join
import numpy as np
import os
import pandas as pd
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
file_path = os.path.realpath(__file__)
root = os.path.split(file_path)[0]
def create_path_file_obj(path, encoding='utf8'):
"""
Encode the input files
"""
extension = os.path.splitext(path)[1]
with open(path, 'r') as f:
content = f.read()
return {
u'path': path, u'encoding': encoding,
u'content': str(content), u'extension': extension}
def create_workdir(name, path="/tmp/mdstudio/mdstudio_pylie"):
"""Create temporal workdir dir"""
workdir = join(path, name)
if not os.path.isdir(workdir):
os.makedirs(workdir)
return workdir
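# Illustrative preparation of a pylie job (the input path is a hypothetical placeholder):
# encode an input file for transport and create a scratch directory for the run.
def _example_prepare_job(input_path=join(root, "files", "example.ini")):
    file_obj = create_path_file_obj(input_path)
    workdir = create_workdir("pylie_example")
    return file_obj, workdir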
def compare_csv_files(str_1, str_2):
"""check if two csv files are the same"""
f1 = StringIO(str_1)
f2 = StringIO(str_2)
df1 = pd.read_csv(f1).sort_index(axis=1)
df2 = pd.read_csv(f2).sort_index(axis=1)
return df1.equals(df2)
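# Illustrative checks for the CSV comparison above: column order is ignored, value changes are not.
def _example_compare_csv_files():
    assert compare_csv_files("a,b\n1,2\n", "b,a\n2,1\n")
    assert not compare_csv_files("a,b\n1,2\n", "a,b\n1,3\n")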
def compare_dictionaries(d1, d2):
"""Compare two dictionaries with nested numerical results """
df1 =
|
pd.DataFrame(d1)
|
pandas.DataFrame
|
# Neural network for pop assignment
# Load packages
import tensorflow.keras as tf
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
import numpy as np
import pandas as pd
import allel
import zarr
import h5py
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import itertools
import shutil
import sys
import os
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sn
def hyper_tune(
infile,
sample_data,
max_trials=10,
runs_per_trial=10,
max_epochs=100,
train_prop=0.8,
seed=None,
save_dir="out",
mod_name="hyper_tune",
):
"""
Tunes hyperparameters of keras model for population assignment.
    Parameters
    ----------
infile : string
Path to VCF file containing genetic data.
sample_data : string
Path to tab-delimited file containing columns x, y,
pop, and sampleID.
max_trials : int
Number of trials to run for RandomSearch (Default=10).
runs_per_trial : int
Number of runs per trial for RandomSearch (Default=10).
max_epochs : int
Number of epochs to train model (Default=100).
train_prop : float
Proportion of data to train on. Remaining data will be kept
as a test set and not used until final model is trained
(Default=0.8).
seed : int
Random seed (Default=None).
save_dir : string
Directory to save output to (Default='out').
mod_name : string
Name of model in save directory (Default='hyper_tune').
Returns
-------
best_mod : keras sequential model
Best model from hyperparameter tuning
y_train : pd.DataFrame
training labels
y_val : pd.DataFrame
Validation labels
"""
# Check input types
if os.path.exists(infile) is False:
raise ValueError("infile does not exist")
if os.path.exists(sample_data) is False:
raise ValueError("sample_data does not exist")
    if isinstance(max_trials, int) is False:
        raise ValueError("max_trials should be integer")
    if isinstance(runs_per_trial, int) is False:
        raise ValueError("runs_per_trial should be integer")
    if isinstance(max_epochs, int) is False:
        raise ValueError("max_epochs should be integer")
    if isinstance(train_prop, float) is False:
        raise ValueError("train_prop should be float")
    if isinstance(seed, int) is False and seed is not None:
raise ValueError("seed should be integer or None")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be string")
if isinstance(mod_name, str) is False:
raise ValueError("mod_name should be string")
# Create save_dir if doesn't already exist
print(f"Output will be saved to: {save_dir}")
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# Read data
samp_list, dc = read_data(
infile=infile,
sample_data=sample_data,
save_allele_counts=False,
kfcv=True,
)
    # The held-out test set must contain at least one sample per population
if len(dc) * (1 - train_prop) < len(np.unique(samp_list["pops"])):
raise ValueError("train_prop is too high; not enough samples for test")
# Create test set that will be used to assess model performance later
X_train_0, X_test, y_train_0, y_test = train_test_split(
dc, samp_list, stratify=samp_list["pops"], train_size=train_prop
)
# Save train and test set to save_dir
np.save(save_dir + "/X_train.npy", X_train_0)
y_train_0.to_csv(save_dir + "/y_train.csv", index=False)
np.save(save_dir + "/X_test.npy", X_test)
y_test.to_csv(save_dir + "/y_test.csv", index=False)
    # Split data into training and validation sets for hyperparameter tuning
X_train, X_val, y_train, y_val = train_test_split(
dc,
samp_list,
stratify=samp_list["pops"],
train_size=train_prop,
random_state=seed,
)
# Make sure all classes represented in y_val
if len(np.unique(y_train["pops"])) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
    y_val_enc = enc.transform(
        y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
hypermodel = classifierHyperModel(
input_shape=X_train.shape[1], num_classes=len(popnames)
)
tuner = RandomSearch(
hypermodel,
objective="val_loss",
seed=seed,
max_trials=max_trials,
executions_per_trial=runs_per_trial,
directory=save_dir,
project_name=mod_name,
)
tuner.search(
X_train - 1,
y_train_enc,
epochs=max_epochs,
validation_data=(X_val - 1, y_val_enc),
)
best_mod = tuner.get_best_models(num_models=1)[0]
tuner.get_best_models(num_models=1)[0].save(save_dir + "/best_mod")
return best_mod, y_train, y_val
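# Illustrative call of the tuner above (a sketch; the VCF and sample file paths are hypothetical
# placeholders, and the sample file must contain the x, y, pop and sampleID columns described above).
def _example_hyper_tune():
    best_mod, y_train, y_val = hyper_tune(
        infile="data/populations.vcf",        # hypothetical path
        sample_data="data/sample_data.txt",   # hypothetical path
        max_trials=5,
        runs_per_trial=2,
        max_epochs=20,
        train_prop=0.8,
        seed=42,
        save_dir="hyper_tune_out",
        mod_name="demo",
    )
    return best_mod, y_train, y_val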
def kfcv(
infile,
sample_data,
mod_path=None,
n_splits=5,
n_reps=5,
ensemble=False,
save_dir="kfcv_output",
return_plot=True,
save_allele_counts=False,
**kwargs,
):
"""
Runs K-fold cross-validation to get an accuracy estimate of the model.
Parameters
----------
infile : string
Path to VCF or hdf5 file with genetic information
for all samples (including samples of unknown origin).
sample_data : string
Path to input file with all samples present (including
samples of unknown origin), which is a tab-delimited
text file with columns x, y, pop, and sampleID.
n_splits : int
Number of folds in k-fold cross-validation
(Default=5).
n_reps : int
Number of times to repeat k-fold cross-validation,
creating the number of models in the ensemble
(Default=5).
ensemble : bool
        Whether to use an ensemble of models or a single model (Default=False).
save_dir : string
Directory where results will be stored (Default='kfcv_output').
return_plot : boolean
Returns a confusion matrix of correct assignments (Default=True).
    save_allele_counts : boolean
Whether or not to store derived allele counts in hdf5
file (Default=False).
**kwargs
Keyword arguments for pop_finder function.
Returns
-------
report : pd.DataFrame
Classification report for all models.
ensemble_report : pd.DataFrame
Classification report for ensemble of models.
"""
# Check inputs
# Check is sample_data path exists
if os.path.exists(sample_data) is False:
raise ValueError("path to sample_data incorrect")
# Make sure hdf5 file is not used as gen_dat
if os.path.exists(infile) is False:
raise ValueError("path to infile does not exist")
# Check data types
    if isinstance(n_splits, int) is False:
        raise ValueError("n_splits should be an integer")
    if isinstance(n_reps, int) is False:
        raise ValueError("n_reps should be an integer")
if isinstance(ensemble, bool) is False:
raise ValueError("ensemble should be a boolean")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be a string")
# Check nsplits is > 1
if n_splits <= 1:
raise ValueError("n_splits must be greater than 1")
samp_list, dc = read_data(
infile=infile,
sample_data=sample_data,
save_allele_counts=save_allele_counts,
kfcv=True,
)
popnames = np.unique(samp_list["pops"])
# Check there are more samples in the smallest pop than n_splits
if n_splits > samp_list.groupby(["pops"]).agg(["count"]).min().values[0]:
raise ValueError(
"n_splits cannot be greater than number of samples in smallest pop"
)
# Create stratified k-fold
rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_reps)
pred_labels = []
true_labels = []
pred_labels_ensemble = []
true_labels_ensemble = []
ensemble_preds = pd.DataFrame()
preds = pd.DataFrame()
fold_var = 1
for t, v in rskf.split(dc, samp_list["pops"]):
# Subset train and validation data
X_train = dc[t, :] - 1
X_val = dc[v, :] - 1
y_train = samp_list.iloc[t]
y_val = samp_list.iloc[v]
if ensemble:
test_dict, tot_bag_df = pop_finder(
X_train,
y_train,
X_val,
y_val,
save_dir=save_dir,
ensemble=True,
**kwargs,
)
# Unit tests for results from pop_finder
if bool(test_dict) is False:
raise ValueError("Empty dictionary from pop_finder")
if tot_bag_df.empty:
raise ValueError("Empty dataframe from pop_finder")
if len(test_dict) == 1:
raise ValueError(
"pop_finder results consists of single dataframe\
however ensemble set to True"
)
ensemble_preds = ensemble_preds.append(tot_bag_df)
else:
test_dict = pop_finder(
X_train,
y_train,
X_val,
y_val,
save_dir=save_dir,
**kwargs,
)
# Unit tests for results from pop_finder
if bool(test_dict) is False:
raise ValueError("Empty dictionary from pop_finder")
if len(test_dict["df"]) != 1:
raise ValueError(
"pop_finder results contains ensemble of models\
should be a single dataframe"
)
preds = preds.append(test_dict["df"][0])
tmp_pred_label = []
tmp_true_label = []
for i in range(0, len(test_dict["df"])):
tmp_pred_label.append(
test_dict["df"][i].iloc[
:, 0:len(popnames)
].idxmax(axis=1).values
)
tmp_true_label.append(test_dict["df"][i]["true_pops"].values)
if ensemble:
pred_labels_ensemble.append(
tot_bag_df.iloc[:, 0:len(popnames)].idxmax(axis=1).values
)
true_labels_ensemble.append(tmp_true_label[0])
pred_labels.append(np.concatenate(tmp_pred_label, axis=0))
true_labels.append(np.concatenate(tmp_true_label, axis=0))
fold_var += 1
# return pred_labels, true_labels
pred_labels = np.concatenate(pred_labels)
true_labels = np.concatenate(true_labels)
report = classification_report(
true_labels, pred_labels, zero_division=1, output_dict=True
)
report = pd.DataFrame(report).transpose()
report.to_csv(save_dir + "/classification_report.csv")
if ensemble:
ensemble_preds.to_csv(save_dir + "/ensemble_preds.csv")
true_labels_ensemble = np.concatenate(true_labels_ensemble)
pred_labels_ensemble = np.concatenate(pred_labels_ensemble)
ensemble_report = classification_report(
true_labels_ensemble,
pred_labels_ensemble,
zero_division=1,
output_dict=True,
)
ensemble_report = pd.DataFrame(ensemble_report).transpose()
ensemble_report.to_csv(
save_dir + "/ensemble_classification_report.csv")
else:
preds.to_csv(save_dir + "/preds.csv")
if return_plot is True:
cm = confusion_matrix(true_labels, pred_labels, normalize="true")
cm = np.round(cm, 2)
plt.style.use("default")
plt.figure()
plt.imshow(cm, cmap="Blues")
plt.colorbar()
plt.ylabel("True Pop")
plt.xlabel("Pred Pop")
plt.title("Confusion Matrix")
tick_marks = np.arange(len(np.unique(true_labels)))
plt.xticks(tick_marks, np.unique(true_labels))
plt.yticks(tick_marks, np.unique(true_labels))
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.savefig(save_dir + "/cm.png")
plt.close()
if ensemble:
# Plot second confusion matrix
cm = confusion_matrix(
true_labels_ensemble, pred_labels_ensemble, normalize="true"
)
cm = np.round(cm, 2)
plt.style.use("default")
plt.figure()
plt.imshow(cm, cmap="Blues")
plt.colorbar()
plt.ylabel("True Pop")
plt.xlabel("Pred Pop")
plt.title("Confusion Matrix")
tick_marks = np.arange(len(np.unique(true_labels)))
plt.xticks(tick_marks, np.unique(true_labels))
plt.yticks(tick_marks, np.unique(true_labels))
thresh = cm.max() / 2.0
for i, j in itertools.product(
range(cm.shape[0]), range(cm.shape[1])
):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.savefig(save_dir + "/ensemble_cm.png")
plt.close()
if ensemble:
return report, ensemble_report
else:
return report
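# Illustrative call of the cross-validation wrapper above (hypothetical input paths); with
# ensemble=True both the per-model and ensemble classification reports are returned.
def _example_kfcv():
    report, ensemble_report = kfcv(
        infile="data/populations.vcf",        # hypothetical path
        sample_data="data/sample_data.txt",   # hypothetical path
        n_splits=5,
        n_reps=2,
        ensemble=True,
        save_dir="kfcv_output",
    )
    return report, ensemble_report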
def pop_finder(
X_train,
y_train,
X_test,
y_test,
unknowns=None,
ukgen=None,
ensemble=False,
try_stacking=False,
nbags=10,
train_prop=0.8,
mod_path=None,
predict=False,
save_dir="out",
save_weights=False,
patience=20,
batch_size=32,
max_epochs=100,
gpu_number="0",
plot_history=False,
seed=None,
):
"""
Trains classifier neural network, calculates accuracy on
test set, and makes predictions.
Parameters
----------
X_train: np.array
Array of genetic data corresponding to train samples.
y_train: pd.DataFrame
Dataframe of train samples, including columns for samples and pops.
X_test: np.array
Array of genetic data corresponding to test samples.
y_test: pd.DataFrame
Dataframe of test samples, including columns for samples and pops.
unknowns: pd.DataFrame
Dataframe of unknowns calculated from read_data (Default=None).
ukgen : np.array
Array of genetic data corresponding to unknown samples
(Default=None).
ensemble : boolean
If set to true, will train an ensemble of models using
bootstrap aggregating (Default=False).
try_stacking : boolean
Use weights to influence ensemble model decisions. Must have
ensemble set to True to use. Use caution: with low test set sizes,
can be highly inaccurate and overfit (Default=False).
nbags : int
Number of "bags" (models) to create for the bootstrap
aggregating algorithm. This option only needs to be set if
ensemble is set to True (Default=10).
train_prop : float
Proportion of samples used in training (Default=0.8).
mod_path : string
Path to a directory containing a previously trained model ('best_mod') to load instead of building a new network (Default=None).
predict : boolean
Predict on unknown data. Must have unknowns in sample_data to use
this feature (Default=False).
save_dir : string
Directory to save results to (Default="out").
save_weights : boolean
Save model weights for later use (Default=False).
patience : int
How many epochs to wait before early stopping if loss has not
improved (Default=20).
batch_size : int
Number of samples per gradient update (Default=32).
max_epochs : int
Maximum number of training epochs (Default=100).
gpu_number : string
Not in use yet, coming soon (Default="0").
plot_history : boolean
Plot training / validation history (Default=False).
seed : int
Random seed for splitting data (Default=None).
Returns
-------
test_dict : dict
Dictionary with test results.
tot_bag_df : pd.DataFrame
Dataframe with test results from ensemble.
"""
print(f"Output will be saved to: {save_dir}")
# Check if data is in right format
if isinstance(y_train, pd.DataFrame) is False:
raise ValueError("y_train is not a pandas dataframe")
if y_train.empty:
raise ValueError("y_train exists, but is empty")
if isinstance(y_test, pd.DataFrame) is False:
raise ValueError("y_test is not a pandas dataframe")
if y_test.empty:
raise ValueError("y_test exists, but is empty")
if isinstance(X_train, np.ndarray) is False:
raise ValueError("X_train is not a numpy array")
if len(X_train) == 0:
raise ValueError("X_train exists, but is empty")
if isinstance(X_test, np.ndarray) is False:
raise ValueError("X_test is not a numpy array")
if len(X_test) == 0:
raise ValueError("X_test exists, but is empty")
if isinstance(ensemble, bool) is False:
raise ValueError("ensemble should be a boolean")
if isinstance(try_stacking, bool) is False:
raise ValueError("try_stacking should be a boolean")
if isinstance(nbags, int) is False:
raise ValueError("nbags should be an integer")
if isinstance(train_prop, float) is False:
raise ValueError("train_prop should be a float")
if isinstance(predict, bool) is False:
raise ValueError("predict should be a boolean")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be a string")
if isinstance(save_weights, bool) is False:
raise ValueError("save_weights should be a boolean")
if isinstance(patience, int) is False:
raise ValueError("patience should be an integer")
if isinstance(batch_size, int) is False:
raise ValueError("batch_size should be an integer")
if isinstance(max_epochs, int) is False:
raise ValueError("max_epochs should be an integer")
if isinstance(plot_history, bool) is False:
raise ValueError("plot_history should be a boolean")
if isinstance(mod_path, str) is False and mod_path is not None:
raise ValueError("mod_path should be a string or None")
# Create save directory
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# If unknowns are not none
if unknowns is not None:
# Check if exists
if isinstance(unknowns, pd.DataFrame) is False:
raise ValueError("unknowns is not pandas dataframe")
if unknowns.empty:
raise ValueError("unknowns exists, but is empty")
if isinstance(ukgen, np.ndarray) is False:
raise ValueError("ukgen is not a numpy array")
if len(ukgen) == 0:
raise ValueError("ukgen exists, but is empty")
uksamples = unknowns["sampleID"].to_numpy()
# Add info about test samples
y_test_samples = y_test["samples"].to_numpy()
y_test_pops = y_test["pops"].to_numpy()
# One hot encode test values
enc = OneHotEncoder(handle_unknown="ignore")
y_test_enc = enc.fit_transform(
y_test["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
# results storage
TEST_LOSS = []
TEST_ACCURACY = []
TEST_95CI = []
yhats = []
ypreds = []
test_dict = {"count": [], "df": []}
pred_dict = {"count": [], "df": []}
top_pops = {"df": [], "pops": []}
if ensemble:
for i in range(nbags):
n_prime = int(np.ceil(len(X_train) * 0.8))
good_bag = False
while good_bag is False:
bag_X = np.zeros(shape=(n_prime, X_train.shape[1]))
bag_y = pd.DataFrame({"samples": [], "pops": [], "order": []})
for j in range(0, n_prime):
ind = np.random.choice(len(X_train))
bag_X[j] = X_train[ind]
bag_y = bag_y.append(y_train.iloc[ind])
dup_pops_df = bag_y.groupby(["pops"]).agg(["count"])
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and (dup_pops_df[("samples", "count")] > 1).all()
):
# Create validation set from training set
bag_X, X_val, bag_y, y_val = train_test_split(
bag_X, bag_y, stratify=bag_y["pops"],
train_size=train_prop
)
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and pd.Series(popnames).isin(y_val["pops"]).all()
):
good_bag = True
enc = OneHotEncoder(handle_unknown="ignore")
bag_y_enc = enc.fit_transform(
bag_y["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
if mod_path is None:
model = tf.Sequential()
model.add(tf.layers.BatchNormalization(
input_shape=(bag_X.shape[1],)))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dropout(0.25))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(len(popnames), activation="softmax"))
aopt = tf.optimizers.Adam(lr=0.0005)
model.compile(
loss="categorical_crossentropy",
optimizer=aopt,
metrics="accuracy"
)
else:
model = tf.models.load_model(mod_path + "/best_mod")
# Create callbacks
checkpointer = tf.callbacks.ModelCheckpoint(
filepath=save_dir + "/checkpoint.h5",
verbose=1,
# save_best_only=True,
save_weights_only=True,
monitor="val_loss",
# monitor="loss",
save_freq="epoch",
)
earlystop = tf.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=patience
)
reducelr = tf.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.2,
patience=int(patience / 3),
verbose=1,
mode="auto",
min_delta=0,
cooldown=0,
min_lr=0,
)
callback_list = [checkpointer, earlystop, reducelr]
# Train model
history = model.fit(
bag_X - 1,
bag_y_enc,
batch_size=int(batch_size),
epochs=int(max_epochs),
callbacks=callback_list,
validation_data=(X_val - 1, y_val_enc),
verbose=0,
)
# Load best model
model.load_weights(save_dir + "/checkpoint.h5")
if not save_weights:
os.remove(save_dir + "/checkpoint.h5")
# plot training history
if plot_history:
plt.switch_backend("agg")
fig = plt.figure(figsize=(3, 1.5), dpi=200)
plt.rcParams.update({"font.size": 7})
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.plot(
history.history["val_loss"][3:],
"--",
color="black",
lw=0.5,
label="Validation Loss",
)
ax1.plot(
history.history["loss"][3:],
"-",
color="black",
lw=0.5,
label="Training Loss",
)
ax1.set_xlabel("Epoch")
ax1.legend()
fig.savefig(
save_dir + "/model" + str(i) + "_history.pdf",
bbox_inches="tight"
)
plt.close()
test_loss, test_acc = model.evaluate(X_test - 1, y_test_enc)
yhats.append(model.predict(X_test - 1))
test_df = pd.DataFrame(model.predict(X_test - 1))
test_df.columns = popnames
test_df["sampleID"] = y_test_samples
test_df["true_pops"] = y_test_pops
test_df["bag"] = i
test_dict["count"].append(i)
test_dict["df"].append(test_df)
# Fill test lists with information
TEST_LOSS.append(test_loss)
TEST_ACCURACY.append(test_acc)
if predict:
ypreds.append(model.predict(ukgen))
tmp_df = pd.DataFrame(model.predict(ukgen))
tmp_df.columns = popnames
tmp_df["sampleID"] = uksamples
tmp_df["bag"] = i
pred_dict["count"].append(i)
pred_dict["df"].append(tmp_df)
# Find top populations for each sample
top_pops["df"].append(i)
top_pops["pops"].append(
pred_dict["df"][i].iloc[
:, 0:len(popnames)
].idxmax(axis=1)
)
# Collect yhats and ypreds for weighted ensemble
yhats = np.array(yhats)
if predict:
ypreds = np.array(ypreds)
# Get ensemble accuracy
tot_bag_df = test_dict["df"][0].iloc[
:, 0:len(popnames)
].copy()
for i in range(1, len(test_dict["df"])):  # bag 0 is already in tot_bag_df
tot_bag_df += test_dict["df"][i].iloc[:, 0:len(popnames)]
# Average the bag probabilities so values stay between 0 and 1
tot_bag_df = tot_bag_df / nbags
tot_bag_df["top_samp"] = tot_bag_df.idxmax(axis=1)
tot_bag_df["sampleID"] = test_dict["df"][0]["sampleID"]
tot_bag_df["true_pops"] = test_dict["df"][0]["true_pops"]
ENSEMBLE_TEST_ACCURACY = np.sum(
tot_bag_df["top_samp"] == tot_bag_df["true_pops"]
) / len(tot_bag_df)
tot_bag_df.to_csv(save_dir + "/ensemble_test_results.csv")
if predict:
top_pops_df = pd.DataFrame(top_pops["pops"])
top_pops_df.columns = uksamples
top_freqs = {"sample": [], "freq": []}
for samp in uksamples:
top_freqs["sample"].append(samp)
top_freqs["freq"].append(
top_pops_df[samp].value_counts() / len(top_pops_df)
)
# Save frequencies to csv for plotting
top_freqs_df =
| pd.DataFrame(top_freqs["freq"]) | pandas.DataFrame |
'''GDELTeda.py
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
Class for collecting Pymongo and Pandas operations to automate EDA on
subsets of GDELT records (Events/Mentions, GKG, or joins).
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations. If those directories are not
already present, a fallback method for string-literal directory reorientation
may be found in '__init__()' at this tag: # A02b - Project directory path.
Specification for any given user's main project directory should be made for
that os.chdir() call.
See also GDELTbase.py, tag # A01a - backup path specification, as any given
user's project directory must be specified there, also.
Contents:
A00 - GDELTeda
A01 - shared class data
A02 - __init__ with instanced data
A02a - Project directory maintenance
A02b - Project directory path specification
Note: Specification at A02b should be changed to suit a user's desired
directory structure, given their local filesystem.
B00 - class methods
B01 - batchEDA()
B02 - eventsBatchEDA()
B03 - mentionsBatchEDA()
B04 - gkgBatchEDA()
Note: see GDELTedaGKGhelpers.py for helper function code & docs
B05 - realtimeEDA()
B06 - loopEDA()
C00 - main w/ testing
C01 - previously-run GDELT realtime EDA testing
'''
import json
import multiprocessing
import numpy as np
import os
import pandas as pd
import pymongo
import shutil
import wget
from datetime import datetime, timedelta, timezone
from GDELTbase import GDELTbase
from GDELTedaGKGhelpers import GDELTedaGKGhelpers
from pandas_profiling import ProfileReport
from pprint import pprint as pp
from time import time, sleep
from urllib.error import HTTPError
from zipfile import ZipFile as zf
# A00
class GDELTeda:
'''Collects Pymongo and Pandas operations for querying GDELT records
subsets and performing semi-automated EDA.
Shared class data:
-----------------
logPath - dict
Various os.path objects for EDA log storage.
configFilePaths - dict
Various os.path objects for pandas_profiling.ProfileReport
configuration files, copied to EDA log storage directories upon
__init__, for use in report generation.
Instanced class data:
--------------------
gBase - GDELTbase instance
Used for class member functions, essential for realtimeEDA().
Class methods:
-------------
batchEDA()
eventsBatchEDA()
mentionsBatchEDA()
gkgBatchEDA()
realtimeEDA()
loopEDA()
Helper functions from GDELTedaGKGhelpers.py used in gkgBatchEDA():
pullMainGKGcolumns()
applyDtypes()
convertDatetimes()
convertGKGV15Tone()
mainReport()
locationsReport()
countsReport()
themesReport()
personsReport()
organizationsReport()
'''
# A01 - shared class data
# These paths are set relative to the location of this script, one directory
# up and in 'EDAlogs' parallel to the script directory, which can be named
# arbitrarily.
logPath = {}
logPath['base'] = os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'EDAlogs')
logPath['events'] = {}
logPath['events'] = {
'table' : os.path.join(logPath['base'], 'events'),
'batch' : os.path.join(logPath['base'], 'events', 'batch'),
'realtime' : os.path.join(logPath['base'], 'events', 'realtime'),
}
logPath['mentions'] = {
'table' : os.path.join(logPath['base'], 'mentions'),
'batch' : os.path.join(logPath['base'], 'mentions', 'batch'),
'realtime' : os.path.join(logPath['base'], 'mentions', 'realtime'),
}
logPath['gkg'] = {
'table' : os.path.join(logPath['base'], 'gkg'),
'batch' : os.path.join(logPath['base'], 'gkg', 'batch'),
'realtime' : os.path.join(logPath['base'], 'gkg', 'realtime'),
}
# Turns out, the following isn't the greatest way of keeping track
# of each configuration file. It's easiest to just leave them in the
# exact directories where ProfileReport.to_html() is aimed (via
# os.chdir()), since it's pesky maneuvering outside parameters into
# multiprocessing Pool.map() calls.
# Still, these can and are used in realtimeEDA(), since the size of
# just the most recent datafiles should permit handling them without
# regard for Pandas DataFrame RAM impact (it's greedy, easiest method
# for mitigation is multiprocessing threads, that shouldn't be
# necessary for realtimeEDA()).
# Regardless, all these entries are for copying ProfileReport config
# files to their appropriate directories for use, given base-copies
# present in the 'scripts' directory. Those base copies may be edited
# in 'scripts', since each file will be copied from there.
configFilePaths = {}
configFilePaths['events'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_realtime.yaml"),
}
configFilePaths['mentions'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_realtime.yaml"),
}
configFilePaths['gkg'] = {}
configFilePaths['gkg']['batch'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_batch.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_batch.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_batch.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_batch.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_batch.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_batch.yaml"),
}
configFilePaths['gkg']['realtime'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_realtime.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_realtime.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_realtime.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_realtime.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_realtime.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_realtime.yaml"),
}
# A02
def __init__(self, tableList = ['events', 'mentions', 'gkg']):
'''GDELTeda class initialization, takes a list of GDELT tables to
perform EDA on. Instantiates a GDELTbase() instance for use by class
methods and checks for presence of EDAlogs directories, creating them if
they aren't present, and copying all ProfileReport-required config files
to their applicable directories.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Controls detection and creation of .../EDALogs/... subdirectories for
collection of Pandas Profiling ProfileReport HTML EDA document output.
Also controls permission for class member functions to perform
operations on tables specified by those functions' tableList parameters
as a failsafe against a lack of project directories required for those
operations, specifically output of HTML EDA documents.
output:
------
Produces exhaustive EDA for GDELT record subsets for specified tables
through Pandas Profiling ProfileReport-output HTML documents.
All procedurally automated steps towards report generation are shown
in console output during script execution.
'''
# instancing tables for operations to be passed to member functions
self.tableList = tableList
print("Instantiating GDELTeda...\n")
self.gBase = GDELTbase()
if 'events' not in tableList and \
'mentions' not in tableList and \
'gkg' not in tableList:
print("Error! 'tableList' values do not include a valid GDELT table.",
"\nPlease use one or more of 'events', 'mentions', and/or 'gkg'.")
# instancing trackers for realtimeEDA() and loopEDA()
self.realtimeStarted = False
self.realtimeLooping = False
self.realtimeWindow = 0
self.lastRealDatetime = ''
self.nextRealDatetime = ''
# A02a - Project EDA log directories confirmation and/or creation, and
# Pandas Profiling ProfileReport configuration file copying from 'scripts'
# directory.
print(" Checking log directory...")
if not os.path.isdir(self.logPath['base']):
print(" Doesn't exist! Making...")
# A02b - Project directory path
# For obvious reasons, any user of this script should change this
# string to suit their needs. The directory described with this string
# should be one directory above the location of the 'scripts' directory
# this file should be in. If this file is not in 'scripts', unpredictable
# behavior may occur, and no guarantees of functionality are intended for
# such a state.
os.chdir('C:\\Users\\urf\\Projects\\WGU capstone')
os.mkdir(self.logPath['base'])
for table in tableList:
# switch to EDAlogs directory
os.chdir(self.logPath['base'])
# Branch: table subdirectories not found, create all
if not os.path.isdir(self.logPath[table]['table']):
print("Did not find .../EDAlogs/", table, "...")
print(" Creating .../EDAlogs/", table, "...")
os.mkdir(self.logPath[table]['table'])
os.chdir(self.logPath[table]['table'])
print(" Creating .../EDAlogs/", table, "/batch")
os.mkdir(self.logPath[table]['batch'])
print(" Creating .../EDAlogs/", table, "/realtime")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Branch: table subdirectories found, create batch/realtime directories
# if not present.
else:
print(" Found .../EDAlogs/", table,"...")
os.chdir(self.logPath[table]['table'])
if not os.path.isdir(self.logPath[table]['batch']):
print(" Did not find .../EDAlogs/", table, "/batch , creating...")
os.mkdir(self.logPath[table]['batch'])
if not os.path.isdir(self.logPath[table]['realtime']):
print(" Did not find .../EDAlogs/", table, "/realtime , creating...")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Copying pandas_profiling.ProfileReport configuration files
print(" Copying configuration files...\n")
if table == 'gkg':
# There's a lot of these, but full normalization of GKG is
# prohibitively RAM-expensive, so reports need to be generated for
# both the main columns and the main columns normalized for each
# variable-length subfield.
shutil.copy(self.configFilePaths[table]['realtime']['main'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['locations'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['counts'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['themes'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['persons'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['organizations'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['main'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['locations'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['counts'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['themes'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['persons'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['organizations'],
self.logPath[table]['batch'])
else:
shutil.copy(self.configFilePaths[table]['realtime'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch'],
self.logPath[table]['batch'])
# B00 - class methods
# B01
def batchEDA(self, tableList = ['events','mentions','gkg']):
'''Reshapes and re-types GDELT records for generating Pandas
Profiling ProfileReport()-automated, simple EDA reports from Pandas
DataFrames, from MongoDB-query-cursors.
WARNING: extremely RAM, disk I/O, and processing intensive. Be aware of
what resources are available for these operations at runtime.
Relies on Python multiprocessing.Pool.map() calls against class member
functions eventsBatchEDA() and mentionsBatchEDA(), and a regular call on
gkgBatchEDA(), which uses multiprocessing.Pool.map() calls within it.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting analysis to one or more tables.
Output:
------
Displays progress through the function's operations via console output
while producing Pandas Profiling ProfileReport.to_file() HTML documents
for each table named in tableList.
'''
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
for table in tableList:
print("\n------------------------------------------------------------\n")
print("Executing batch EDA on GDELT table", table, "records...")
# WARNING: RAM, PROCESSING, and DISK I/O INTENSIVE
# Events and Mentions are both much easier to handle than GKG, so
# they're called in their own collective function threads with
# multiprocessing.Pool(1).map().
if table == 'events':
os.chdir(self.logPath['events']['batch'])
pool = multiprocessing.Pool(1)
eventsReported = pool.map(self.eventsBatchEDA(), ['batch'])
pool.close()
pool.join()
if table == 'mentions':
os.chdir(self.logPath['mentions']['batch'])
pool = multiprocessing.Pool(1)
mentionsReported = pool.map(self.mentionsBatchEDA(), ['batch'])
pool.close()
pool.join()
if table == 'gkg':
# Here's the GKG bottleneck! Future investigation of parallelization
# improvements may yield gains here, as normalization of all subfield
# and variable-length measures is very RAM expensive, given the
# expansion in records required.
# So, current handling of GKG subfield and variable-length measures
# is isolating most operations in their own process threads within
# gkgBatchEDA() execution, forcing deallocation of those resources upon
# each Pool.close(), as with Events and Mentions table operations above
# which themselves do not require any additional subfield handling.
os.chdir(self.logPath['gkg']['batch'])
self.gkgBatchEDA()
# B02
def eventsBatchEDA(mode):
'''Performs automatic EDA on GDELT Events record subsets. See
function batchEDA() for "if table == 'events':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Events records up to at least the size of the batch EDA test subset used
in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter exists only to satisfy the signature required by Python
multiprocessing.Pool.map(): its value is supplied by map() and is otherwise
unused, so exactly one iteration of the function will execute.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
columnNames = [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat':
| pd.StringDtype() | pandas.StringDtype |
import pickle
import numpy as np, pandas as pd, matplotlib as mpl
from matplotlib import dates as mdates
# # Use environment rws_dev
# from sys import path
# for extra in ["C:/Users/mphum/GitHub/koolstof", "C:/Users/mphum/GitHub/calkulate"]:
# if extra not in path:
# path.append(extra)
import koolstof as ks
mpl.rcParams["date.epoch"] = "1970-01-01T00:00:00"
#%% Import station positions
allstations =
| pd.read_excel("data/Coordinaten_verzuring_20190429.xlsx") | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Analyse results of a testing run.
It prints a one-line summary of each trace, plus some general statistics.
Optionally, it can also report how many rows were added to each database table.
@author: <EMAIL>
"""
import pandas as pd
import argparse
from pathlib import Path
import textwrap
import agilkia
def read_database_changes(before_csv: str, after_csv: str) -> pd.DataFrame:
"""Reads two files of database row counts and calculates tuples added to each table.
Args:
before_csv: name of CSV file containing the 'before' counts.
after_csv: name of CSV file containing the 'after' counts.
Returns:
A Pandas table with an 'added' column for how many rows were added to each table.
"""
before = pd.read_csv(before_csv)
after = pd.read_csv(after_csv)
col_msg = "ERROR: {} must have columns 'name', 'row_count', ..."
if list(before.columns[0:2]) != ["name", "row_count"]:
print(col_msg.format(before_csv))
if list(after.columns[0:2]) != ["name", "row_count"]:
print(col_msg.format(after_csv))
# we use inner join to get the intersection of the two sets of tables.
changes = pd.merge(before, after, how="inner", on="name", suffixes=("_before", "_after"))
changes["added"] = changes["row_count_after"] - changes["row_count_before"]
return changes
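# --- Illustrative sketch (not part of the original script) ---
# A tiny, self-contained usage example for read_database_changes(): two
# hypothetical row-count snapshots are written to temporary CSV files and the
# 'added' column is computed from them. The table names, counts, and helper
# name are assumptions added for illustration only.
def _demo_read_database_changes():
    import os
    import tempfile
    snapshots = ["name,row_count\norders,10\ncustomers,5\n",
                 "name,row_count\norders,13\ncustomers,5\n"]
    paths = []
    for text in snapshots:
        fd, path = tempfile.mkstemp(suffix=".csv")
        with os.fdopen(fd, "w") as handle:
            handle.write(text)
        paths.append(path)
    changes = read_database_changes(paths[0], paths[1])
    # 'added' is 3 for orders and 0 for customers
    return changes[["name", "added"]]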
def make_action_status_table(df: pd.DataFrame) -> pd.DataFrame:
"""From TraceSet DataFrame, creates a table of Actions showing how many got Ok vs Error."""
ok = df[df.Status == 0].groupby("Action").size()
err = df[df.Status != 0].groupby("Action").size()
data = pd.DataFrame({"Ok": ok, "Err": err})
data.fillna(0, inplace=True, downcast="infer")
data["Total"] = data.Ok + data.Err
totals = data.sum().rename("Total")
# add Totals row at bottom
data = data.append(totals)
# total = df.shape[0] # number of rows = total event count
# percents = (totals * 100.0 / total).rename("Percent")
# data = data.append(percents)
return data
def main():
"""A command line program that gives an overview of a set of generated traces."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-a", "--after", help="database row counts AFTER testing (*.csv)")
parser.add_argument("-b", "--before", help="database row counts BEFORE testing (*.csv)")
parser.add_argument("-c", "--chars", help="name of action-to-CHAR mapping file (*.csv)")
parser.add_argument("-r", "--repeats", help="remove REPEATS of this action")
parser.add_argument("-s", "--status", help="show STATUS in color (red=error)",
action="store_true")
parser.add_argument("traces", help="traces file (*.json)")
args = parser.parse_args()
# print(f"Args are:", args)
if args.before and args.after:
changes = read_database_changes(args.before, args.after)
nonzero = changes[changes.added > 0].sort_values(by="added", ascending=False)
print("==== database changes ====")
print(nonzero)
traceset = agilkia.TraceSet.load_from_json(Path(args.traces))
actions = agilkia.all_action_names(traceset.traces)
if args.chars:
mapfile =
| pd.read_csv(args.chars, header=None) | pandas.read_csv |
#!/usr/bin/env python
"""
analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator
# Global var on Levels on spatial and temporal axis
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']
def elasticsearch_query(query_fname, logger):
"""
Build an Elasticsearch query and return a defaultdict with results
:return: tweetsByCityAndDate
"""
# Elastic search credentials
client = Elasticsearch("http://localhost:9200")
es_logger.setLevel(logging.WARNING)
index = "twitter"
# Define a Query
query = open(query_fname, "r").read()
result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
# Append all pages from the scroll search: avoids the 10k limitation of Elasticsearch
results = avoid10kquerylimitation(result, client, logger)
# Initiate a dict for each city append all Tweets content
tweetsByCityAndDate = defaultdict(list)
for hits in results:
# parse Java date : EEE MMM dd HH:mm:ss Z yyyy
inDate = hits["_source"]["created_at"]
parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
try:  # geocoding may be bad
geocoding = hits["_source"]["rest"]["features"][0]["properties"]
except:
continue  # skip this iteration
if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
# localities do not necessarily have an associated state
try:
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no state in geocoding
try:
logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no city as well : only country
# print(json.dumps(hits["_source"], indent=4))
try: #
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except:
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str("none")
try:
tweetsByCityAndDate[cityStateCountry].append(
{
"tweet": preprocessTweets(hits["_source"]["full_text"]),
"created_at": parseDate
}
)
except:
print(json.dumps(hits["_source"], indent=4))
# biotexInputBuilder(tweetsByCityAndDate)
# pprint(tweetsByCityAndDate)
return tweetsByCityAndDate
def avoid10kquerylimitation(result, client, logger):
"""
Elasticsearch limits query results to 10,000 hits. To avoid this limit, we need to paginate results with the scroll API.
This method appends all pages from the scroll search.
:param result: a result of an Elasticsearch query
:return:
"""
scroll_size = result['hits']['total']["value"]
logger.info("Number of elasticsearch scroll: " + str(scroll_size))
results = []
# Progress bar
pbar = tqdm(total=scroll_size)
while (scroll_size > 0):
try:
scroll_id = result['_scroll_id']
res = client.scroll(scroll_id=scroll_id, scroll='60s')
results += res['hits']['hits']
scroll_size = len(res['hits']['hits'])
pbar.update(scroll_size)
except:
pbar.close()
logger.error("elasticsearch search scroll failed")
break
pbar.close()
return results
def preprocessTweets(text):
"""
1 - Clean up tweet text, cf.: https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1
2 - Language detection (not implemented in this function)
3 - Stopword removal (left to the vectorizers' stop_words option)
:param text:
:return: textclean, the cleaned tweet text
"""
## 1 - clean up tweets
# remove URLs
textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text)
textclean = re.sub(r'http\S+', '', textclean)
# remove usernames
# textclean = re.sub('@[^\s]+', '', textclean)
# remove the # in #hashtag
# textclean = re.sub(r'#([^\s]+)', r'\1', textclean)
return textclean
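# --- Illustrative sketch (not part of the original script) ---
# preprocessTweets() currently only strips URLs; usernames and hashtags are left
# in place (see the commented-out lines above). A quick check on a made-up
# tweet; the text and helper name are assumptions for illustration only.
def _demo_preprocess_tweets():
    cleaned = preprocessTweets("Flood warning http://example.com #flood @user")
    return cleaned  # URL removed, '#flood' and '@user' kept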
def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
"""
Create a matrix of :
- line : (city,day)
- column : terms
- value of cells : TF (term frequency)
Help found here :
http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
:param tweetsofcity:
:param matrixAggDay_fout: file to save
:param matrixOccurence_fout: file to save
:return:
"""
# initiate matrix of tweets aggregate by day
# col = ['city', 'day', 'tweetsList', 'bow']
col = ['city', 'day', 'tweetsList']
matrixAggDay = pd.DataFrame(columns=col)
cityDayList = []
logger.info("start full_text concatenation for city & day")
pbar = tqdm(total=len(tweetsofcity))
for city in tweetsofcity:
# create a table with 2 columns : tweet and created_at for a specific city
matrix = pd.DataFrame(tweetsofcity[city])
# Aggregate list of tweets by single day for specifics cities
## Loop on days for a city
period = matrix['created_at'].dt.date
period = period.unique()
period.sort()
for day in period:
# aggregate city and date document
document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
# Bag of Words and preprocces
# preproccesFullText = preprocessTerms(document)
tweetsOfDayAndCity = {
'city': city,
'day': day,
'tweetsList': document
}
cityDayList.append(city + "_" + str(day))
try:
matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True)
except:
print("full_text empty after pre-process: "+document)
continue
pbar.update(1)
pbar.close()
if save_intermediaire_files:
logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
matrixAggDay.to_csv(matrixAggDay_fout)
# Count terms with sci-kit learn
cd = CountVectorizer(
stop_words='english',
#preprocessor=sklearn_vectorizer_no_number_preprocessor,
#min_df=2, # token at least present in 2 cities : reduce size of matrix
max_features=25000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#@]+',  # remove user names, i.e. terms starting with @, for personal data reasons
# strip_accents= "ascii" # remove token with special character (trying to keep only english word)
)
cd.fit(matrixAggDay['tweetsList'])
res = cd.transform(matrixAggDay["tweetsList"])
countTerms = res.todense()
# create matrix
## get terms :
# voc = cd.vocabulary_
# listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
listOfTerms = cd.get_feature_names()
##initiate matrix with count for each terms
matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
# save to file
if save_intermediaire_files:
logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
matrixOccurence.to_csv(matrixOccurence_fout)
return matrixOccurence
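# --- Illustrative sketch (not part of the original script) ---
# matrixOccurenceBuilder() ends with a DataFrame indexed by "city_day" labels
# with one column per term, holding raw term counts from CountVectorizer. The
# toy example below reproduces that construction on two tiny documents; the
# documents, index labels, and helper name are assumptions for illustration
# only (get_feature_names() matches the scikit-learn version used elsewhere in
# this script; newer releases call it get_feature_names_out()).
def _demo_occurrence_matrix():
    import pandas as pd
    from sklearn.feature_extraction.text import CountVectorizer
    docs = ["flood in paris today", "flood warning in london today"]
    index = ["Paris_2020-01-23", "London_2020-01-23"]
    cv = CountVectorizer(stop_words='english', token_pattern='[a-zA-Z0-9#@]+')
    counts = cv.fit_transform(docs)
    return pd.DataFrame(counts.todense(), index=index,
                        columns=cv.get_feature_names())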
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return: matrix filtred
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
###Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
##period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
period='all', temporalLevel='day'):
"""
Aggregate on spatial and temporel and then compute TF-IDF
:param matrixOcc: Matrix with TF already compute
:param listOfcities: filter on this cities
:param spatialLevel: city / state / country / world
:param period: Filter on this period
:param temporalLevel: day / week (month have to be implemented)
:return:
"""
matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
spatialLevel='state', period=period)
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
if temporalLevel == 'day':
## In space
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("state").sum()
elif spatialLevel == 'country' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("country").sum()
elif temporalLevel == "week":
matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime
## in space and time
if spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum()
# Compute TF-IDF
## compute TF : for each doc, divide each count by the sum of all counts
### Sum of all counts by row
matrixOcc['sumCount'] = matrixOcc.sum(axis=1)
### Divide each cell by these sums
listOfTerms = matrixOcc.keys()
matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0)
## Compute IDF : create a vector of length = nb of terms with the IDF value
idf = pd.Series(index=matrixOcc.keys(), dtype=float)
### N : nb of documents <=> nb of rows :
N = matrixOcc.shape[0]
### DFt : nb of document that contains the term
DFt = matrixOcc.astype(bool).sum(axis=0)  # Tip : convert all values to boolean; float 0.0 will be False, others True
#### Not a Number when value 0 because otherwise log is infinite
DFt.replace(0, np.nan, inplace=True)
### compute log(N/DFt)
idf = np.log10(N / (DFt))
# idf = np.log10( N / (DFt * 10))
## compute TF-IDF
matrixTFIDF = matrixOcc * idf
# matrixTFIDF = matrixOcc * idf * idf
## remove terms if for all documents value are Nan
matrixTFIDF.dropna(axis=1, how='all', inplace=True)
# Save file
matrixTFIDF.to_csv(matrixHTFIDF_fname)
# Export N biggest TF-IDF score:
top_n = 500
extractBiggest = pd.DataFrame(index=matrixTFIDF.index, columns=range(0, top_n))
for row in matrixTFIDF.index:
try:
row_without_zero = matrixTFIDF.loc[row]# we remove term with a score = 0
row_without_zero = row_without_zero[ row_without_zero !=0 ]
try:
extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
except:
extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
except:
logger.debug("H-TFIDF: city "+str(matrixTFIDF.loc[row].name)+ "not enough terms")
extractBiggest.to_csv(biggestHTFIDFscore_fname+".old.csv")
# Transpose this table in order to share the same structure with the classical TF-IDF biggest score :
hbt = pd.DataFrame()
extractBiggest = extractBiggest.reset_index()
for index, row in extractBiggest.iterrows():
hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
hbtrow[spatialLevel] = row[spatialLevel]
hbtrow["date"] = row["date"]
hbt = hbt.append(hbtrow, ignore_index=True)
hbt.to_csv(biggestHTFIDFscore_fname)
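# --- Illustrative sketch (not part of the original script) ---
# HTFIDF() weights each cell of the aggregated count matrix as
#   TF  = count / (sum of counts in the row)
#   IDF = log10(N / DFt), with N = number of rows and DFt = number of rows
#         containing the term (0 counts are turned into NaN first).
# The toy matrix below reproduces that computation; the localities, counts,
# and helper name are assumptions added for illustration only.
def _demo_htfidf_weighting():
    import numpy as np
    import pandas as pd
    counts = pd.DataFrame({"flood": [3, 0], "rain": [1, 2]},
                          index=["England", "Wales"])
    tf = counts.div(counts.sum(axis=1), axis=0)            # England/flood = 3/4
    dft = counts.astype(bool).sum(axis=0).replace(0, np.nan)
    idf = np.log10(len(counts) / dft)                      # flood: log10(2), rain: log10(1) = 0
    return tf * idf                                        # only England/flood keeps a non-zero score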
def TFIDF_TF_with_corpus_state(elastic_query_fname, logger, save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved="./",
spatial_hiearchy="country", temporal_period='all', listOfCities='all'):
"""
Compute TFIDF and TF from an elastic query file
1 doc = 1 tweet
Corpus = by hierarchy level, i.e. state or country
:param elastic_query_fname: filename and path of the elastic query
:param logger: logger of the main program
:param nb_biggest_terms: How many of the biggest terms to keep
:param spatial_hiearchy: define the size of the corpus : state or country
:param temporal_period:
:param listOfCities: If you want to filter out some cities, you can
:return:
"""
# tfidfStartDate = date(2020, 1, 23)
# tfidfEndDate = date(2020, 1, 30)
# temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
# listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
tweets = elasticsearch_query(elastic_query_fname, logger)
if listOfCities == 'all':
listOfCities = []
listOfStates = []
listOfCountry = []
for triple in tweets:
splitted = triple.split("_")
listOfCities.append(splitted[0])
listOfStates.append(splitted[1])
listOfCountry.append(splitted[2])
listOfCities = list(set(listOfCities))
listOfStates = list(set(listOfStates))
listOfCountry = list(set(listOfCountry))
# reorganize tweets (dict : tweets by cities) into a dataframe (city and date)
matrixAllTweets = pd.DataFrame()
for tweetByCity in tweets.keys():
# Filter cities :
city = str(tweetByCity).split("_")[0]
state = str(tweetByCity).split("_")[1]
country = str(tweetByCity).split("_")[2]
if city in listOfCities:
matrix = pd.DataFrame(tweets[tweetByCity])
matrix['city'] = city
matrix['state'] = state
matrix['country'] = country
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
if temporal_period != "all":
mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
# Compute TF-IDF and TF by state
extractBiggestTF_allstates = pd.DataFrame()
extractBiggestTFIDF_allstates = pd.DataFrame()
if spatial_hiearchy == "country":
listOfLocalities = listOfCountry
elif spatial_hiearchy == "state":
listOfLocalities = listOfStates
elif spatial_hiearchy == "city":
listOfLocalities = listOfCities
for locality in listOfLocalities:
matrix_by_locality = matrixAllTweets[matrixAllTweets[spatial_hiearchy] == locality]
vectorizer = TfidfVectorizer(
stop_words='english',
min_df=0.001,
# max_features=50000,
ngram_range=(1, 1),
token_pattern='[<KEY>',
)
# logger.info("Compute TF-IDF on corpus = "+spatial_hiearchy)
try:
vectors = vectorizer.fit_transform(matrix_by_locality['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
except:
logger.info("Impossible to compute TF-IDF on: "+locality)
continue
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
locality_format = locality.replace("/", "_")
locality_format = locality_format.replace(" ", "_")
if save_intermediaire_files:
logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
## Extract N TOP ranking score
extractBiggest = TFIDFClassical.max().nlargest(nb_biggest_terms)
extractBiggest = extractBiggest.to_frame()
extractBiggest = extractBiggest.reset_index()
extractBiggest.columns = ['terms', 'score']
extractBiggest[spatial_hiearchy] = locality
extractBiggestTFIDF_allstates = extractBiggestTFIDF_allstates.append(extractBiggest, ignore_index=True)
"""
# Compute TF
tf = CountVectorizer(
stop_words='english',
min_df=2,
ngram_range=(1,2),
token_pattern='[a-zA-Z0-9@#]+',
)
try:
tf.fit(matrix_by_locality['tweet'])
tf_res = tf.transform(matrix_by_locality['tweet'])
listOfTermsTF = tf.get_feature_names()
countTerms = tf_res.todense()
except:# locality does not have enough different term
logger.info("Impossible to compute TF on: "+locality)
continue
## matrixTF
TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
### save in file
logger.info("saving TF File: "+path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
TFClassical.to_csv(path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
## Extract N TOP ranking score
extractBiggestTF = TFClassical.max().nlargest(nb_biggest_terms)
extractBiggestTF = extractBiggestTF.to_frame()
extractBiggestTF = extractBiggestTF.reset_index()
extractBiggestTF.columns = ['terms', 'score']
extractBiggestTF[spatial_hiearchy] = locality
extractBiggestTF_allstates = extractBiggestTF_allstates.append(extractBiggestTF, ignore_index=True)
"""
logger.info("saving TF and TF-IDF top"+str(nb_biggest_terms)+" biggest score")
extractBiggestTF_allstates.to_csv(path_for_filesaved+"/TF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
extractBiggestTFIDF_allstates.to_csv(path_for_filesaved+"/TF-IDF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
def TFIDF_TF_on_whole_corpus(elastic_query_fname, logger, save_intermediaire_files, path_for_filesaved="./",
temporal_period='all', listOfCities='all'):
"""
Compute TFIDF and TF from an elastic query file
1 doc = 1 tweet
Corpus = the whole elastic query (with cities that are not in listOfCities filtered out)
:param elastic_query_fname: filename and path of the elastic query
:param logger: logger of the main program
:param nb_biggest_terms: How many of the biggest terms to keep. It has to be greater than for H-TF-IDF or
classical TF-IDF on corpus = locality, because a lot of terms have 1.0 as the score
:param spatial_hiearchy: define the size of the corpus : state or country
:param temporal_period:
:param listOfCities: If you want to filter out some cities, you can
:return:
"""
# tfidfStartDate = date(2020, 1, 23)
# tfidfEndDate = date(2020, 1, 30)
# temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
# listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
# Query Elasticsearch to get all tweets from UK
tweets = elasticsearch_query(elastic_query_fname, logger)
if listOfCities == 'all':
listOfCities = []
listOfStates = []
listOfCountry = []
for triple in tweets:
splitted = triple.split("_")
listOfCities.append(splitted[0])
listOfStates.append(splitted[1])
listOfCountry.append(splitted[2])
listOfCities = list(set(listOfCities))
listOfStates = list(set(listOfStates))
listOfCountry = list(set(listOfCountry))
# reorganize tweets (dict : tweets by cities) into a dataframe (city and date)
matrixAllTweets = pd.DataFrame()
for tweetByCity in tweets.keys():
# Filter cities :
city = str(tweetByCity).split("_")[0]
state = str(tweetByCity).split("_")[1]
country = str(tweetByCity).split("_")[2]
if city in listOfCities:
matrix = pd.DataFrame(tweets[tweetByCity])
matrix["country"] = country
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
if temporal_period != "all":
mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
vectorizer = TfidfVectorizer(
stop_words='english',
min_df=0.001,
# max_features=50000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#]+',  # remove user names, i.e. terms starting with @, for personal data reasons
)
try:
vectors = vectorizer.fit_transform(matrixAllTweets['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
except:
logger.info("Impossible to compute TF-IDF")
exit(-1)
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
TFIDFClassical["country"] = matrixAllTweets["country"]
if save_intermediaire_files:
logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_whole_corpus.csv")
TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_whole_corpus.csv")
extractBiggest = pd.DataFrame()
for term in TFIDFClassical.keys():
try:
index = TFIDFClassical[term].idxmax()
score = TFIDFClassical[term].max()
country = TFIDFClassical.iloc[index]["country"]
row = {
'terms': term,
'score': score,
'country': country
}
extractBiggest = extractBiggest.append(row, ignore_index=True)
except:
logger.info(term+' : '+str(index)+" : "+str(score)+" : "+country)
## Extract N TOP ranking score
# extractBiggest = TFIDFClassical.max()
extractBiggest = extractBiggest[extractBiggest['score'] == 1] # we keep only term with high score TF-IDF, i.e 1.0
# extractBiggest = extractBiggest.to_frame()
# extractBiggest = extractBiggest.reset_index()
# extractBiggest.columns = ['terms', 'score', 'country']
logger.info("saving TF-IDF top"+str(extractBiggest['terms'].size)+" biggest score")
extractBiggest.to_csv(path_for_filesaved+"/TFIDF_BiggestScore_on_whole_corpus.csv")
def logsetup(log_fname):
"""
Initiate a logger object :
- Log in file : collectweets.log
- also print on screen
:return: logger object
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s')
now = datetime.now()
file_handler = RotatingFileHandler(log_fname + "_" + now.strftime("%Y-%m-%d_%H-%M-%S") + ".log", 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
# Only display on screen INFO
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
def t_SNE_bert_embedding_visualization(biggest_score, logger, listOfLocalities="all", spatial_hieararchy="country",
plotname="colored by country", paht2save="./"):
"""
Plot t-SNE representation of terms by country
resources:
+ https://colab.research.google.com/drive/1FmREx0O4BDeogldyN74_7Lur5NeiOVye?usp=sharing#scrollTo=Fbq5MAv0jkft
+ https://github.com/UKPLab/sentence-transformers
:param biggest_score:
:param listOfLocalities:
:param spatial_hieararchy:
:param plotname:
:param paht2save:
:return:
"""
modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens')
# filter by localities (skip when keeping all of them)
if listOfLocalities != "all":
for locality in biggest_score[spatial_hieararchy].unique():
if locality not in listOfLocalities:
biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index)
embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True)
# embeddings.tofile(paht2save+"/tsne_bert-embeddings_"+plotname+"_matrix-embeddig")
modelTSNE = TSNE(n_components=2) # n_components means the lower dimension
low_dim_data = modelTSNE.fit_transform(embeddings)
label_tsne = biggest_score[spatial_hieararchy]
# Style Plots a bit
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5})
plt.rcParams['figure.figsize'] = (20, 14)
tsne_df = pd.DataFrame(low_dim_data, label_tsne)
tsne_df.columns = ['x', 'y']
ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
plt.setp(ax.get_legend().get_texts(), fontsize='40') # for legend text
plt.setp(ax.get_legend().get_title(), fontsize='50') # for legend title
plt.ylim(-100,100)
plt.xlim(-100, 100)
#ax.set_title('T-SNE BERT Sentence Embeddings for '+plotname)
plt.savefig(paht2save+"/tsne_bert-embeddings_"+plotname)
logger.info("file: "+paht2save+"/tsne_bert-embeddings_"+plotname+" has been saved.")
#plt.show()
plt.close()
# Perform kmean clustering
# num_clusters = 5
# clustering_model = KMeans(n_clusters=num_clusters)
# clustering_model.fit(embeddings)
# cluster_assignment = clustering_model.labels_
# Normalize the embeddings to unit length
corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
# Perform kmean clustering
clustering_model = AgglomerativeClustering(n_clusters=None,
distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
# clustered_sentences = [[] for i in range(num_clusters)]
# for sentence_id, cluster_id in enumerate(cluster_assignment):
# clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id])
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id])
#for i, cluster in enumerate(clustered_sentences):
# for i, cluster in clustered_sentences.items():
# print("Cluster ", i+1)
# print(cluster)
# print("")
def bert_embedding_filtred(biggest_score, listOfLocalities="all", spatial_hieararchy="country"):
"""
Retrieve embeddings of a matrix of terms (with the possibility of filtering by a list of localities)
:param biggest_score: pd.Datraframe with columns : [terms, country/state/city]
:param listOfLocalities:
:param spatial_hieararchy:
:return:
"""
modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens')
# filter by localities
if listOfLocalities != "all":
for locality in biggest_score[spatial_hieararchy].unique():
if locality not in listOfLocalities:
biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index)
embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True)
return embeddings
def similarity_intra_matrix_pairwise(matrix):
"""
Compute pairwise cosine similarity on the rows of a matrix and retrieve the unique score for each pair.
Indeed, pairwise cosine_similarity returns a matrix with duplication; let's take an example:
Number of terms: 4, cosine similarity:
w1 w2 w3 w4
+---+---+----+--+
w1 | 1 | | | |
w2 | | 1 | | |
w3 | | | 1 | |
w4 | | | | 1 |
+---+---+----+--+
(w1, w2) = (w2, w1), so we have to keep only : (number_of_terms)^2/2 - (number_of_terms)/2
for nb_term = 4 :
4*4/2 - 4/2 = 16/2 - 4/2 = 6 => we have 6 unique scores
:param matrix:
:return: list of unique similarity score
"""
similarity = cosine_similarity(sparse.csr_matrix(matrix))
similarity_1D = np.array([])
for i, row in enumerate(similarity):
similarity_1D = np.append(similarity_1D, row[i+1:]) # We remove duplicate pairwise value
return similarity_1D
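# --- Illustrative sketch (not part of the original script) ---
# For n rows, similarity_intra_matrix_pairwise() keeps only the upper triangle
# of the cosine-similarity matrix, i.e. n*(n-1)/2 unique pairwise scores
# (6 scores for the 4-term example in the docstring above). The check below
# uses a small random embedding matrix; the shape, seed, and helper name are
# assumptions for illustration only.
def _demo_unique_pairwise_count():
    import numpy as np
    rng = np.random.default_rng(0)
    matrix = rng.random((4, 8))             # 4 "terms" with 8-dimensional embeddings
    scores = similarity_intra_matrix_pairwise(matrix)
    n = matrix.shape[0]
    assert len(scores) == n * (n - 1) // 2  # 4*3/2 = 6 unique scores
    return scores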
def similarity_inter_matrix(matrix1, matrix2):
"""
Compute the cosine similarity between every row of matrix1 and every row of matrix2.
:param matrix1: 2D array-like, one embedding per row
:param matrix2: 2D array-like, one embedding per row
:return: 2D numpy array of shape (len(matrix1), len(matrix2))
"""
similarity = 1 - sp.distance.cdist(matrix1, matrix2, 'cosine')
return similarity
def clustering_terms(biggest, logger, cluster_f_out, listOfLocalities="all", spatial_hieararchy="country", method="kmeans"):
"""
Cluster the terms of a biggest-score DataFrame from their BERT embeddings and save the clusters as JSON.
:param biggest: pd.DataFrame with columns [terms, country/state/city]
:param method: "kmeans" or "agglomerative_clustering"
:return: -1 if the method is unknown, otherwise writes the clusters to cluster_f_out
"""
method_list = ["kmeans", "agglomerative_clustering"]
if method not in method_list:
logger.error("This method is not implemented for clustering: "+str(method))
return -1
# filter by localities
if listOfLocalities != "all":
for locality in biggest[spatial_hieararchy].unique():
if locality not in listOfLocalities:
biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index)
embeddings = bert_embedding_filtred(biggest)
if method == "kmeans":
# Perform kmean clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(embeddings)
cluster_assignment = clustering_model.labels_
elif method == "agglomerative_clustering":
# Normalize the embeddings to unit length
corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
# Perform Agglomerative clustering
clustering_model = AgglomerativeClustering(n_clusters=None,
distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if str(cluster_id) not in clustered_sentences:
clustered_sentences[str(cluster_id)] = []
clustered_sentences[str(cluster_id)].append(biggest['terms'].iloc[sentence_id])
with open(cluster_f_out, "w") as outfile:
json.dump(clustered_sentences, outfile)
logger.info("file " + cluster_f_out + " has been saved")
def geocoding_token(biggest, listOfLocality, spatial_hieararchy, logger):
"""
Find and geocode spatial entities with OSM data (Nominatim).
To respect the OSM / Nominatim terms of use:
- specify an application name, i.e. a user agent
- add a delay between queries: min_delay_seconds = 1.
  See https://geopy.readthedocs.io/en/stable/#module-geopy.extra.rate_limiter
- set a timeout of 10 seconds while waiting for a Nominatim answer
:param biggest: pd.DataFrame with a "terms" column to geocode
:return: biggest with an extra "geocode" column
"""
try:
if listOfLocality != "all":
for locality in biggest[spatial_hieararchy].unique():
if locality not in listOfLocality:
biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index)
except:
logger.info("could not filter, certainly because there is no spatial hiearchy on biggest score")
geolocator = Nominatim(user_agent="h-tfidf-evaluation", timeout=10)
geocoder = RateLimiter(geolocator.geocode, min_delay_seconds=1)
tqdm.pandas()
biggest["geocode"] = biggest["terms"].progress_apply(geocoder)
return biggest
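# Usage sketch (toy terms): each call to the rate-limited geocoder waits at least one second,
# so expect roughly one Nominatim query per second for the whole "terms" column.
def _example_geocoding_token(logger):
    demo = pd.DataFrame({"terms": ["Paris", "London", "Milano"],
                         "country": ["France", "United Kingdom", "Italia"]})
    return geocoding_token(demo, listOfLocality="all", spatial_hieararchy="country", logger=logger)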
def post_traitement_flood(biggest, logger, spatialLevel, ratio_of_flood=0.5):
"""
Flag terms coming from flooding users: return the same DataFrame with one extra column, user_flooding.
With the default ratio_of_flood, if a single Twitter user accounts for more than 50% of the occurrences
of a term, that user is considered to be flooding and the term is flagged.
:param biggest: DataFrame of terms to process
:param logger:
:param spatialLevel: work on country / state / city
:param ratio_of_flood: fraction of a term's tweets above which a single user counts as flooding
:return: same DataFrame with one extra column, user_flooding
"""
ratio_of_flood_global = ratio_of_flood
es_logger.setLevel(logging.WARNING)
# pre-build elastic query for spatialLevel :
rest_user_osm_level = ""
if spatialLevel == "country":
rest_user_osm_level = "rest_user_osm.country"
elif spatialLevel == "state":
rest_user_osm_level = "rest.features.properties.state"
elif spatialLevel == "city":
rest_user_osm_level = "rest.features.properties.city"
def is_an_user_flooding(term, locality):
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Query :
## Retrieve only user name where in full_text = term and rest_user_osm.country = locality
if term is not np.NAN:
query = {"_source": "user.name","query":{"bool":{"filter":[{"bool":{"should":[{"match_phrase":{"full_text":term}}],"minimum_should_match":1}},
{"bool":{"should":[{"match_phrase":{rest_user_osm_level:locality}}],"minimum_should_match":1}}]}}}
try:
result = Elasticsearch.search(client, index=index, body=query)
list_of_user = []
if len(result["hits"]["hits"]) != 0:
for hit in result["hits"]["hits"]:
user = hit["_source"]["user"]["name"]
list_of_user.append(user)
dict_user_nbtweet = dict(Counter(list_of_user))
d = dict((k, v) for k, v in dict_user_nbtweet.items() if v >= (ratio_of_flood_global * len(list_of_user)))
if len(d) > 0 : # there is a flood on this term:
return 1
else:
return 0
else: # not found in ES why ?
return "not_in_es"
except:
logger.info("There is a trouble with this term: " + str(term))
return np.NAN
else:
return 0
logger.debug("start remove terms if they coming from a flooding user, ie, terms in "+str(ratio_of_flood_global*100)+"% of tweets from an unique user over tweets with this words")
tqdm.pandas()
biggest["user_flooding"] = biggest.progress_apply(lambda t: is_an_user_flooding(t.terms, t[spatialLevel]), axis=1)
return biggest
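# Worked example of the flooding rule (illustrative counts only, no Elasticsearch query):
# if a term occurs in 10 tweets for a locality and a single user wrote 6 of them,
# then 6 >= ratio_of_flood (0.5) * 10, so the term gets user_flooding = 1.
def _example_flooding_rule(ratio_of_flood=0.5):
    tweets_per_user = {"alice": 6, "bob": 3, "carol": 1}  # toy counts for one term in one locality
    total = sum(tweets_per_user.values())
    flooders = {u: n for u, n in tweets_per_user.items() if n >= ratio_of_flood * total}
    return 1 if flooders else 0  # 1 here: alice alone accounts for 60% of the tweets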
def venn(biggest, logger, spatial_level, result_path, locality):
"""
Build a Venn diagram rendered as a word cloud and save the figure in result_path.
Note on font size: in each subset (common or specific), the font size of a term reflects its H-TFIDF rank inside that subset.
:param biggest: DataFrame of H-TFIDF biggest scores, with a user_flooding column
:param logger:
:param spatial_level: spatial column to filter on (country / state / city)
:param result_path: directory where the figure is saved
:param locality: locality to plot
:return:
"""
# Post-processing: keep only terms not flagged as flooding
biggest = biggest[biggest["user_flooding"] == "0"]
# Select locality
biggest = biggest[biggest[spatial_level] == locality]
# select week
weeks = biggest['date'].unique()
if len(weeks) == 2:
sets = []
weeks_list = []
for week in weeks:
sets.append(set(biggest[biggest["date"] == week].terms[0:100]))
weeks_list.append(week)
try:
venn = venn2_wordcloud(sets, set_labels=weeks_list, wordcloud_kwargs=dict(min_font_size=10),)
except:
logger.info("Can't build venn for: "+locality)
elif len(weeks) == 3 or len(weeks) > 3:
sets = []
weeks_list = []
word_frequency = {} # for font-size of wordcloud : based on H-TFIDF Rank
for nb, week in enumerate(weeks[-3:]):
sets.append(set(biggest[biggest["date"] == week].terms[0:100]))
weeks_list.append(week)
for rank, term in enumerate(biggest[biggest["date"] == week].terms[0:100]):
if term not in word_frequency:
word_frequency[term] = (100 - rank)
try:
venn = venn3_wordcloud(sets, set_labels=weeks_list, word_to_frequency=word_frequency,
wordcloud_kwargs=dict(min_font_size=4,),)
except:
logger.info("Can't build venn for: "+locality)
sorted_word_frequency = dict(sorted(word_frequency.items(), key=operator.itemgetter(1),reverse=True))
logger.info(locality + ": " + str(sorted_word_frequency))
plt.savefig(result_path + "/venn_" + locality)
def frequent_terms_by_level(matrixOcc, logger, most_frequent_terms_fpath, listOfLocalities='all', spatialLevel='country'):
"""
Extract the most frequent terms per locality from the occurrence matrix and save them with the same layout as the TF-IDF biggest-score files.
:param matrixOcc: occurrence matrix indexed by "city_state_country_date", one column per term
:param most_frequent_terms_fpath: output CSV path
:param listOfLocalities: localities to keep, or "all"
:param spatialLevel: aggregation level (city, state or country)
:return: DataFrame with columns [terms, spatialLevel, date]
"""
#matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfLocalities,
# spatialLevel=spatialLevel, period='all')
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
matrixOcc.date = pd.to_datetime((matrixOcc.date)) # convert date into datetime
if spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="Y")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="Y")]).sum()
elif spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="Y")]).sum()
# Export N biggest TF-IDF score:
top_n = 500
extractBiggest = pd.DataFrame(index=matrixOcc.index, columns=range(0, top_n))
for row in matrixOcc.index:
try:
row_without_zero = matrixOcc.loc[row]  # drop terms with a score of 0
row_without_zero = row_without_zero[ row_without_zero !=0 ]
try:
extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
except:
extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
except:
logger.debug("H-TFIDF: city " + str(matrixOcc.loc[row].name) + "not enough terms")
# Transpose this table in order to share the same structure with TF-IDF classifical biggest score :
hbt = pd.DataFrame()
extractBiggest = extractBiggest.reset_index()
for index, row in extractBiggest.iterrows():
hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
hbtrow[spatialLevel] = row[spatialLevel]
hbtrow["date"] = row["date"]
hbt = hbt.append(hbtrow, ignore_index=True)
# save file
logger.info("saving file: "+most_frequent_terms_fpath)
hbt.to_csv(most_frequent_terms_fpath)
return hbt
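# Assumed input convention (inferred from splitindex above): matrixOcc rows are keyed by
# "city_state_country_date" and columns are terms. A toy sketch of that layout:
def _example_matrix_occurence():
    demo_index = ["Paris_IDF_France_2020-02-03",
                  "London_England_United Kingdom_2020-02-03"]
    return pd.DataFrame(np.random.randint(0, 5, size=(2, 3)),
                        index=demo_index,
                        columns=["coronavirus", "lockdown", "masque"])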
def comparison_htfidf_tfidf_frequentterms(htfidf_f, tfidf_corpus_country_f, frequent_terms, logger, plot_f_out, listOfCountries="all"):
# Open dataframes
htfidf = pd.read_csv(htfidf_f, index_col=0)
tfidf = pd.read_csv(tfidf_corpus_country_f, index_col=0)
for nb_terms in [100, 200, 500]:
# barchart building
barchart_df_col = ["country", "h-tfidf", "tf-idf"]
barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries)))
# loop on countries
for country in listOfCountries:
htfidf_country = htfidf[htfidf["country"] == country]
tfidf_country = tfidf[tfidf["country"] == country]
frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
# loop on weeks
htfidf_overlap_per_week_df = pd.DataFrame(index=range(1))
for week in htfidf_country.date.unique():
htfidf_country_week = htfidf_country[htfidf_country["date"] == week]
# build on venn comparison H-TFIDF with Frequent terms
sets = []
sets.append(set(htfidf_country_week.terms[0:nb_terms]))
sets.append(set(frequent_terms_country.terms[0:nb_terms]))
try:
venn_htfidf = venn2_wordcloud(sets)
htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11'))
except:
htfidf_overlap_per_week_df[week] = np.NAN
# mean value for all weeks :
mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms
# Compute TF-IDF overlap with frequent terms
sets = []
sets.append(set(tfidf_country.terms[0:nb_terms]))
sets.append(set(frequent_terms_country.terms[0:nb_terms]))
logger.info(country)
venn_tfidf = venn2_wordcloud(sets)
plt.close('all')
# barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11'))
tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms
# build the row for barchart
if country == "Ἑλλάς":
country = "Greece"
row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap}
barchart_df = barchart_df.append(row, ignore_index=True)
# Plot bar chart
barchart_df = barchart_df.set_index("country")
barchart_df = barchart_df.dropna()
barchart_df.plot.bar(figsize=(8,6))
plt.subplots_adjust(bottom=0.27)
plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms")
plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png")
# build venn diagramm
## Choose a country
country = "United Kingdom"
nb_terms = 100
week = "2020-01-26"
## Filter the matrix: keep the top terms, dropping short terms (3 characters or fewer) and purely numeric tokens
htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)]
tfidf_country = tfidf[tfidf["country"] == country]
frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3]
tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3]
frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3]
### Remove purely numeric terms
htfidf_country_terms = htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
columns_name = []
latex_table_nb_terms = 30
for i in range(latex_table_nb_terms):
columns_name.append("rank "+str(i))
latex_table = pd.DataFrame(index=range(3), columns=columns_name)
latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values
latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values
latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values
print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False))
sets = []
sets.append(set(htfidf_country_terms))
sets.append(set(tfidf_country_terms))
sets.append(set(frequent_terms_country_terms))
fig, ax = plt.subplots(figsize=(8, 6))
venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax)
plt.savefig(plot_f_out + "_"+ country + "venn3.png")
plt.show()
def comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_whole_f, frequent_terms, logger, plot_f_out, listOfCountries="all"):
# Open dataframes
htfidf = pd.read_csv(htfidf_f, index_col=0)
tfidf = pd.read_csv(tfidf_whole_f, index_col=0)
for nb_terms in [100, 200, 500]:
# barchart building
barchart_df_col = ["country", "h-tfidf", "tf-idf"]
barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries)))
# loop on countries
for country in listOfCountries:
# build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"]
if country == "Ἑλλάς":
htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Greece")]
tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Greece")]
elif country == "Deutschland":
htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Germany")]
tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Germany")]
elif country == "España":
htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Spain")]
tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Spain")]
elif country == "Italia":
htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Italy")]
tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Italy")]
else:
htfidf_country = htfidf[htfidf["country"] == country]
tfidf_country = tfidf[tfidf["country"] == country]
frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
# loop on weeks
htfidf_overlap_per_week_df = pd.DataFrame(index=range(1))
for week in htfidf_country.date.unique():
htfidf_country_week = htfidf_country[htfidf_country["date"] == week]
# build on venn comparison H-TFIDF with Frequent terms
sets = []
sets.append(set(htfidf_country_week.terms[0:nb_terms]))
sets.append(set(frequent_terms_country.terms[0:nb_terms]))
try:
venn_htfidf = venn2_wordcloud(sets)
htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11'))
except:
htfidf_overlap_per_week_df[week] = np.NAN
# mean value for all weeks :
mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms
# Compute TF-IDF overlap with frequent terms
sets = []
sets.append(set(tfidf_country.terms[0:nb_terms]))
sets.append(set(frequent_terms_country.terms[0:nb_terms]))
logger.info(country)
try :
venn_tfidf = venn2_wordcloud(sets)
plt.close('all')
# barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11'))
tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms
except:
logger.info("No terms in biggest score for TF-IDF - country: " + country)
tfidf_overlap = 0.0
# build the row for barchart
if country == "Ἑλλάς":
country = "Greece"
row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap}
barchart_df = barchart_df.append(row, ignore_index=True)
# Plot bar chart
barchart_df = barchart_df.set_index("country")
barchart_df = barchart_df.dropna()
barchart_df.plot.bar(figsize=(8,6))
plt.subplots_adjust(bottom=0.27)
plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms")
plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png")
# build venn diagramm
## Choose a country
country = "Germany"
nb_terms = 100
week = "2020-01-26"
## Filter the matrix: keep the top terms, dropping short terms (3 characters or fewer) and purely numeric tokens
htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)]
tfidf_country = tfidf[tfidf["country"] == country]
frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3]
tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3]
frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3]
### Remove purely numeric terms
htfidf_country_terms = htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
columns_name = []
latex_table_nb_terms = 15
for i in range(latex_table_nb_terms):
columns_name.append("rank "+str(i))
latex_table = pd.DataFrame(index=range(3), columns=columns_name)
latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values
latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values
latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values
print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False))
sets = []
sets.append(set(htfidf_country_terms))
sets.append(set(tfidf_country_terms))
sets.append(set(frequent_terms_country_terms))
fig, ax = plt.subplots(figsize=(8, 6))
venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax)
plt.savefig(plot_f_out + "_"+ country + "venn3.png")
plt.show()
if __name__ == '__main__':
# Global parameters :
## Spatial level hierarchie :
# spatialLevels = ['country', 'state', 'city']
spatialLevels = ['country', 'state']
## Time level hierarchie :
timeLevel = "week"
## List of country to work on :
listOfLocalities = ["Deutschland", "España", "France", "Italia", "United Kingdom"]
## elastic query :
query_fname = "elasticsearch/analyse/nldb21/elastic-query/nldb21_europeBySpatialExtent_en_february.txt"
## Path to results :
period_extent = "feb_tfidf_whole"
f_path_result = "elasticsearch/analyse/nldb21/results/" + period_extent + "_" + timeLevel
if not os.path.exists(f_path_result):
os.makedirs(f_path_result)
# Workflow parameters :
## Rebuild H-TFIDF (with Matrix Occurence)
build_htfidf = False
build_htfidf_save_intermediaire_files = True
## eval 1 : Comparison with classical TF-IDf
build_classical_tfidf = False
build_classical_tfidf_save_intermediaire_files = False
## eval 2 : Use word_embedding with t-SNE
build_tsne = False
build_tsne_spatial_level = "country"
## eval 3 : Use word_embedding with box plot to show disparity
build_boxplot = False
build_boxplot_spatial_level = "country"
## eval 4 : Compare H-TFIDF and TF-IDF with most frequent terms by level
build_compare_measures = True
build_compare_measures_build_intermedate_files = False
build_compare_measures_level = "country"
build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"]
## post-processing 1 : geocode terms
build_posttraitement_geocode = False
## post-processing 2 : remove terms coming from flooding users
build_posttraitement_flooding = False
build_posttraitement_flooding_spatial_levels = spatialLevels
## Analyse H-TFIDF for epidemiology 1 : clustering
build_clustering = False
build_clustering_spatial_levels = ['country', 'state']
build_clustering_list_hierachical_locality = {
"country": ["France", "Deutschland", "España", "Italia", "United Kingdom"],
'state': ["Lombardia", "Lazio"],
# "city": ["London"]
}
## Venn diagramm
build_venn = False
build_venn_spatial_level = "country"
# initialize a logger :
log_fname = "elasticsearch/analyse/nldb21/logs/nldb21_"
logger = logsetup(log_fname)
logger.info("H-TFIDF expirements starts")
if build_htfidf:
# start the elastic query
query = open(query_fname, "r").read()
logger.debug("elasticsearch : start quering")
tweetsByCityAndDate = elasticsearch_query(query_fname, logger)
logger.debug("elasticsearch : stop quering")
# Build a matrix of occurence for each terms in document aggregate by city and day
## prepare tree for file in commun for all spatial level :
f_path_result_common = f_path_result+"/common"
if not os.path.exists(f_path_result_common):
os.makedirs(f_path_result_common)
## Define file path
matrixAggDay_fpath = f_path_result_common + "/matrixAggDay.csv"
matrixOccurence_fpath = f_path_result_common + "/matrixOccurence.csv"
logger.debug("Build matrix of occurence : start")
matrixOccurence = matrixOccurenceBuilder(tweetsByCityAndDate, matrixAggDay_fpath, matrixOccurence_fpath, build_htfidf_save_intermediaire_files, logger)
logger.debug("Build matrix of occurence : stop")
## import matrixOccurence if you don't want to re-build it
# matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
for spatialLevel in spatialLevels:
logger.info("H-TFIDF on: "+spatialLevel)
f_path_result_level = f_path_result+"/"+spatialLevel
if not os.path.exists(f_path_result_level):
os.makedirs(f_path_result_level)
## Compute H-TFIDF
matrixHTFIDF_fname = f_path_result_level + "/matrix_H-TFIDF.csv"
biggestHTFIDFscore_fname = f_path_result_level + "/h-tfidf-Biggest-score.csv"
logger.debug("H-TFIDF : start to compute")
HTFIDF(matrixOcc=matrixOccurence,
matrixHTFIDF_fname=matrixHTFIDF_fname,
biggestHTFIDFscore_fname=biggestHTFIDFscore_fname,
spatialLevel=spatialLevel,
temporalLevel=timeLevel,
)
logger.info("H-TFIDF : stop to compute for all spatial levels")
## Comparison with TF-IDF
f_path_result_tfidf = f_path_result + "/tf-idf-classical"
f_path_result_tfidf_by_locality = f_path_result_tfidf + "/tfidf-tf-corpus-country"
if build_classical_tfidf :
if not os.path.exists(f_path_result_tfidf):
os.makedirs(f_path_result_tfidf)
if not os.path.exists(f_path_result_tfidf_by_locality):
os.makedirs(f_path_result_tfidf_by_locality)
### On whole corpus
TFIDF_TF_on_whole_corpus(elastic_query_fname=query_fname,
logger=logger,
save_intermediaire_files=build_classical_tfidf_save_intermediaire_files,
path_for_filesaved=f_path_result_tfidf)
### By Country
TFIDF_TF_with_corpus_state(elastic_query_fname=query_fname,
logger=logger,
save_intermediaire_files=build_classical_tfidf_save_intermediaire_files,
nb_biggest_terms=500,
path_for_filesaved=f_path_result_tfidf_by_locality,
spatial_hiearchy="country",
temporal_period='all')
if build_compare_measures:
f_path_result_compare_meassures_dir = f_path_result+"/common"
f_path_result_compare_meassures_file = \
f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level + ".csv"
f_path_result_compare_meassures_plot = \
f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level
if not os.path.exists(f_path_result_compare_meassures_dir):
os.makedirs(f_path_result_compare_meassures_dir)
# open the matrix of occurrences:
try:
matrixOccurence = pd.read_csv(f_path_result_compare_meassures_dir + '/matrixOccurence.csv', index_col=0)
except:
logger.error("File: " + f_path_result_compare_meassures_dir + '/matrixOccurence.csv' + "doesn't exist. You may need to save intermediate file for H-TFIDF")
logger.info("Retrieve frequent terms per country")
if build_compare_measures_build_intermedate_files:
ft = frequent_terms_by_level(matrixOccurence, logger, f_path_result_compare_meassures_file, build_compare_measures_localities, build_compare_measures_level)
else:
ft = pd.read_csv(f_path_result_compare_meassures_file)
# files_path
htfidf_f = f_path_result + "/country/h-tfidf-Biggest-score.csv"
tfidf_corpus_whole_f = f_path_result + "/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv"
comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_corpus_whole_f, ft, logger,
f_path_result_compare_meassures_plot,
listOfCountries=build_compare_measures_localities)
if build_tsne :
f_path_result_tsne = f_path_result+"/tsne"
if not os.path.exists(f_path_result_tsne):
os.makedirs(f_path_result_tsne)
biggest_TFIDF_country = pd.read_csv(f_path_result+"/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0)
biggest_TFIDF_whole = pd.read_csv(f_path_result+"/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv")
biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_tsne_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0)
# t-SNE visualisation
t_SNE_bert_embedding_visualization(biggest_TFIDF_country, logger, listOfLocalities=listOfLocalities,
plotname="TF-IDF on corpus by Country",
paht2save=f_path_result_tsne)
t_SNE_bert_embedding_visualization(biggest_H_TFIDF, logger, listOfLocalities=listOfLocalities,
plotname="H-TFIDF", paht2save=f_path_result_tsne)
if build_boxplot :
# dir path to save :
f_path_result_boxplot = f_path_result+"/pairwise-similarity-boxplot"
if not os.path.exists(f_path_result_boxplot):
os.makedirs(f_path_result_boxplot)
# open results from measures:
biggest_TFIDF_country = pd.read_csv(f_path_result_tfidf_by_locality+"/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0)
import collections
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
isna,
)
import pandas._testing as tm
class TestCategoricalMissing:
def test_isna(self):
exp = np.array([False, False, True])
cat = Categorical(["a", "b", np.nan])
res = cat.isna()
tm.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = list(range(10))
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(["a", "b", np.nan])
result = c._set_dtype(CategoricalDtype(["a", "c"]))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
"fillna_kwargs, msg",
[
(
{"value": 1, "method": "ffill"},
"Cannot specify both 'value' and 'method'.",
),
({}, "Must specify a fill 'value' or 'method'."),
({"method": "bad"}, "Invalid fill method. Expecting .* bad"),
(
{"value": Series([1, 2, 3, 4, "a"])},
"Cannot setitem on a Categorical with a new category",
),
],
)
def test_fillna_raises(self, fillna_kwargs, msg):
# https://github.com/pandas-dev/pandas/issues/19682
# https://github.com/pandas-dev/pandas/issues/13628
cat = Categorical([1, 2, 3, None, None])
with pytest.raises(ValueError, match=msg):
cat.fillna(**fillna_kwargs)
@pytest.mark.parametrize("named", [True, False])
def test_fillna_iterable_category(self, named):
# https://github.com/pandas-dev/pandas/issues/21097
if named:
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object))
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
def test_fillna_array(self):
# accept Categorical or ndarray value if it holds appropriate values
cat = Categorical(["A", "B", "C", None, None])
other = cat.fillna("C")
result = cat.fillna(other)
tm.assert_categorical_equal(result, other)
assert isna(cat[-1])  # didn't modify the original in place
other = np.array(["A", "B", "C", "B", "A"])
result = cat.fillna(other)
expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
import json
import math
import logging
import matplotlib.ticker as ticker
import warnings
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
from pathlib import Path
from itertools import product
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind, pearsonr, hmean
warnings.filterwarnings("ignore")
def plot_metric_to_threshold(metric, dataset_perthr_metric, default_thr, opt_threshold, outdir, outbase, names=None):
logging.info("Plotting metrics progress with threshold: {}; {}".format(metric, outbase))
if names is not None:
default_thr.rename(names, inplace=True)
opt_threshold.rename(names, inplace=True)
s = math.ceil(len(dataset_perthr_metric)**.5)
fig, axes = plt.subplots(s, s, sharex=True, sharey=True, figsize=(s*2.5, s*2.5))
def df_sorter(df):
if names is None:
sorter = df.index.get_level_values(0)[0]
else:
sorter = names(df.index.get_level_values(0)[0])
return sorter
for i, (ax, p) in enumerate(zip(axes.ravel(), sorted(dataset_perthr_metric, key=df_sorter))):
p.columns = p.columns.values.astype(np.float)
p = p.sort_index(axis=1)
method = p.index.droplevel(1).unique()[0]
method = names(method) if names is not None else method
p = p.reset_index(level=0, drop=True)
# x = np.array(dataset_perthr_metric.columns.values, dtype=np.float)
p.loc[metric].plot(ax=ax, label=None)
# dataset_perthr_metric.loc[p].sort_index().plot(ax=ax, legend=default_thr[p])
if i == 0 or i % s == 0:
ax.set_ylabel(metric.upper())
if i > s**2-s:
ax.set_xlabel("Thresholds")
ax.set_title(method)
ax.axvline(default_thr[method], linestyle="--", label="Default: {}".format(default_thr[method]), color="orange")
ax.axvline(opt_threshold[method], linestyle="--", label="Optimal: {}".format(opt_threshold[method]), color="deeppink")
ax.legend(*zip(*list(zip(*ax.get_legend_handles_labels()))[1:]))
for x in range(-1, -(s**2 - len(dataset_perthr_metric))-1, -1):
fig.delaxes(axes.ravel()[x])
plt.gcf().suptitle("{} progress with threshold".format(metric.upper()), y=1.01)
fig.tight_layout()
plt.savefig(outdir / "{}{}ToThr.png".format(outbase + "_" if outbase else outbase, metric), dpi=dpi, bbox_inches="tight")
plt.close()
def plot_pertarget_permethod_heatmap(metric, target_metrics_preds, target_metrics_bases, outdir, outbase, names=None):
logging.info("Plotting target heatmap: {}; {}".format(metric, outbase))
fig, ax = plt.subplots(figsize=(20, 8))
tgt_metric = target_metrics_preds[metric].unstack().append(target_metrics_bases[metric].unstack())
if names is not None:
tgt_metric.rename(names, inplace=True)
tgt_pred_metrics_avg_col = tgt_metric.mean().sort_values()
tgt_pred_metrics_avg_row = tgt_metric.mean(axis=1).sort_values(ascending=False)
ax.set_facecolor('royalblue')
ax = sns.heatmap(tgt_metric.reindex(tgt_pred_metrics_avg_col.index, axis=1).reindex(tgt_pred_metrics_avg_row.index), ax=ax)
ax2 = ax.twiny()
ax2.set_xticks(ax.xaxis.get_ticklocs())
ax2.set_xticklabels(tgt_pred_metrics_avg_col.values[[list(tgt_pred_metrics_avg_col.index).index(l.get_text()) for l in
ax.xaxis.get_ticklabels()]].round(3))
ax2.tick_params(axis='x', rotation=90)
plt.savefig(outdir / "{}tgtheatmap_{}.png".format(outbase + "_" if outbase else outbase, metric), dpi=dpi, bbox_inches="tight")
plt.close(fig)
def plot_icontent_correlation(metric, predictions, cons, pdbr, gene3dr, outdir, outbase, names=None):
logging.info("Plotting idcontent correlation: {}; {}".format(metric, outbase))
predictions = pd.DataFrame({**predictions.to_dict(), **cons.to_dict(), **pdbr.to_dict(), **gene3dr.to_dict()}).dropna()
n = len(predictions.columns.get_level_values(0).unique())
n = int(n ** .5) if (n ** .5) % 2 == 0 else int(math.ceil(n ** .5))
if names is not None:
predictions.rename(names, axis=1, level=0, inplace=True)
idcontent_ref = predictions.iloc[:, 0].groupby(level=0).mean()
fig, axes = plt.subplots(n, n, figsize=(int(n)*2.5, int(n)*2.5), sharey="row")
axes = axes.flatten()
for ax, p in zip(axes, predictions.columns.get_level_values(0).unique()):
x = predictions[(p, "states")].groupby(level=0).mean()
y = idcontent_ref
sns.scatterplot(x, y, ax=ax, label="Pearson R = {:.3f}".format(pearsonr(x, y)[0]))
ax.legend(loc="upper left")
# ax.set_title(p)
ax.set_ylabel("Reference")
ax.set_xlabel(p)
fig.tight_layout()
plt.savefig(outdir / "{}icontentcorr_{}.png".format(outbase + "_" if outbase else outbase, metric), dpi=dpi, bbox_inches="tight")
plt.close()
def plot_methdod_correlation(metric, target_metrics_preds, outdir, outbase):
logging.info("Plotting methods correlation: {}; {}".format(metric, outbase))
t = target_metrics_preds[metric].unstack().reindex(
target_metrics_preds[metric].groupby(level=0).mean().sort_values(ascending=False).index)
ax = sns.pairplot(t.T)
plt.savefig(outdir / "{}methodcorr_{}.png".format(outbase + "_" if outbase else outbase, metric), dpi=dpi, bbox_inches="tight")
plt.close()
def plot_metrics_correlation(resdir, outdir):
logging.info("Plotting metrics correlation")
anymetric = resdir / "new-disprot-all_simple.analysis.all.dataset.default.metrics.csv"
anymetric = pd.read_csv(anymetric, index_col=0)
fig, ax = plt.subplots(figsize=(8, 8))
sns.heatmap(anymetric.reindex(anymetric.corr().mean().sort_values().drop('thr').index, axis=1).corr(),
cmap='Blues', cbar=False, annot=True, ax=ax)
plt.savefig(outdir / "metrics_corr.png", dpi=dpi, bbox_inches="tight")
plt.close()
def plot_metrics_clustermap(resdir, outdir):
logging.info("Plotting metrics clustermap")
anymetric = resdir / "new-disprot-all_simple.analysis.all.dataset.default.metrics.csv"
anymetric = pd.read_csv(anymetric, index_col=0)
sns.clustermap(anymetric.drop('thr', axis=1).corr(), metric="correlation")
plt.savefig(outdir / "metrics_cluster.png", dpi=dpi, bbox_inches="tight")
plt.close()
def plot_average_overall_ranking(metric, metrics_preds, metrics_bases, outdir, outbase, plotfirst=None, names=None, level='target'):
logging.info("Plotting ranking: {}".format(metric))
if level == 'target':
metrics_preds = metrics_preds.groupby(level=0).mean()
metrics_bases = metrics_bases.groupby(level=0).mean()
metrics_selection = ['bac', 'f1s', 'fpr', 'mcc', 'ppv', 'tpr', 'tnr']
pred_ranking = metrics_preds.append(metrics_bases)[metrics_selection]
pred_ranking = pred_ranking.rank(axis=0, method='max', ascending=False, na_option='bottom')
pred_ranking = pred_ranking.reindex(pred_ranking.mean(axis=1).sort_values().index)
if names is not None:
pred_ranking.rename(names, inplace=True)
if plotfirst is not None:
pred_ranking = pred_ranking.head(10)
cartesian_product = product(pred_ranking.index, pred_ranking.index)
dat_pred_ranking_pvals = [[*couple, ttest_ind(*pred_ranking.loc[[*couple]].values, equal_var=True)[1]] for
couple in cartesian_product]
dat_pred_ranking_pvals = pd.DataFrame(dat_pred_ranking_pvals).set_index([0, 1]).unstack()
dat_pred_ranking_pvals.columns = dat_pred_ranking_pvals.columns.droplevel(0)
dat_pred_ranking_pvals = dat_pred_ranking_pvals.reindex(index=pred_ranking.index,
columns=pred_ranking.index).round(2)
fig, axes = plt.subplots(figsize=(len(dat_pred_ranking_pvals) / 2, len(dat_pred_ranking_pvals) / 2))
ax = sns.heatmap(dat_pred_ranking_pvals, annot=True, mask=np.triu(dat_pred_ranking_pvals), cmap="Reds", cbar=False,
center=0.1, vmin=0, vmax=0.2, edgecolor='w')
ax = sns.heatmap(dat_pred_ranking_pvals, annot=True, mask=np.tril(dat_pred_ranking_pvals), cmap="Greens",
cbar=False, center=0.1, vmin=0, vmax=0.2, edgecolor='w')
plt.savefig(outdir / "{}ranking.{}_{}{}.png".format(outbase + "_" if outbase else outbase, level,
metric, "_best{}".format(plotfirst) if plotfirst else ""),
dpi=dpi, bbox_inches="tight")
plt.close(fig)
def plot_cput_stacked_bars(ax, predictors, names=None):
cputime = pd.read_csv('../data/dataset_stats/cpu_time.csv', index_col=0, header=[0, 1])
order = ['prediction', 'hhblits', 'psiblast']
ax = cputime.mean().unstack()[order].rename(names).reindex(predictors).plot.bar(ax=ax, stacked=True, log=True)
ax.errorbar(range(len(predictors)),
cputime.mean().unstack()[order].rename(names).reindex(predictors).sum(axis=1),
cputime.std().unstack()[order].rename(names).reindex(predictors).apply(
lambda r: np.sqrt(np.sum(np.log(r) ** 2)) / len(r), axis=1),
linewidth=0, elinewidth=2, c='k')
ax.set_ylabel('$\log_{10}(Seconds)$')
# ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, p: '$10^{{{0}}}$'.format(int(x))))
ax.set_xlabel(None)
def plot_cput_boxplot(ax, predictors, names):
cputime = pd.read_csv('../data/dataset_stats/cpu_time.csv', index_col=0, header=[0, 1])
order = ['prediction', 'hhblits', 'psiblast']
cputime = cputime.groupby(level=0, axis=1).sum().rename(names, axis=1).reindex(predictors, axis=1)
cputime.boxplot(rot=90,
ax=ax,
grid=False,
flierprops=dict(marker=',', markerfacecolor='steelblue', markeredgecolor='none', alpha=.1),
boxprops=dict(alpha=.7))
cputimelessthanone = pd.Series(np.nan, index=cputime.median().index)
cputimelessthanone[cputime.quantile(.75) < 1] = 1
ax.plot(np.arange(len(cputimelessthanone)) + 1, cputimelessthanone.values, marker='o', markersize=5,
color='magenta', linestyle='None')
ax.set_yscale('log')
ax.set_ylabel('$\log_{10}(Seconds)$')
# ax.text(-0.15, 1.05, plotlbl, transform=ax.transAxes, size=20, weight='bold')
ax.set_xlabel(None)
ax.set_yticks(10 ** np.linspace(0, 4, 5))
ax.grid(which='major', axis='both', alpha=.1)
ax.set_ylim(0.5, 10 ** 4)
def plot_dat_tgt_metric_cpuspeed(metric, dat_metr_preds, dat_metr_bases, tgt_metr_preds, tgt_metr_bases, bts_ci_preds, outdir, outbase="", names=None):
logging.info("Plotting metric barplot: {}, {}".format(metric, outbase))
fig = plt.figure(figsize=(15, 6))
gs = fig.add_gridspec(2, 2, height_ratios=[2, 1])
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[0, 1], sharey=ax1)
ax4 = fig.add_subplot(gs[1, 1], sharey=ax2)
negatives, optm = outbase.split("_")
optm = optm[3:]
# plot dataset metrics on left subplot
dat_m = dat_metr_preds[metric].sort_values(ascending=False).append(
dat_metr_bases[metric].sort_values(ascending=False))
yerr = bts_ci_preds.xs(metric, level=1)[["lo", 'hi']].reindex(dat_m.index)
colors = ['silver'] * len(dat_metr_preds) + ['grey'] * len(dat_metr_bases)
if names is not None:
dat_m = dat_m.rename(names)
yerr = yerr.rename(names)
dat_m.plot.bar(ax=ax1, color=colors, yerr=yerr)
# plot_cput_stacked_bars(ax2, dat_m.index, names)
plot_cput_boxplot(ax2, dat_m.index, names)
ax1.axhline(dat_metr_bases[metric].max())
ax1.set_title("{}; Dataset {}; Optimized on: {}".format(dataset_names[negatives], metric.upper(), optm.upper()))
ax1.set_ylabel(metric.upper())
ax1.set_xticks([])
# plot target metrics on right subplot
n = tgt_metr_preds.groupby(level=0).count().append(tgt_metr_bases.groupby(level=0).count())
tgt_m = tgt_metr_preds.groupby(level=0).mean()[metric].sort_values(ascending=False).append(
tgt_metr_bases.groupby(level=0).mean()[metric].sort_values(ascending=False))
yerr = tgt_metr_preds.groupby(level=0).std()[metric].append(
tgt_metr_bases.groupby(level=0).std()[metric])[tgt_m.index] / n[metric]**0.5 / 2
colors = ['silver'] * len(tgt_metr_preds.groupby(level=0).mean()) + ['grey'] * len(tgt_metr_bases.groupby(level=0).mean())
if names is not None:
tgt_m = tgt_m.rename(names)
yerr = yerr.rename(names)
tgt_m.plot.bar(ax=ax3, color=colors, yerr=yerr)
plot_cput_boxplot(ax4, tgt_m.index, names)
ax3.axhline(tgt_metr_bases.groupby(level=0).mean()[metric].max())
ax3.set_title("{}; Target {}; Optimized on: {}".format(dataset_names[negatives], metric.upper(), optm.upper()))
ax3.set_ylabel(metric.upper())
ax3.set_xticks([])
plt.savefig(outdir / "{}bar_{}.png".format(outbase+"_" if outbase else outbase, metric), dpi=dpi, bbox_inches="tight")
plt.close(fig)
def plot_dataset_target_metric(metric, dat_metr_preds, dat_metr_bases, tgt_metr_preds, tgt_metr_bases, bts_ci_preds, outdir, outbase="", names=None):
logging.info("Plotting metric barplot: {}, {}".format(metric, outbase))
fig, axes = plt.subplots(1, 2, figsize=(15, 6), sharey=True)
negatives, optm = outbase.split("_")
optm = optm[3:]
# plot dataset metrics on left subplot
dat_m = dat_metr_preds[metric].sort_values(ascending=False).append(
dat_metr_bases[metric].sort_values(ascending=False))
yerr = bts_ci_preds.xs(metric, level=1)[["lo", 'hi']].reindex(dat_m.index)
colors = ['silver'] * len(dat_metr_preds) + ['grey'] * len(dat_metr_bases)
if names is not None:
dat_m = dat_m.rename(names)
yerr = yerr.rename(names)
ax = dat_m.plot.bar(ax=axes[0], color=colors, yerr=yerr)
ax.axhline(dat_metr_bases[metric].max())
ax.set_title("Dataset {}; Negatives: {}; Optimized on: {}".format(metric.upper(), negatives, optm.upper()))
ax.set_ylabel(metric.upper())
# plot target metrics on right subplot
n = tgt_metr_preds.groupby(level=0).count().append(tgt_metr_bases.groupby(level=0).count())
tgt_m = tgt_metr_preds.groupby(level=0).mean()[metric].sort_values(ascending=False).append(
tgt_metr_bases.groupby(level=0).mean()[metric].sort_values(ascending=False))
yerr = tgt_metr_preds.groupby(level=0).std()[metric].append(
tgt_metr_bases.groupby(level=0).std()[metric])[tgt_m.index] / n[metric]**0.5 / 2
colors = ['silver'] * len(tgt_metr_preds.groupby(level=0).mean()) + ['grey'] * len(tgt_metr_bases.groupby(level=0).mean())
if names is not None:
tgt_m = tgt_m.rename(names)
yerr = yerr.rename(names)
ax = tgt_m.plot.bar(ax=axes[1], color=colors, yerr=yerr)
ax.axhline(tgt_metr_bases.groupby(level=0).mean()[metric].max())
ax.set_title("Average target {}; Negatives: {}; Optimized on: {}".format(metric.upper(), negatives, optm.upper()))
ax.set_ylabel(metric.upper())
plt.savefig(outdir / "{}bar_{}.png".format(outbase+"_" if outbase else outbase, metric), dpi=dpi, bbox_inches="tight")
plt.close(fig)
def plot_roc(preds_rocs, cons_roc, pdb_roc, gene3d_roc, random_rocs, outdir, outbase, names=None):
# procs, proc, groc, dataset, title, names = names, croc = None
logging.info("Plotting roc")
fig, ax = plt.subplots(figsize=(7.5, 7.5))
preds_rocs.rename(names, axis=1, level=0)
auc_rocs = sorted(preds_rocs.columns.droplevel(2).unique(), key=lambda t: t[1], reverse=True)[:10]
rocs = preds_rocs.reindex(list(zip(*auc_rocs))[0], axis=1, level=0)
ax.plot([0, 1], [0, 1], color='k', linestyle='--')
for p in rocs.columns.get_level_values(0).unique()[:10]:
ax.plot(*rocs[p].dropna().T.values, label=p)
ax.plot(*cons_roc.dropna().T.values, label="Naive Conservation", color='silver', linewidth=2)
for n, m in zip([pdb_roc, gene3d_roc], ['o', 's']):
idx = n.index.values - 0.5
ax.plot(*n.iloc[idx[idx > 0].argmin()], marker=m, markeredgecolor='silver', markeredgewidth=2,
markerfacecolor='w', markersize=10,
c='w', label=names(n.columns.get_level_values(0)[0]))
for rr, m in zip(random_rocs, ['*', 'P', 'd']):
ax.plot(*rr[['fpr', 'tpr']].mean(), marker=m, markeredgecolor='silver', markeredgewidth=2,
markerfacecolor='w', markersize=10, c='w', label=names(rr.index.get_level_values(0)[0]))
ax.legend()
lhandles, llabels = ax.get_legend_handles_labels()
if cons_roc is not None:
auc_rocs.extend(cons_roc.rename(names, axis=1, level=0).columns.droplevel(2).unique().tolist())
pwauc = next(zip(*auc_rocs))
ax.legend(lhandles,
['{}'.format('{} - AUC: {}'.format(names(l), auc_rocs[pwauc.index(l)][1]) if l in pwauc else l) for l in
llabels],
bbox_to_anchor=(0., -.37, 1., .102), loc='lower left', ncol=2, mode="expand", borderaxespad=0)
ax.set_xlabel("FPR")
ax.set_ylabel("TPR")
plt.savefig(outdir /"{}roc.png".format(outbase+"_" if outbase else outbase), dpi=dpi, bbox_inches="tight")
plt.close(fig)
def plot_pr(pred_prs, cons_prc, pdb_prc, gene3d_prc, random_prcs, cov, outdir, outbase, sortby="auc", names=None):
logging.info("Plotting precision-recall curve")
fig, ax = plt.subplots(figsize=(10.5, 7))
auc_pr = sorted(pred_prs.columns.droplevel([2, 3]).unique(), key=lambda t: t[1], reverse=True)
aps_pr = sorted(pred_prs.columns.droplevel([1, 3]).unique(), key=lambda t: t[1], reverse=True)
fmax_pr = sorted(((p, hmean(pred_prs[p].dropna().values, axis=1).max().round(2)) for p in pred_prs.columns.get_level_values(0).unique()),
key=lambda t: t[1], reverse=True)
sorter = None
if sortby == 'auc':
sorter = auc_pr
elif sortby == 'aps':
sorter = aps_pr
elif sortby == 'fmax':
sorter = fmax_pr
# select first 10 predictors (based on AUC)
prcs = pred_prs.reindex(list(zip(*sorter))[0], axis=1, level=0)
# plot f-score level lines
r = np.linspace(0, 1, 1000)
fs = hmean(np.array(np.meshgrid(r, r)).T.reshape(-1, 2), axis=1).reshape(1000, 1000)
cs = plt.contour(r, r, fs, levels=np.linspace(0.1, 1, 10), colors='silver', alpha=0.7, linewidths=1, linestyles='--')
ax.clabel(cs, inline=True, fmt='%.1f', fontsize=10, manual=[(l, l) for l in cs.levels[:-1]])
# plot predictor lines and markers
for p, z in zip(prcs.columns.get_level_values(0).unique()[:10], range(5, 55, 5)):
fmax_idx = hmean(prcs[p].dropna().T).argmax()
lines = ax.plot(*prcs[p].dropna().T.values, label=p, zorder=55-z)
ax.plot(*prcs[p].dropna().T.values[:, fmax_idx], color='w', marker='o', markerfacecolor=lines[0].get_color(), markersize=10, zorder=55-z)
ax.plot(*prcs[p].dropna().T.values[:, fmax_idx], color='w', marker='o', markerfacecolor=lines[0].get_color(), zorder=55-z)
# plot naive conservation
ax.plot(*cons_prc.dropna().T.values, label="Naive Conservation", color='k', linewidth=1, zorder=5)
cov['Naive Conservation'] = 1
# plot naives
for n, m in zip([pdb_prc, gene3d_prc], ['o', 's']):
nname = n.columns.get_level_values(0)[0]
cov[nname] = 1
ax.plot(*n.loc[1.0], marker=m, markeredgecolor='k', markeredgewidth=1,
markerfacecolor='w', markersize=8, zorder=60,
c='w', label=names(nname))
# plot randoms
for rprc, m in zip(random_prcs, ['*', 'P', 'd']):
rname = rprc.index.get_level_values(0)[0]
cov[rname] = 1
ax.plot(*rprc[['tpr', 'ppv']].mean(), marker=m, markeredgecolor='k', markeredgewidth=1, zorder=60,
markerfacecolor='w', markersize=8, c='w', label=names(rname))
cov = cov.to_dict()
ax.legend()
lhandles, llabels = ax.get_legend_handles_labels()
if cons_prc is not None:
sorter.extend(cons_prc.rename(names, axis=1, level=0).columns.droplevel([2, 3]).unique().tolist())
pwauc = next(zip(*sorter))
ax.legend(lhandles,
['{}'.format('{} - {}: {}, C: {:.2f}'.format(names(l), sortby.upper(), sorter[pwauc.index(l)][1], cov[l]) if l in pwauc else l) for l in llabels],
loc='center left', bbox_to_anchor=(1, 0.5))#, mode="expand", borderaxespad=0)
ax.set_xlim(-0.05, 1.05)
ax.set_ylim(-0.05, 1.05)
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
fig.tight_layout()
plt.savefig(outdir / "{}pr.{}.png".format(outbase + "_" if outbase else outbase, sortby), dpi=dpi, bbox_inches="tight")
plt.close(fig)
def plot_cputime_to_performance(metric, tgt_pred_metrics, outdir, outbase, names=None):
logging.info('plotting cputime to {}'.format(metric))
fig, ax = plt.subplots(figsize=(8, 6))
cputime = pd.read_csv('../data/dataset_stats/cpu_time.csv', header=[0, 1], index_col=[0]).groupby(level=0, axis=1).sum()
tgt_pred_metrics = tgt_pred_metrics[metric]
y = tgt_pred_metrics.groupby(level=0).mean().sort_values(ascending=False)
x = np.log10(cputime).mean().reindex(y.index).replace([np.inf, -np.inf], -2)
ax = sns.scatterplot(x=x, y=y, hue=y.index, zorder=50)
ax.errorbar(x=x,
y=y,
yerr=tgt_pred_metrics.groupby(level=0).std() / (len(tgt_pred_metrics) / len(y.index)) ** 0.5,
xerr=np.log10(cputime).std().reindex(y.index),
linewidth=0, elinewidth=0.5, c='k', capsize=2, capthick=0.5)
ax.axvline(0, linestyle='--', linewidth=1, label='1 Second')
ax.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '$10^{{{}}}$'.format(int(x))))
_ = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=2)
pointstoname = dict(zip(*ax.get_legend_handles_labels()[::-1]))
sorter = y.sort_values(ascending=False)
ax.legend([pointstoname[k] for k in sorter.index],
["{} (F={:.2f})".format(names(k), v) for k, v in sorter.iteritems()],
bbox_to_anchor=(1, .5),
loc='center left',
ncol=2)
ax.set_ylabel('$F_{max}$')
ax.set_xlabel('Seconds')
plt.savefig(outdir / "{}cputime_to_{}.png".format(outbase + "_" if outbase else outbase, metric), dpi=dpi,
bbox_inches="tight")
plt.close(fig)
def parse_args():
parser = argparse.ArgumentParser(
prog='caid-plots', description="automate plots for CAID analysis",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('resultDir', help='directory where CAID predictors results are saved')
parser.add_argument('baselineDir', help="directory where CAID baselines results are saved")
parser.add_argument('referenceDir', help="directory where refernces are stored")
parser.add_argument('datasetStatsDir', help="directory where data is stored")
parser.add_argument('-o', '--outputDir', default='.',
help='directory where the output will be written (default: cwd)')
parser.add_argument('-d', '--dpi', default=75, help='figures dpi')
parser.add_argument('-g', '--glob', default='*.txt')
parser.add_argument('-n', '--names', default=None, help='json file with predictors names')
parser.add_argument('-l', '--log', type=str, default=None, help='log file')
parser.add_argument("-ll", "--logLevel", default="ERROR",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help='log level filter. All levels <= choice will be displayed')
args = parser.parse_args()
return args
def set_logger(logfile, level):
handlers = list()
log_formatter = logging.Formatter('%(asctime)s | %(module)-13s | %(levelname)-8s | %(message)s')
if logfile:
file_handler = logging.FileHandler(logfile, 'a')
file_handler.setFormatter(log_formatter)
handlers.append(file_handler)
else:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
handlers.append(console_handler)
logging.basicConfig(level=level, handlers=handlers)  # handlers already carry log_formatter
def plot_subset_redundancy(newvsnew, newvsold, outdir):
logging.info("Plotting subset redundancy")
axes = pd.concat([newvsnew, newvsold], axis=1).hist(edgecolor='k', figsize=(15, 5), bins=20)
axes = axes.ravel()
line = axes[0].axvline(float(newvsnew.median()), linestyle="--", color='darkorange', linewidth=2)
l = axes[0].legend([line], ["Median: {:.1f} %".format(float(newvsnew.median()))])
line = axes[1].axvline(float(newvsold.median()), linestyle="--", color='darkorange', linewidth=2)
l = axes[1].legend([line], ["Median: {:.1f} %".format(float(newvsold.median()))])
plt.savefig(outdir / "subset_redundancy.png", dpi=dpi, bbox_inches="tight")
plt.close(plt.gcf())
def plot_dataset_redundancy(totredund, outdir):
logging.info("Plotting dataset redundancy")
axes = totredund.hist(by="Age", edgecolor="k", figsize=(15, 5))
med_new = float(totredund[totredund["Age"] == "CAID"].mean())
med_old = float(totredund[totredund["Age"] == "DisProt7"].mean())
line = axes[0].axvline(med_new, color="darkorange", linestyle="--")
axes[0].legend([line], ["Mean: {:.1f} %".format(med_new)])
line = axes[1].axvline(med_old, color="darkorange", linestyle="--")
axes[1].legend([line], ["Mean: {:.1f} %".format(med_old)])
plt.savefig(outdir / "dataset_redundancy.png", dpi=dpi, bbox_inches="tight")
plt.close(plt.gcf())
def plot_dataset_counts(counts, outdir):
logging.info("Plotting dataset counts")
fig, axes = plt.subplots(2, 3, figsize=(15, 7), sharey=True, sharex="col")
axes = axes.ravel()
counts.loc["CAID"].drop("PDB missing").xs("Proteins", level=2, axis=1).plot.barh(ax=axes[0], legend=False)
axes[0].set_title("CAID - Number of Proteins")
counts.xs("Regions", level=1, axis=1).loc["CAID"].drop("PDB missing").drop(("PDB", "Positive"), axis=1).plot.barh(
ax=axes[1])
axes[1].legend(axes[1].get_legend().get_patches(), ["Positives", "Negatives (Simples)", "Negatives (PDB)"])
axes[1].set_title("CAID - Number of Regions")
counts.xs("Residues", level=1, axis=1).loc["CAID"].drop("PDB missing").drop(("PDB", "Positive"), axis=1).plot.barh(
ax=axes[2])
axes[2].legend(axes[2].get_legend().get_patches(), ["Positives", "Negatives (Simples)", "Negatives (PDB)"])
axes[2].set_title("CAID - Number of Residues")
counts.loc["DisProt 7"].drop("PDB missing").xs("Proteins", level=2, axis=1).plot.barh(ax=axes[3], legend=False)
axes[3].set_title("DisProt 7 - Number of Proteins")
counts.xs("Regions", level=1, axis=1).loc["DisProt 7"].drop("PDB missing").drop(("PDB", "Positive"),
axis=1).plot.barh(ax=axes[4])
axes[4].legend(axes[4].get_legend().get_patches(), ["Positives", "Negatives (Simples)", "Negatives (PDB)"])
axes[4].set_title("DisProt 7 - Number of Regions")
counts.xs("Residues", level=1, axis=1).loc["DisProt 7"].drop("PDB missing").drop(("PDB", "Positive"),
axis=1).plot.barh(ax=axes[5])
axes[5].legend(axes[5].get_legend().get_patches(), ["Positives", "Negatives (Simples)", "Negatives (PDB)"])
axes[5].set_title("DisProt 7 - Number of Residues")
plt.savefig(outdir / "dataset_counts.png", dpi=dpi, bbox_inches="tight")
plt.close(plt.gcf())
def plot_species_count(dst, outdir):
dst = dst.loc['disprot-disorder']
ax = dst.data.groupby('species').count().taxon.sort_values().plot.barh(figsize=(4, 18), logx=True)
plt.tick_params(axis='x', which='both', labeltop='on', labelbottom='on', top='on')
ax.vlines([1, 10, 100], 0, len(dst), linestyle='--', linewidth=1, color='silver', zorder=0)
plt.savefig(outdir / "species_counts.png", dpi=dpi, bbox_inches='tight')
plt.close(plt.gcf())
def get_names(fname):
names = json.load(open(args.names))
name = names.get(fname)
fname = fname.lower()
if name is None:
if "cons" in fname:
name = "Naive Conservation"
elif "pdb" in fname and 'reverse' in fname:
name = "Naive PDB"
elif "gene3d" in fname and 'reverse' in fname:
name = "Naive Gene3D"
elif "random" in fname:
name = "Random"
elif "dataset" in fname:
name = "Shuffled dataset"
elif "target" in fname:
name = "Shuffled targets"
elif "fix" in fname:
name = "Fixed ID content"
elif "ref" in fname:
name = "Reference"
return name
dataset_names = {
'disprot-disorder': 'DisProt',
'disprot-disorder-pdb-atleast': 'DisProt-PDB',
'disprot-binding': 'DisProt-Binding',
'disprot-binding-all': 'DisProt-Binding-All',
'disprot-binding-disorder': 'DisProt-Binding-Disorder'
}
if __name__ == "__main__":
args = parse_args()
set_logger(args.log, args.logLevel)
logging.getLogger('matplotlib').setLevel(logging.WARNING)
dpi = args.dpi
resultdir = Path(args.resultDir)
baselndir = Path(args.baselineDir)
outputdir = Path(args.outputDir)
refdir = Path(args.referenceDir)
datadir = Path(args.datasetStatsDir)
get_names = get_names if args.names is not None else None
# DON'T CHANGE THE ORDER
basetypes = ["cons", "naive-pdb-atleast-reverse", "naive-gene3d-reverse", # naive
# "fixedposfrc",
"random", "shuffledataset", "shuffletargets"] # random
plot_metrics_correlation(resultdir, outputdir)
plot_metrics_clustermap(resultdir, outputdir)
cons_nvn = pd.read_csv(datadir / "blast_distribution_new_vs_new.txt", index_col=0)
cons_nvo = pd.read_csv(datadir / "blast_distribution_new_vs_old.txt", index_col=0)
cons_tot = pd.read_csv(datadir / "blast_distribution.txt", index_col=0)
counts = pd.read_csv(datadir / "reference.csv", index_col=[0, 1], header=[0, 1, 2])
refstats_target = pd.read_csv('../data/dataset_stats/references-stats.target.csv', index_col=[0, 1], header=[0, 1])
plot_subset_redundancy(cons_nvn, cons_nvo, outputdir)
plot_dataset_redundancy(cons_tot, outputdir)
plot_dataset_counts(counts, outputdir)
plot_species_count(refstats_target, outputdir)
# iterate over file in dir (foreach reference)
for reference in refdir.glob(args.glob):
logging.info(reference)
reference = Path(reference)
refname = reference.stem
logging.debug(refname)
roc_preds_f = resultdir / "{}.analysis.all.dataset._.roc.csv".format(refname)
roc_preds = pd.read_csv(roc_preds_f, index_col=[0], header=[0, 1, 2])
roc_bases = [pd.read_csv(baselndir / "{}.{}.all.dataset._.roc.csv".format(refname, b), index_col=[0],
header=[0, 1, 2]) for b in basetypes[1:3]]
cons_roc = pd.read_csv('../baseline/{}.cons.all.dataset._.roc.csv'.format(refname), index_col=[0], header=[0, 1, 2])
roc_random_bases = [pd.read_csv('../baseline/{}.{}.all.target.mcc.metrics.csv'.format(refname, b), index_col=0) for b in basetypes[3:]]
pr_preds_f = resultdir / "{}.analysis.all.dataset._.pr.csv".format(refname)
pr_preds = pd.read_csv(pr_preds_f, index_col=[0], header=[0, 1, 2, 3])
pr_bases = [pd.read_csv(baselndir / "{}.{}.all.dataset._.pr.csv".format(refname, b), index_col=[0],
header=[0, 1, 2, 3]) for b in basetypes[1:3]]
cons_pr = pd.read_csv('../baseline/{}.cons.all.dataset._.pr.csv'.format(refname), index_col=[0],
header=[0, 1, 2, 3])
pr_random_bases = [pd.read_csv('../baseline/{}.{}.all.target.mcc.metrics.csv'.format(refname, b), index_col=0)
for b in basetypes[3:]]
plot_roc(roc_preds, cons_roc, *roc_bases, roc_random_bases, outputdir, refname, names=get_names)
coverage = pd.read_csv(resultdir / '{}.analysis.all.target.default.metrics.csv'.format(refname), index_col=[0,1], header=[0,1])
coverage = (coverage.groupby(level=0).count().max(axis=1) / np.max(coverage.groupby(level=0).count().values))
plot_pr(pr_preds, cons_pr, *pr_bases, pr_random_bases, coverage, outputdir, refname, sortby="auc", names=get_names)
plot_pr(pr_preds, cons_pr, *pr_bases, pr_random_bases, coverage, outputdir, refname, sortby="aps", names=get_names)
plot_pr(pr_preds, cons_pr, *pr_bases, pr_random_bases, coverage, outputdir, refname, sortby="fmax", names=get_names)
dataset_metrics_default_f = resultdir / "{}.analysis.all.dataset.default.metrics.csv".format(refname)
dataset_metrics_default =
|
pd.read_csv(dataset_metrics_default_f, index_col=0)
|
pandas.read_csv
|
#!/usr/bin/env python
from __future__ import print_function, division
import os
import tarfile
import pandas as pd
import numpy as np
import gzip
import shutil
import itertools
import multiprocessing as mp
import astropy.units as u
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.io import ascii, fits
from astropy import utils, io
from astroquery.vizier import Vizier
from astroquery.irsa import Irsa
from astroquery.vsa import Vsa
from astroquery.ukidss import Ukidss
from astroquery.sdss import SDSS
from dl import queryClient as qc
try:
from urllib2 import urlopen #python2
from httplib import IncompleteRead
from urllib2 import HTTPError
except ImportError:
from urllib.request import urlopen #python3
from urllib.error import HTTPError
from http.client import IncompleteRead
#SIA
from pyvo.dal import sia
import pyvo
from qso_toolbox import utils as ut
from qso_toolbox import vlass_quicklook
# ------------------------------------------------------------------------------
# Supported surveys, data releases, bands
# ------------------------------------------------------------------------------
astroquery_dict = {
'tmass': {'service': 'irsa', 'catalog': 'fp_psc',
'ra': 'ra', 'dec': 'dec', 'mag_name':
'TMASS_J', 'mag': 'j_m', 'distance':
'dist', 'data_release': None},
'nomad': {'service': 'vizier', 'catalog': 'NOMAD',
'ra': 'RAJ2000', 'dec': 'DECJ2000',
'mag_name': 'R', 'mag': 'Rmag', 'distance':
'distance', 'data_release': None},
'vhsdr6': {'service': 'vsa', 'catalog': 'VHS',
'ra': 'ra', 'dec': 'dec',
'data_release': 'VHSDR6', 'mag_name': 'VHS_J',
'mag': 'jAperMag3', 'distance': 'distance'},
# new, needs to be tested!
'vikingdr5': {'service': 'vsa', 'catalog': 'VIKING',
'ra': 'ra', 'dec': 'dec',
'data_release': 'VIKINGDR5', 'mag_name': 'VHS_J',
'mag': 'jAperMag3', 'distance': 'distance'}
# ,
# 'sdss': {'service': 'sdss', 'catalog': 'VIKING',
# 'ra': 'ra', 'dec': 'dec',
# 'data_release': 'VIKINGDR5', 'mag_name': 'VHS_J',
# 'mag': 'jAperMag3', 'distance': 'distance'}
}
datalab_offset_dict = {'des_dr1.main': {'ra': 'ra', 'dec': 'dec',
'mag': 'mag_auto_z',
'mag_name': 'mag_auto_z'}}
# To add more surveys from the VISTA Science Archive, this dictionary can be
# expanded:
vsa_info_dict = {'vhsdr6': ('VHS', 'VHSDR6', 'tilestack'),
# new, needs to be tested
'vikingdr5': ('VIKING', 'VIKINGDR5', 'tilestack')}
# Surveys as serviced by VSA, append list if necessary (see VSA dictionary
# above)
vsa_survey_list = ['vhsdr6', 'vikingdr5']
# all surveys that directly allow to download fits files
unzipped_download_list = ['desdr1', 'desdr2', 'ps1', 'vhsdr6', 'vikingdr5',
'2MASS', 'DSS2', 'skymapper', 'ukidss']
# ------------------------------------------------------------------------------
# Input table manipulation
# ------------------------------------------------------------------------------
# copied from http://docs.astropy.org/en/stable/_modules/astropy/io/fits/column.html
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
fits_to_numpy = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8',
'E': 'f4',
'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}
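# Illustrative helper (a sketch, not part of the original module and never called by it):
# shows how the fits_to_numpy mapping above can translate a simple FITS TFORM code such
# as "D" or "10A" into a numpy dtype string. The repeat-count handling for character
# columns is an assumption for demonstration purposes only.
def _example_fits_format_to_dtype(tform):
    """Translate a simple FITS column format code (e.g. 'D', '10A') to a numpy dtype string."""
    count = ''.join(ch for ch in tform if ch.isdigit())
    code = tform.strip()[-1].upper()
    np_code = fits_to_numpy[code]
    # character columns carry their repeat count in the dtype, e.g. '10A' -> 'a10'
    if np_code == 'a':
        return 'a' + (count or '1')
    return np_code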
def fits_to_hdf(filename):
""" Convert fits data table to hdf5 data table.
:param filename:
:return:
"""
hdu = fits.open(filename)
filename = os.path.splitext(filename)[0]
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Utilities for iterating constructing data sets and iterating over
DNA sequence data.
"""
import multiprocessing
import pandas as pd
import numpy as np
import functools
import math
from collections import defaultdict
from multiprocessing import Pool
import pyfasta
import pyBigWig
from pybedtools import Interval, BedTool
import logging
import tensorflow as tf
def filter_chromosomes(input_df, to_filter=None, to_keep=None):
"""
This function takes as input a pandas DataFrame
Parameters:
input_df (dataFrame): A pandas dataFrame, the first column is expected to
be a chromosome. Example: chr1.
    to_filter (list): optional; patterns of chromosome names to drop.
        Default None, i.e. no filtering is applied unless a list is supplied.
    to_keep (list): optional; chromosome names to retain.
        Default None, i.e. all chromosomes are kept unless a list is supplied.
Returns:
output_df (dataFrame): The filtered pandas dataFrame
"""
if to_filter:
output_df = input_df.copy()
for chromosome in to_filter:
# note: using the str.contains method to remove all
# contigs; for example: chrUn_JH584304
bool_filter = ~(output_df['chrom'].str.contains(chromosome))
output_df = output_df[bool_filter]
elif to_keep:
# keep only the to_keep chromosomes:
# note: this is slightly different from to_filter, because
# at a time, if only one chromosome is retained, it can be used
# sequentially.
filtered_chromosomes = []
for chromosome in to_keep:
filtered_record = input_df[(input_df['chrom'] == chromosome)]
filtered_chromosomes.append(filtered_record)
# merge the retained chromosomes
output_df = pd.concat(filtered_chromosomes)
else:
output_df = input_df
return output_df
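# Illustrative usage sketch (a hypothetical three-row BED-like frame; not part of
# the original module and never called by it):
def _example_filter_chromosomes():
    """Drop contig rows by pattern, or keep a single chromosome."""
    demo = pd.DataFrame({'chrom': ['chr1', 'chr2', 'chrUn_JH584304'],
                         'start': [0, 0, 0], 'end': [10, 10, 10]})
    without_contigs = filter_chromosomes(demo, to_filter=['chrUn'])  # removes the chrUn contig row
    only_chr1 = filter_chromosomes(demo, to_keep=['chr1'])           # retains only chr1
    return without_contigs, only_chr1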
def get_genome_sizes(genome_sizes_file, to_filter=None, to_keep=None):
"""
Loads the genome sizes file which should look like this:
chr1 45900011
chr2 10001401
...
chrX 9981013
This function parses this file, and saves the resulting intervals file
as a BedTools object.
"Random" contigs, chrUns and chrMs are filtered out.
Parameters:
genome_sizes_file (str): (Is in an input to the class,
can be downloaded from UCSC genome browser)
    to_filter (list): optional; patterns of chromosome names to drop.
        Default None, i.e. no filtering is applied unless a list is supplied.
    to_keep (list): optional; chromosome names to retain.
        Default None, i.e. all chromosomes are kept unless a list is supplied.
Returns:
A BedTools (from pybedtools) object containing all the chromosomes,
start (0) and stop (chromosome size) positions
"""
genome_sizes = pd.read_csv(genome_sizes_file, sep='\t',
header=None, names=['chrom', 'length'])
genome_sizes_filt = filter_chromosomes(genome_sizes, to_filter=to_filter,
to_keep=to_keep)
genome_bed_data = []
    # Note: modifying this to deal with an unexpected (incorrect) edge case in
    # BedTools shuffle behavior: while shuffling data, BedTools shuffle places
    # certain windows at the edge of a chromosome. Why it does that is unclear;
    # will open an issue on GitHub. It is probably placing the "start" coordinate
    # within the limits of the genome, with the end coordinate not fitting.
    # This leads to the fasta file returning an incomplete sequence (< 500 base
    # pairs), which breaks the generator feeding into Model.fit.
    # Therefore, buffer 550 bp from the chromosome edges in the genome sizes file
    # so that BedTools shuffle can place a window without running off the chromosome.
for chrom, sizes in genome_sizes_filt.values:
genome_bed_data.append(Interval(chrom, 0 + 550, sizes - 550))
genome_bed_data = BedTool(genome_bed_data)
return genome_bed_data
def load_chipseq_data(chip_peaks_file, genome_sizes_file, to_filter=None,
to_keep=None):
"""
Loads the ChIP-seq peaks data.
The chip peaks file is an events bed file:
chr1:451350
chr2:91024
...
chrX:870000
    This file can be constructed using any peak caller. We use multiGPS.
Also constructs a 1 bp long bedfile for each coordinate and a
BedTools object which can be later used to generate
negative sets.
"""
chip_seq_data = pd.read_csv(chip_peaks_file, delimiter=':', header=None,
names=['chrom', 'start'])
chip_seq_data['end'] = chip_seq_data['start'] + 1
chip_seq_data = filter_chromosomes(chip_seq_data, to_filter=to_filter,
to_keep=to_keep)
sizes = pd.read_csv(genome_sizes_file, names=['chrom', 'chrsize'],
sep='\t')
# filtering out any regions that are close enough to the edges to
# result in out-of-range windows when applying data augmentation.
chrom_sizes_dict = (dict(zip(sizes.chrom, sizes.chrsize)))
chip_seq_data['window_max'] = chip_seq_data['end'] + 500
chip_seq_data['window_min'] = chip_seq_data['start'] - 500
chip_seq_data['chr_limits_upper'] = chip_seq_data['chrom'].map(
chrom_sizes_dict)
chip_seq_data = chip_seq_data[chip_seq_data['window_max'] <=
chip_seq_data['chr_limits_upper']]
chip_seq_data = chip_seq_data[chip_seq_data['window_min'] >= 0]
chip_seq_data = chip_seq_data[['chrom', 'start', 'end']]
return chip_seq_data
def exclusion_regions(blacklist_file, chip_seq_data):
"""
This function takes as input a bound bed file (from multiGPS).
The assumption is that the bed file reports the peak center
For example: chr2 45 46
It converts these peak centers into 501 base pair windows, and adds them to
the exclusion list which will be used when constructing negative sets.
It also adds the mm10 blacklisted windows to the exclusion list.
Parameters:
blacklist_file (str): Path to the blacklist file.
chip_seq_data (dataFrame): The pandas chip-seq data loaded by load_chipseq_data
Returns:
exclusion_windows (BedTool): A bedtools object containing all exclusion windows.
bound_exclusion_windows (BedTool): A bedtool object containing only
those exclusion windows where there exists a binding site.
"""
temp_chip_file = chip_seq_data.copy() # Doesn't modify OG array.
temp_chip_file['start'] = temp_chip_file['start'] - 250
temp_chip_file['end'] = temp_chip_file['end'] + 250
if blacklist_file is None:
print('No blacklist file specified ...')
exclusion_windows = BedTool.from_dataframe(temp_chip_file[['chrom', 'start','end']])
else:
bound_exclusion_windows = BedTool.from_dataframe(temp_chip_file[['chrom', 'start','end']])
blacklist_exclusion_windows = BedTool(blacklist_file)
exclusion_windows = BedTool.cat(
*[blacklist_exclusion_windows, bound_exclusion_windows])
return exclusion_windows
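# Illustrative usage sketch (hypothetical peak centers; not part of the original
# module and never called by it): each 1 bp peak center becomes a 501 bp
# exclusion window when no blacklist file is given.
def _example_exclusion_regions():
    peaks = pd.DataFrame({'chrom': ['chr1', 'chr1'], 'start': [1000, 5000], 'end': [1001, 5001]})
    return exclusion_regions(None, peaks)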
def make_random_shift(coords, L, buffer=25):
"""
This function takes as input a set of bed coordinates dataframe
It finds the mid-point for each record or Interval in the bed file,
shifts the mid-point, and generates a windows of length L.
If training window length is L, then we must ensure that the
peak center is still within the training window.
Therefore: -L/2 < shift < L/2
    With the buffer added: -L/2 + buffer <= shift < L/2 - buffer
    # Note: the buffer (default 25) is a tunable hyper-parameter.
    Parameters:
        coords (pandas dataFrame): an input bedfile (first 3 column names: "chrom", "start", "end")
Returns:
shifted_coords(pandas dataFrame): The output bedfile with shifted coords
"""
low = int(-L/2 + buffer)
high = int(L/2 - buffer)
return (coords.assign(midpoint=lambda x: (x["start"]+x["end"])/2)
.astype({"midpoint": int})
.assign(midpoint=lambda x: x["midpoint"] + np.random.randint(low=low, high=high, size=len(coords)))
.apply(lambda s: pd.Series([s["chrom"], int(s["midpoint"]-L/2), int(s["midpoint"]+L/2)],
index=["chrom", "start", "end"]), axis=1))
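# Illustrative usage sketch (a hypothetical single peak; not part of the original
# module and never called by it): the 1 bp peak is expanded to a 500 bp window
# whose randomly shifted midpoint still keeps the original peak inside the window.
def _example_make_random_shift():
    peaks = pd.DataFrame({'chrom': ['chr1'], 'start': [10000], 'end': [10001]})
    return make_random_shift(peaks, L=500, buffer=25)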
def make_flank(coords, L, d):
"""
Make flanking regions by:
1. Shift midpoint by d
2. Expand midpoint to upstream/downstream by L/2
"""
return (coords.assign(midpoint=lambda x: (x["start"]+x["end"])/2)
.astype({"midpoint": int})
.assign(midpoint=lambda x: x["midpoint"] + d)
.apply(lambda s: pd.Series([s["chrom"], int(s["midpoint"]-L/2), int(s["midpoint"]+L/2)],
index=["chrom", "start", "end"]), axis=1))
def random_coords(gs, incl, excl, l=500, n=1000):
"""
Randomly sample n intervals of length l from the genome,
shuffle to make all intervals inside the desired regions
and outside exclusion regions
"""
return (BedTool()
.random(l=l, n=n, g=gs)
.shuffle(g=gs, incl=incl.fn, excl=excl.fn)
.to_dataframe()[["chrom", "start", "end"]])
def chop_genome(gs, chroms, excl, stride=500, l=500):
"""
Given a genome size file and chromosome list,
chop these chromosomes into intervals of length l,
with include/exclude regions specified
"""
def intervals_loop(chrom, start, stride, l, size):
intervals = []
while True:
if (start + l) < size:
intervals.append((chrom, start, start+l))
else:
break
start += stride
return pd.DataFrame(intervals, columns=["chrom", "start", "end"])
genome_sizes = (
|
pd.read_csv(gs, sep="\t", names=["chrom", "len"])
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, None]})
result = df['A']
expected = Series(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, True, None]})
result = df['A']
expected = Series(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().sort_values()
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_values()
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(df, expected)
expected = DataFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return DataFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self, float_frame, float_string_frame):
cop = float_frame.copy()
cop['E'] = cop['A']
assert 'E' not in float_frame
# copy objects
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
unpickled = tm.round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(
|
StringIO(data)
|
pandas.compat.StringIO
|
# Author: <NAME>
# Created on: August 2020
# Last modified on: September 17 2020
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from ipywidgets import interact, interact_manual, widgets, Layout, VBox, HBox, Button
from IPython.display import display, Javascript, Markdown, HTML, clear_output
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
def rerun_cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,\
IPython.notebook.get_selected_index()+2)'))
# The SEIR model differential equations.
def deriv(y, t, Delta, beta, mu, epsilon,gamma,alpha,delta):
"""
    This function contains the system of equations for the S.E.I.R. model, assuming non-constant population
    (births, natural deaths and deaths due to infection), as well as reinfection post recovery.
Args:
y (array): contains five floating point numbers S0, E0, I0, R0, D0 where each denotes initial conditions (float)
t (float): variable denoting time
Delta (float): rate of birth
beta (float): rate of contact with infectious
mu (float): rate of natural death
epsilon (float): rate of infectiousness
gamma (float): rate of recovery
        alpha (float): rate of death due to disease
delta (float): rate of reintegration into susceptible state
Returns:
[dS, dE, dI, dR, dD] (array)
dS: differential equation for Susceptible
dE: differential equation of Exposed
dI: differential equation for Infectious
dR: differential equation for Recovered
dD: differential equation for Deaths
"""
S, E, I, R, D = y
N = S + E + I + R
dS = Delta*N - beta*S*I/N - mu*S + delta*R
dE = beta*S*I/N - (mu + epsilon)*E
dI = epsilon*E - (gamma + mu + alpha)*I
dR = gamma*I - mu*R - delta*R
dD = alpha*I
return [dS,dE, dI, dR, dD]
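# For reference, the system implemented in deriv() above, written out
# (with N = S + E + I + R):
#   dS/dt = Delta*N - beta*S*I/N - mu*S + delta*R
#   dE/dt = beta*S*I/N - (mu + epsilon)*E
#   dI/dt = epsilon*E - (gamma + mu + alpha)*I
#   dR/dt = gamma*I - mu*R - delta*R
#   dD/dt = alpha*I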
def run_seir_model(Delta, beta, mu, epsilon,gamma,alpha,delta):
"""
This function creates an interactive plot simulating the S.E.I.R. model
Note that susceptible has been commented out for the Callysto teacher and student notebooks
Args:
Delta (float): rate of birth
beta (float): rate of contact with infectious
mu (float): rate of natural death
epsilon (float): rate of infectiousness
gamma (float): rate of recovery
        alpha (float): rate of death due to disease
delta (float): rate of reintegration into susceptible state
Returns:
seir_simulation (pandas DataFrame): contains data resulting from our model for each of the SEIRD stages
"""
# Initial number of infected and recovered individuals, I0 and R0.
S0, E0,I0, R0 ,D0 = 37000,0,1,0,0
# Total population, N.
N = S0 + E0 + I0 + R0
# Initial conditions vector
y0 = S0,E0, I0, R0, D0
    # Integrate the SEIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(Delta, beta, mu, epsilon,gamma,alpha,delta))
S, E,I, R, D = ret.T
# Build dataframe with the data from the model
seir_simulation =
|
pd.DataFrame({"Susceptible":S,"Exposed":E,"Infectious":I,"Recovered":R,"Deaths":D, "Time (days)":t})
|
pandas.DataFrame
|
"""
Import as:
import core.artificial_signal_generators as carsigen
"""
import datetime
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import scipy as sp
# import statsmodels as sm
import statsmodels.api as sm
import helpers.hdbg as hdbg
# TODO(*): statsmodels needs this import to work properly.
# import statsmodels.tsa.arima_process as smarima # isort: skip # noqa: F401 # pylint: disable=unused-import
_LOG = logging.getLogger(__name__)
# TODO(gp): Remove after PTask2335.
if True:
import gluonts
import gluonts.dataset.artificial as gda
import gluonts.dataset.artificial.recipe as rcp
import gluonts.dataset.repository.datasets as gdrd # isort: skip # noqa: F401 # pylint: disable=unused-import
import gluonts.dataset.util as gdu # isort: skip # noqa: F401 # pylint: disable=unused-import
def get_gluon_dataset_names() -> List[str]:
"""
    Get names of available Gluon datasets. Each of those names can be used
    in the `get_gluon_dataset` function.
:return: list of names
"""
return list(gluonts.dataset.repository.datasets.dataset_recipes.keys())
def get_gluon_dataset(
dataset_name: str = "m4_hourly",
train_length: Optional[int] = None,
test_length: Optional[int] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Load Gluon dataset, transform it into train and test dataframes.
The default `m4_hourly` time series look like this:
https://gluon-ts.mxnet.io/_images/examples_forecasting_tutorial_9_0.png
:param dataset_name: name of the dataset. Supported names can be
obtained using `get_gluon_dataset_names`.
:param train_length: length of the train dataset
:param test_length: length of the test dataset
:return: train and test dataframes
"""
dataset = gluonts.dataset.repository.datasets.get_dataset(
dataset_name, regenerate=False
)
train_entry = next(iter(dataset.train))
test_entry = next(iter(dataset.test))
train_df = gluonts.dataset.util.to_pandas(train_entry)
test_df = gluonts.dataset.util.to_pandas(test_entry)
train_length = train_length or train_df.shape[0]
test_length = test_length or test_df.shape[0]
hdbg.dassert_lte(train_length, train_df.shape[0])
hdbg.dassert_lte(test_length, test_df.shape[0])
train_df = pd.DataFrame(train_df.head(train_length), columns=["y"])
test_df = pd.DataFrame(test_df.head(test_length), columns=["y"])
return train_df, test_df
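# Illustrative usage sketch (the dataset name must be one returned by
# get_gluon_dataset_names(); the lengths below are arbitrary example values):
#   train_df, test_df = get_gluon_dataset("m4_hourly", train_length=100, test_length=48)
#   # Both frames hold a single "y" column with the head of the train/test series.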
def evaluate_recipe(
recipe: List[Tuple[str, Callable]], length: int, **kwargs: Any
) -> Dict[str, np.array]:
"""
Generate data based on recipe.
For documentation on recipes, see
https://gluon-ts.mxnet.io/_modules/gluonts/dataset/artificial/_base.html#RecipeDataset.
:param recipe: [(field, function)]
:param length: length of data to generate
:param kwargs: kwargs passed into gluonts.dataset.artificial.recipe.evaluate
:return: field names mapped to generated data
"""
return rcp.evaluate(recipe, length, **kwargs)
def add_recipe_components(
recipe: List[Tuple[str, Callable]], name: str = "signal"
) -> List[Tuple[str, rcp.Lifted]]:
"""
Append the sum of the components to the recipe.
:param recipe: [(field, function)]
:param name: name of the sum
:return: recipe with the sum component
"""
recipe = recipe.copy()
names = [name for name, _ in recipe]
addition = rcp.Add(names)
recipe.append((name, addition))
return recipe
def generate_recipe_dataset(
recipe: Union[Callable, List[Tuple[str, Callable]]],
freq: str,
start_date: pd.Timestamp,
max_train_length: int,
prediction_length: int,
num_timeseries: int,
trim_length_func: Callable = lambda x, **kwargs: 0,
) -> gluonts.dataset.common.TrainDatasets:
"""
Generate GluonTS TrainDatasets from recipe.
For more information on recipes, see
https://gluon-ts.mxnet.io/_modules/gluonts/dataset/artificial/_base.html#RecipeDataset
and
https://gluon-ts.mxnet.io/examples/synthetic_data_generation_tutorial/tutorial.html.
For `feat_dynamic_cat` and `feat_dynamic_real` generation pass in
`shape=(n_features, 0)`. GluonTS replaces `0` in shape with
`max_train_length + prediction_length`.
:param recipe: GluonTS recipe. Datasets with keys `feat_dynamic_cat`,
`feat_dynamic_real` and `target` are passed into `ListDataset`.
:param freq: frequency
:param start_date: start date of the dataset
:param max_train_length: maximum length of a training time series
:param prediction_length: length of prediction range
:param num_timeseries: number of time series to generate
:param trim_length_func: Callable f(x: int) -> int returning the
(shortened) training length
:return: GluonTS TrainDatasets (with `train` and `test` attributes).
"""
names = [name for name, _ in recipe]
hdbg.dassert_in("target", names)
metadata = gluonts.dataset.common.MetaData(freq=freq)
recipe_dataset = gda.RecipeDataset(
recipe,
metadata,
max_train_length,
prediction_length,
num_timeseries,
trim_length_fun=trim_length_func,
data_start=start_date,
)
return recipe_dataset.generate()
class ArmaProcess:
"""
A thin wrapper around statsmodels `ArmaProcess`, with Pandas support.
"""
def __init__(self, ar_coeffs: List[float], ma_coeffs: List[float]) -> None:
"""
Initialize `arma_process` using given coefficients.
Useful properties include
- arroots
- isinvertible
- isstationary
- maroots
Further details are available at
- https://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_process.ArmaProcess.html # pylint: disable=line-too-long
"""
self.ar_coeffs = ar_coeffs
self.ma_coeffs = ma_coeffs
self.arma_process = sm.tsa.ArmaProcess.from_coeffs(
self.ar_coeffs, self.ma_coeffs
)
def generate_sample(
self,
date_range_kwargs: Dict[str, Any],
scale: float = 1,
burnin: float = 0,
seed: Optional[int] = None,
) -> pd.Series:
"""
Generate an ARMA realization.
This wraps statsmodels' `generate_sample`, placing the values in a
`pd.Series` with index specified through the date range parameters.
:param date_range_kwargs: kwargs to forward to `pd.date_range`, e.g.,
- "start", "end", "periods", "freq"
:param scale: standard deviation of noise
:param burnin: number of leading samples to drop
        :param seed: seed for np.random.seed
"""
if seed is None:
seed = 0
np.random.seed(seed)
# Create index and infer number of samples.
index = pd.date_range(**date_range_kwargs)
nsample = index.size
# Generate the time series.
data = self.arma_process.generate_sample(
nsample=nsample, scale=scale, burnin=burnin
)
# Create series index and name.
name = f"arma({len(self.ar_coeffs)},{len(self.ma_coeffs)})"
return pd.Series(index=index, data=data, name=name)
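# Illustrative usage sketch (coefficients, dates and seed are arbitrary example values):
#   process = ArmaProcess(ar_coeffs=[0.5], ma_coeffs=[-0.3])
#   sample = process.generate_sample(
#       {"start": "2020-01-01", "periods": 10, "freq": "D"}, scale=1.0, seed=42
#   )
#   # `sample` is a pd.Series named "arma(1,1)" indexed by the 10-day date range.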
class MultivariateNormalProcess:
"""
A wrapper around sp.stats.multivariate_normal, with Pandas support.
"""
def __init__(
self,
mean: Optional[pd.Series] = None,
cov: Optional[pd.DataFrame] = None,
allow_singular: Optional[bool] = None,
) -> None:
"""
Optionally initialize mean and covariance of multivariate normal RV.
"""
self.mean = self._maybe_return_values(mean, pd.Series)
self.cov = self._maybe_return_values(cov, pd.DataFrame)
self.allow_singular = allow_singular
def set_cov_from_inv_wishart_draw(
self, dim: int, seed: Optional[int] = None
) -> None:
"""
Set covariance matrix equal to a draw from Inverse Wishart.
- Defaults to least informative proper distribution
        - Takes dof = dim, scale = identity matrix of dimension `dim`
https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.stats.invwishart.html#scipy.stats.invwishart
"""
scale = np.identity(dim)
# TODO(Paul): Replace with numpy if available.
rv = sp.stats.invwishart(df=dim, scale=scale)
self.cov = rv.rvs(random_state=seed)
def generate_sample(
self, date_range_kwargs: Dict[str, Any], seed: Optional[int] = None
) -> pd.DataFrame:
"""
Generate a multivariate normal distribution sample over index.
https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.stats.multivariate_normal.html#scipy.stats.multivariate_normal
"""
index = pd.date_range(**date_range_kwargs)
nsample = index.size
# TODO(Paul): Replace with numpy.
rv = sp.stats.multivariate_normal(
mean=self.mean,
cov=self.cov,
allow_singular=self.allow_singular,
)
# Setting the seed through scipy interface seems to be jittery (see
# AmpTask1649).
data = rv.rvs(size=nsample, random_state=seed)
return pd.DataFrame(index=index, data=data)
@staticmethod
def _maybe_return_values(
obj: Union[pd.Series, pd.DataFrame, None],
expected_type: Union[pd.Series, pd.DataFrame],
) -> Union[None, np.array]:
"""
Return values of series or dataframe or else None if object is None.
This is a convenience method used in initialization.
"""
if obj is None:
return None
if isinstance(obj, expected_type):
return obj.values
raise ValueError(f"Unsupported type {type(obj)}")
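# Illustrative usage sketch (dimension, dates and seeds are arbitrary example values):
#   mvn = MultivariateNormalProcess()
#   mvn.set_cov_from_inv_wishart_draw(dim=4, seed=0)
#   df = mvn.generate_sample({"start": "2020-01-01", "periods": 5, "freq": "B"}, seed=1)
#   # `df` has 5 rows (one per business day) and 4 jointly normal columns.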
class PoissonProcess:
"""
A thin wrapper around sp.stats.poisson, with Pandas support.
We interpret the values as the number of events that occurred in the
last interval.
"""
def __init__(self, mu: float) -> None:
"""
Set shape parameter.
"""
self.mu = mu
def generate_sample(
self, date_range_kwargs: Dict[str, Any], seed: Optional[int] = None
) -> pd.Series:
"""
Generate a Poisson sample over index.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html
"""
index =
|
pd.date_range(**date_range_kwargs)
|
pandas.date_range
|
# -*- coding: utf-8 -*-
"""
Author
------
<NAME>
Email
-----
<EMAIL>
Created on
----------
- Mon Sep 05 12:00:00 2016
Modifications
-------------
- Mon Sep 05 12:00:00 2016
Aims
----
- SVR hyper-parameters
"""
import numpy as np
from astropy.table import Table
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from copy import deepcopy
import pandas as pd
from joblib import Parallel, delayed
__all__ = ['summarize_hyperparameters_to_table', 'summarize_table']
# ############################ #
# to summarize grid parameters #
# ############################ #
def hyperparameter_grid_stats(svrs, pivot=("param_C", "param_gamma"),
n_jobs=10, verbose=10):
""" statistics for GridSearchCV results """
stats_train = []
stats_test = []
r = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(hyperparameter_grid_stats_)(svr, pivot=pivot) for svr in svrs)
for i in range(len(r)):
stats_train.append(r[i][0])
stats_test.append(r[i][1])
return stats_train, stats_test
def hyperparameter_grid_stats_(svr, pivot=("param_C", "param_gamma")):
""" statistics for GridSearchCV results """
if isinstance(svr, GridSearchCV):
# yes, that's it
cvr = svr.cv_results_
stats_train_ = deepcopy(cvr)
stats_test_ = deepcopy(cvr)
for k in cvr.keys():
if k.find("test") > -1:
stats_train_.pop(k)
elif k.find("train") > -1:
stats_test_.pop(k)
if pivot is not None:
return (
|
pd.DataFrame(stats_train_)
|
pandas.DataFrame
|
import logging
from typing import Optional
import numpy as np
import pandas as pd
import ppqm
from ppqm import chembridge
from rdkit.Chem import Mol
import regiosqm_lib
_logger = logging.getLogger(__name__)
mopac_calculation = {
"pm3": None,
"eps": 4.8,
"cycles": 200,
}
def predict_regioselective_dataframe(mol: Mol, mopac_options={}) -> pd.DataFrame:
"""Expects neutralized molecule"""
rows = []
# Protonate tautomers
atoms, protomers = regiosqm_lib.generate_protonations(mol)
for atom, protomer in zip(atoms, protomers):
mol_ = chembridge.copy_molobj(mol)
mol_.__sssAtoms = [atom]
row = {"target": mol_, "molobj": protomer, "atom": atom, "tautomer_energy": 0.0}
rows.append(row)
pdf_protomers =
|
pd.DataFrame(rows)
|
pandas.DataFrame
|
"""
Compare Monte Carlo and Quasi-Monte Carlo methods when
evaluating the Keister function with varying dimension
"""
from qmcpy import *
from workouts.mc_vs_qmc.integrations_keister import integrations_dict
from time import time
from numpy import arange, nan
import pandas as pd
def vary_dimension(dimension=[1,2,3], abs_tol=0, rel_tol=.1, trials=1):
"""
Record solution, wall-clock time, and number of samples
for integrating the Keister function with varying dimensions
"""
header = ['Stopping Criterion','Distribution','MC/QMC','dimension','solution','n_samples','time']
results =
|
pd.DataFrame(columns=header,dtype=float)
|
pandas.DataFrame
|
import pandas
from scipy.stats import shapiro
from scipy.stats import spearmanr
from class_framework import sig_level
# loading data
cor_data = pandas.read_csv('country_data.csv', sep = '\t').set_index('Unnamed: 0').sort_index(axis=1, ascending=True)
wdi_data = cor_data[(cor_data.index.str.contains('wdi'))].T
wdi_data.columns = wdi_data.columns.str.replace('wdi_', '')
cba_data = cor_data[(cor_data.index.str.contains('cba'))].T
cba_data.columns = cba_data.columns.str.replace('cba_', '')
all_data = pandas.merge(cba_data.stack().to_frame().reset_index().rename(columns = {0:'cba', 'level_0':'country', 'Unnamed: 0':'year'}),
wdi_data.stack().to_frame().reset_index().rename(columns = {0:'wdi', 'level_0':'country', 'Unnamed: 0':'year'}),
how = 'left', left_on = ['country', 'year'], right_on = ['country', 'year']).drop(['country', 'year'], axis = 1)
year_list = cba_data.columns.tolist()
# descriptive statistics
## all data
descriptives_all = pandas.DataFrame(all_data.describe()).rename(columns = {'cba':'All_combined_cba', 'wdi':'All_combined_wdi'}).T
print(descriptives_all)
## by countries
descriptives_country = pandas.DataFrame(cba_data.T.describe()).join(pandas.DataFrame(wdi_data.T.describe()), lsuffix = '_cba', rsuffix = '_wdi').T
## by years
descriptives_year = pandas.DataFrame(cba_data.describe()).join(pandas.DataFrame(wdi_data.describe()), lsuffix = '_cba', rsuffix = '_wdi').T
# normality testing
## all data
norm_test_all = pandas.DataFrame(index = ['All_combined'], columns = ['cba_sw', 'cba_p', 'wdi_sw', 'wdi_p', 'normal_dist'])
norm_test_all['cba_sw']['All_combined'] = shapiro(all_data['cba'])[0]
norm_test_all['cba_p']['All_combined'] = shapiro(all_data['cba'])[1]
norm_test_all['wdi_sw']['All_combined'] = shapiro(all_data['wdi'])[0]
norm_test_all['wdi_p']['All_combined'] = shapiro(all_data['wdi'])[1]
if norm_test_all['cba_p'][0] > 0.05 and norm_test_all['wdi_p'][0] > 0.05:
norm_test_all['normal_dist'][0] = True
else:
norm_test_all['normal_dist'][0] = False
## by countries
norm_test_country = pandas.DataFrame(index = cba_data.index, columns = ['cba_sw', 'cba_p', 'wdi_sw', 'wdi_p', 'normal_dist'])
for i in range(len(cba_data)):
norm_test_country['cba_sw'][i] = shapiro(cba_data.iloc[i])[0]
norm_test_country['cba_p'][i] = shapiro(cba_data.iloc[i])[1]
norm_test_country['wdi_sw'][i] = shapiro(wdi_data.iloc[i])[0]
norm_test_country['wdi_p'][i] = shapiro(wdi_data.iloc[i])[1]
if norm_test_country['cba_p'][i] > 0.05 and norm_test_country['wdi_p'][i] > 0.05:
norm_test_country['normal_dist'][i] = True
else:
norm_test_country['normal_dist'][i] = False
## by years
norm_test_year = pandas.DataFrame(index = cba_data.T.index, columns = ['cba_sw', 'cba_p', 'wdi_sw', 'wdi_p', 'normal_dist'])
for i in range(len(cba_data.T)):
norm_test_year['cba_sw'][i] = shapiro(cba_data.T.iloc[i])[0]
norm_test_year['cba_p'][i] = shapiro(cba_data.T.iloc[i])[1]
norm_test_year['wdi_sw'][i] = shapiro(wdi_data.T.iloc[i])[0]
norm_test_year['wdi_p'][i] = shapiro(wdi_data.T.iloc[i])[1]
if norm_test_year['cba_p'][i] > 0.05 and norm_test_year['wdi_p'][i] > 0.05:
norm_test_year['normal_dist'][i] = True
else:
norm_test_year['normal_dist'][i] = False
# correlations using spearman's rho, because distributions were not normal
## all data
cor_data_all = pandas.DataFrame(index = ['All_combined'], columns = ['spearman_correlation', 'p_value', 'significance'])
cor_data_all['spearman_correlation'][0] = spearmanr(all_data['cba'], all_data['wdi'])[0]
cor_data_all['p_value'][0] = spearmanr(all_data['cba'], all_data['wdi'])[1]
cor_data_all['significance'][0] = sig_level(cor_data_all['p_value'][0])
print(cor_data_all)
## by country
cor_data_country = cor_data.T.reset_index().rename(columns = {'Unnamed: 0':'index'}).set_index('index')
cor_list = []
p_list = []
country_list = []
for i in range(len(cor_data_country)):
    # correlate country i's cba values (first half of columns) with the same country's wdi values (second half)
    cor_list.append(spearmanr(cor_data_country.iloc[i][0 : int(len(cor_data_country.columns) / 2)].tolist(),
                              cor_data_country.iloc[i][int(len(cor_data_country.columns) / 2) : int(len(cor_data_country.columns))].tolist()
                              )[0])
    p_list.append(spearmanr(cor_data_country.iloc[i][0 : int(len(cor_data_country.columns) / 2)].tolist(),
                            cor_data_country.iloc[i][int(len(cor_data_country.columns) / 2) : int(len(cor_data_country.columns))].tolist()
                            )[1])
country_list.append(cor_data_country.index[i])
cor_data_country =
|
pandas.DataFrame({'country': country_list, 'spearman_correlation': cor_list, 'p_value': p_list})
|
pandas.DataFrame
|
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import pydeck as pdk
@st.cache(persist=True)
def load_data(nrows):
data =
|
pd.read_csv(DATA_URL, nrows=nrows)
|
pandas.read_csv
|
# Modified PointNetVLAD code: https://github.com/mikacuy/pointnetvlad
# Modified by: <NAME>, <NAME>, <NAME> (Poznan University of Technology 2021)
import pandas as pd
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import KDTree
import pickle
#####For training and test data split#####
x_width = 100
y_width = 100
buffer = 10
# points in easting, northing (x, y) format
p1_bl_corner = [332_530, -3_750_950]
p2_bl_corner = [332_250, -3_751_240]
p3_bl_corner = [332_630, -3_751_450]
p4_bl_corner = [332_555, -3_751_125]
p = [p1_bl_corner, p2_bl_corner, p3_bl_corner, p4_bl_corner]
# modified, since regions are defined by bottom left corner + width and buffer is added
def check_in_test_set(northing, easting, points, x_width, y_width):
in_test_set = False
in_buffer_set = False
for point in points:
# points in easting, northing (x, y) format
if (point[0] - buffer) < easting < (point[0] + x_width + buffer) and (point[1] - buffer) < northing < (point[1] + y_width + buffer):
# in buffer range - test or reject:
if (point[0]) < easting < (point[0] + x_width) and (point[1]) < northing < (point[1] + y_width):
# in test range
in_test_set = True
else:
in_buffer_set = True
break
return in_test_set, in_buffer_set
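# Illustrative worked example (coordinates chosen for demonstration only):
# a point inside one of the 100 x 100 regions defined above is assigned to the
# test set, while a point that only falls in the 10-unit buffer ring around a
# region is flagged as buffer and excluded from both sets.
#   check_in_test_set(-3_750_900, 332_560, p, x_width, y_width)  # -> (True, False)
#   check_in_test_set(-3_750_955, 332_525, p, x_width, y_width)  # -> (False, True)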
##########################################
def output_to_file(output, filename):
with open(filename, 'wb') as handle:
pickle.dump(output, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done ", filename)
def construct_query_and_database_sets(base_path, runs_folder, folders, pointcloud_fols, filenames, p, output_name):
database_trees = []
test_trees = []
for folder, filename in zip(folders, filenames):
print(folder)
df_database =
|
pd.DataFrame(columns=['file', 'northing', 'easting'])
|
pandas.DataFrame
|
import datetime
import time
import pandas as pd
from loguru import logger
import ofanalysis.jiuquan.fund_info as fund_info
import ofanalysis.const as const
import ofanalysis.utility as ut
class FundRank:
"""
    Fund-related rankings.
    Fetches data from Jiuquan (韭圈儿) and stores the raw records in mongodb->raw....
    When constructing an instance, pass a six-digit fund code to pull that fund's data from the database.
    If the fund is not in the database, the data is fetched from Jiuquan and stored in the database.
    If the fund is in the database, its retrieve date is compared with the current run date; if more than
    a quarter has passed, the database is refreshed.
    If force_update_db is passed, the data is fetched from Jiuquan and stored in the database regardless.
"""
def __init__(self, fund_code: str, cate_l2: str, force_update_db: bool = False):
self.__v_init(fund_code, cate_l2)
fund_category_list = fund_info.FundInfo(
            fund_code=fund_code).category  # fetch the categories this fund belongs to
        # analyses here are all equity-type, so manually add the equity-type category ranking
        fund_category_list.append('韭圈 - 权益型')
        if self.cate_l2 not in fund_category_list:  # check whether the given category is in this fund's list
            raise Exception(
                f'Fund <{self.code}> does not have category <{self.cate_l2}>; try one of: {fund_category_list}')
        # fetch data from the db
item_list = ut.db_get_dict_from_mongodb(
mongo_db_name=const.MONGODB_DB_JIUQUAN,
col_name=const.MONGODB_COL_JQ_FUND_RANK_RAW,
query_dict={
'fund_code': self.code,
'cate_l2': self.cate_l2
}
)
        if len(item_list) != 4:  # no data, or redundant records: delete redundant data first, then insert fresh data
ut.db_del_dict_from_mongodb(
mongo_db_name=const.MONGODB_DB_JIUQUAN,
col_name=const.MONGODB_COL_JQ_FUND_RANK_RAW,
query_dict={
'fund_code': self.code,
'cate_l2': self.cate_l2
}
)
item_list = self.__save_fund_rank_raw_to_db(
self.__retrieve_fund_rank_raw_from_jiuquan())
else:
item_raw = item_list[0]
# 判断取回的4条记录是否retrieve_date相等,如果不等,删除重新取
flag_retrieve_date_equal = True
for item in item_list:
if item['retrieve_date'] != item_raw['retrieve_date']:
flag_retrieve_date_equal = False
break
existed_q = pd.Period(item_raw['retrieve_date'], 'Q')
now_q = pd.Period(self.__today, 'Q')
if (now_q > existed_q) or force_update_db or (
                flag_retrieve_date_equal is False):  # the data is from a past quarter, an update was forced, or the stored records are inconsistent; refresh from Jiuquan
ut.db_del_dict_from_mongodb(
mongo_db_name=const.MONGODB_DB_JIUQUAN,
col_name=const.MONGODB_COL_JQ_FUND_RANK_RAW,
query_dict={
'fund_code': self.code,
'cate_l2': self.cate_l2
}
)
item_list = self.__save_fund_rank_raw_to_db(
self.__retrieve_fund_rank_raw_from_jiuquan())
self.__populate_item_raw_to_object(item_list)
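    # Illustrative usage sketch (the fund code below is a hypothetical example):
    #   rank = FundRank(fund_code='000001', cate_l2='韭圈 - 权益型')
    #   rank.df_period_increase   # DataFrame of period-return rankings
    #   rank.df_year_increase     # DataFrame of yearly-return rankings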
def __v_init(self, fund_code: str, cate_l2: str, ):
self.code = fund_code
self.cate_l2 = cate_l2
self.__today = datetime.date.today()
def __retrieve_fund_rank_raw_from_jiuquan(self):
        sign_info = {
            1: '阶段涨幅',    # period/stage returns
            2: '季度涨幅',    # quarterly returns
            3: '年度涨幅',    # yearly returns
            4: '基金大爆炸'   # "fund big bang" (Jiuquan feature name)
        }
list_dict_fund_rank_raw = []
for sign in range(1, 5):
request_data = {
"code": self.code,
"sign": sign,
                # if the category is '韭圈 - 权益型' (equity type), the id parameter should be 13
"id": 13 if self.cate_l2 == '韭圈 - 权益型' else self.cate_l2,
"name": self.cate_l2,
                # "data_source": "xichou",  # this field differs across funds; left commented out for now, does not seem to matter
"type": "pc",
"version": const.JIUQUAN_VERSION,
"authtoken": const.JIUQUAN_TOKEN,
"act_time": int(round(time.time() * 1000))
}
dict_fund_rank_single_raw = ut.request_post_json(
api_url='https://api.jiucaishuo.com/v2/fund-lists/fundachieve',
headers=const.JIUQUAN_HEADER,
request_param=request_data
)['data']
            if (dict_fund_rank_single_raw is None) or (len(dict_fund_rank_single_raw) == 0):
                logger.info('Get %s fund rank failed...' % self.code)
                continue  # skip this sign: do not try to store an empty or missing payload
            dict_fund_rank_single_raw['rank_name'] = sign_info[sign]
            list_dict_fund_rank_raw.append(dict_fund_rank_single_raw)
return list_dict_fund_rank_raw
def __save_fund_rank_raw_to_db(self, list_dict_fund_rank_raw):
'''
        Before inserting into the database, add fund_code, cate_l2 and retrieve_date fields to each passed-in dict.
        :param list_dict_fund_rank_raw: list of raw rank dicts retrieved from Jiuquan
        :return: the enriched list of dicts
'''
refined_list_dict_fund_rank_raw = []
for dict_fund_rank_raw in list_dict_fund_rank_raw:
dict_fund_rank_raw['fund_code'] = self.code
dict_fund_rank_raw['cate_l2'] = self.cate_l2
dict_fund_rank_raw['retrieve_date'] = self.__today.strftime(
'%Y%m%d')
refined_list_dict_fund_rank_raw.append(dict_fund_rank_raw)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_JIUQUAN,
col_name=const.MONGODB_COL_JQ_FUND_RANK_RAW,
target_dict=refined_list_dict_fund_rank_raw
)
return refined_list_dict_fund_rank_raw
def __populate_item_raw_to_object(self, item_list):
dict_raw = dict()
for item in item_list:
dict_raw[item['rank_name']] = item
self.df_period_increase = pd.DataFrame(dict_raw['阶段涨幅']['dl_list'])
self.df_quarter_increase = pd.DataFrame(dict_raw['季度涨幅']['dl_list'])
self.df_year_increase = pd.DataFrame(dict_raw['年度涨幅']['dl_list'])
self.df_dabaozha =
|
pd.DataFrame(dict_raw['基金大爆炸']['dl_list'])
|
pandas.DataFrame
|
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 5)
browser.get('https://www.beyondblue.org.au/get-support/online-forums/')
forum_name_list = ['Depression', 'Suicidal thoughts and self-harm']
forum_xpath_list = ['//*[@id="MainContentPlaceholder_C006_forumsFrontendList_ctl00_ctl00_pnlMain"]/table/tbody[2]/tr[2]/td[1]/a', '//*[@id="MainContentPlaceholder_C006_forumsFrontendList_ctl00_ctl00_pnlMain"]/table/tbody[2]/tr[4]/td[1]/a']
forum_count = 0
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import typing
import pandas as pd
import copy
import os
import random
import collections
import typing
import logging
import json
import re
import io
import string
import time
import cgitb
import sys
from ast import literal_eval
from itertools import combinations
from d3m import container
from d3m import utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.container import Dataset as d3m_Dataset
from d3m.base import utils as d3m_utils
from d3m.metadata.base import DataMetadata, ALL_ELEMENTS
from collections import defaultdict
from datamart import TabularVariable, ColumnRelationship, AugmentSpec
from datamart_isi import config
from datamart_isi.augment import Augment
from datamart_isi.joiners.rltk_joiner import RLTKJoinerGeneral
from datamart_isi.joiners.rltk_joiner import RLTKJoinerWikidata
from datamart_isi.utilities.utils import Utils
from datamart_isi.utilities.timeout import timeout_call
from datamart_isi.utilities.singleton import singleton
from datamart_isi.utilities import d3m_wikifier
from datamart_isi.utilities.d3m_metadata import MetadataGenerator
from datamart_isi.utilities.download_manager import DownloadManager
from datamart_isi.cache.wikidata_cache import QueryCache
from datamart_isi.cache.general_search_cache import GeneralSearchCache
from datamart_isi.cache.metadata_cache import MetadataCache
from datamart_isi.cache.materializer_cache import MaterializerCache
# from datamart_isi.joiners.join_result import JoinResult
# from datamart_isi.joiners.joiner_base import JoinerType
__all__ = ('DatamartQueryCursor', 'Datamart', 'DatasetColumn', 'DatamartSearchResult', 'AugmentSpec',
'TabularJoinSpec', 'TemporalGranularity', 'ColumnRelationship', 'DatamartQuery',
'VariableConstraint', 'TabularVariable', 'VariableConstraint')
Q_NODE_SEMANTIC_TYPE = config.q_node_semantic_type
TEXT_SEMANTIC_TYPE = config.text_semantic_type
ATTRIBUTE_SEMANTIC_TYPE = config.attribute_semantic_type
AUGMENTED_COLUMN_SEMANTIC_TYPE = config.augmented_column_semantic_type
TIME_SEMANTIC_TYPE = config.time_semantic_type
MAX_ENTITIES_LENGTH = config.max_entities_length
P_NODE_IGNORE_LIST = config.p_nodes_ignore_list
SPECIAL_REQUEST_FOR_P_NODE = config.special_request_for_p_nodes
AUGMENT_RESOURCE_ID = config.augmented_resource_id
DEFAULT_DATAMART_URL = config.default_datamart_url
TIME_COLUMN_MARK = config.time_column_mark
random.seed(42)
class DatamartQueryCursor(object):
"""
Cursor to iterate through Datamarts search results.
"""
def __init__(self, augmenter, search_query, supplied_data, need_run_wikifier=None, connection_url=None, **kwargs):
"""
        :param augmenter: the manager used to parse the query and search the general Datamart part (blaze graph);
                          because it searches quickly and needs per-instance updates, this part should not be cached
        :param search_query: query generated from the Datamart class
        :param supplied_data: supplied data for the search
        :param need_run_wikifier: optional parameter that controls whether the wikifier needs to run to get the
                                  wikidata-related parts; it can improve speed when processing large data
        :param connection_url: control parameter for the connection url
"""
self._logger = logging.getLogger(__name__)
if connection_url:
self._logger.info("Using user-defined connection url as " + connection_url)
self.connection_url = connection_url
else:
connection_url = os.getenv('DATAMART_URL_ISI', DEFAULT_DATAMART_URL)
self.connection_url = connection_url
self.supplied_data = supplied_data
if type(self.supplied_data) is d3m_Dataset:
self.res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=self.supplied_data, resource_id=None)
else:
self.supplied_dataframe = self.supplied_data
self._logger.debug("Current datamart connection url is: " + self.connection_url)
self.augmenter = augmenter
self.search_query = search_query
self.current_searching_query_index = 0
self.remained_part = None
self.wikidata_cache_manager = QueryCache()
self.q_nodes_columns = list()
self.q_node_column_names = set()
if need_run_wikifier is None:
self.need_run_wikifier = self._check_need_wikifier_or_not()
else:
self.need_run_wikifier = need_run_wikifier
self.consider_wikifier_columns_only = kwargs.get("consider_wikifier_columns_only", False)
self.augment_with_time = kwargs.get("augment_with_time", False)
self.consider_time = kwargs.get("consider_time", True)
if self.consider_wikifier_columns_only:
self._find_q_node_columns()
self.search_limit_amount = 20
def get_next_page(self, *, limit: typing.Optional[int] = 20, timeout: int = None) \
-> typing.Optional[typing.Sequence['DatamartSearchResult']]:
"""
Return the next page of results. The call will block until the results are ready.
Note that the results are not ordered; the first page of results can be returned first simply because it was
found faster, but the next page might contain better results. The caller should make sure to check
`DatamartSearchResult.score()`.
Parameters
----------
limit : int or None
Maximum number of search results to return. None means no limit.
timeout : int
Maximum number of seconds before returning results. An empty list might be returned if it is reached.
Returns
-------
Sequence[DatamartSearchResult] or None
A list of `DatamartSearchResult's, or None if there are no more results.
"""
if timeout is None:
timeout = 1800
self._logger.info("Set time limit to be " + str(timeout) + " seconds.")
if limit is not None:
self.search_limit_amount = limit
# if need to run wikifier, run it before any search
if self.current_searching_query_index == 0 and self.need_run_wikifier:
self.supplied_data = self.run_wikifier(self.supplied_data)
# if already remained enough part
current_result = self.remained_part or []
if len(current_result) > limit:
self.remained_part = current_result[limit:]
current_result = current_result[:limit]
return current_result
# start searching
while self.current_searching_query_index < len(self.search_query):
time_start = time.time()
self._logger.debug("Start searching on query No." + str(self.current_searching_query_index))
if self.search_query[self.current_searching_query_index].search_type == "wikidata":
# TODO: now wikifier can only automatically search for all possible columns and do exact match
search_res = timeout_call(timeout, self._search_wikidata, [])
elif self.search_query[self.current_searching_query_index].search_type == "general":
search_res = timeout_call(timeout, self._search_datamart, [])
elif self.search_query[self.current_searching_query_index].search_type == "vector":
search_res = timeout_call(timeout, self._search_vector, [])
elif self.search_query[self.current_searching_query_index].search_type == "geospatial":
search_res = timeout_call(timeout, self._search_geospatial_data, [])
else:
raise ValueError("Unknown search query type for " +
self.search_query[self.current_searching_query_index].search_type)
time_used = (time.time() - time_start)
timeout -= time_used
if search_res is not None:
self._logger.info("Running search on query No." + str(self.current_searching_query_index) + " used "
+ str(time_used) + " seconds and finished.")
self._logger.info("Remained searching time: " + str(timeout) + " seconds.")
elif timeout <= 0:
self._logger.error(
"Running search on query No." + str(self.current_searching_query_index) + " timeout!")
break
else:
self._logger.error("Running search on query No." + str(self.current_searching_query_index) + " failed!")
self.current_searching_query_index += 1
if search_res is not None:
self._logger.info("Totally {} results found.".format(str(len(search_res))))
current_result.extend(search_res)
if len(current_result) == 0:
self._logger.warning("No search results found!")
return None
else:
current_result = sorted(current_result, key=lambda x: x.score(), reverse=True)
if limit is not None and len(current_result) > limit:
self.remained_part = current_result[limit:]
current_result = current_result[:limit]
return current_result
def _check_need_wikifier_or_not(self) -> bool:
"""
Check whether the wikifier needs to be run. If a wikidata-type column is detected, its semantic type is also
checked and corrected when the Q-node semantic type is missing.
:return: a bool value
True means no Q-node column was detected, so the wikifier needs to run
False means a Q-node column was already detected, so running the wikifier can be skipped
"""
need_wikifier_or_not, self.supplied_data = d3m_wikifier.check_and_correct_q_nodes_semantic_type(self.supplied_data)
if not need_wikifier_or_not:
# if not need to run wikifier, we can find q node columns now
self._find_q_node_columns()
return need_wikifier_or_not
def _find_q_node_columns(self) -> None:
"""
Inner function used to find q node columns by semantic type
:return: None
"""
if len(self.q_nodes_columns) > 0 or len(self.q_node_column_names) > 0:
self._logger.warning("Q node columns has already been found once! Should not run again")
self.q_node_column_names = set()
self.q_nodes_columns = list()
if type(self.supplied_data) is d3m_Dataset:
selector_base_type = "ds"
else:
selector_base_type = "df"
# check whether Qnode is given in the inputs, if given, use this to search
metadata_input = self.supplied_data.metadata
for i in range(self.supplied_dataframe.shape[1]):
if selector_base_type == "ds":
metadata_selector = (self.res_id, ALL_ELEMENTS, i)
else:
metadata_selector = (ALL_ELEMENTS, i)
if Q_NODE_SEMANTIC_TYPE in metadata_input.query(metadata_selector)["semantic_types"]:
# if no required variables given, attach any Q nodes found
self.q_nodes_columns.append(i)
self.q_node_column_names.add(self.supplied_dataframe.columns[i])
def _find_time_ranges(self) -> dict:
"""
Inner function used to find the time information from the search queries
:return: a dict with start time, end time and time granularity
"""
info = defaultdict(list)
for i, each_search_query in enumerate(self.search_query):
if each_search_query.search_type == "general":
for each_variable in each_search_query.variables:
if each_variable.key.startswith(TIME_COLUMN_MARK):
start_time, end_time, granularity = each_variable.values.split("____")
info["start"].append(pd.to_datetime(start_time).isoformat())
info["end"].append(pd.to_datetime(end_time).isoformat())
info["granularity"].append(Utils.map_granularity_to_value(granularity))
# if no time information found
if len(info) == 0:
return {}
time_column_info = {
"start": min(info["start"]),
"end": max(info["end"]),
"granularity": min(info["granularity"]),
}
return time_column_info
def run_wikifier(self, input_data: d3m_Dataset) -> d3m_Dataset:
"""
Run the wikifier and return a d3m Dataset containing the wikified results on success;
otherwise return the original input.
:return: the (possibly wikified) d3m Dataset
"""
self._logger.debug("Start running wikifier for supplied data in search...")
results = d3m_wikifier.run_wikifier(supplied_data=input_data)
self._logger.info("Wikifier running finished.")
self.need_run_wikifier = False
self._find_q_node_columns()
return results
def _search_wikidata(self, query=None, supplied_data: typing.Union[d3m_DataFrame, d3m_Dataset] = None,
search_threshold=0.5) -> typing.List["DatamartSearchResult"]:
"""
The search function used for wikidata search
:param query: JSON object describing the query.
:param supplied_data: the data you are trying to augment.
:param search_threshold: the minimum fraction of Q nodes in which a property must appear to be kept
:return: list of search results of DatamartSearchResult
"""
self._logger.debug("Start running search on wikidata...")
if supplied_data is None:
supplied_data = self.supplied_data
wikidata_results = []
try:
if len(self.q_nodes_columns) == 0:
self._logger.warning("No wikidata Q nodes detected on corresponding required_variables!")
self._logger.warning("Will skip wikidata search part")
return wikidata_results
else:
self._logger.info("Wikidata Q nodes inputs detected! Will search with it.")
self._logger.info("Totally " + str(len(self.q_nodes_columns)) + " Q nodes columns detected!")
# do a wikidata search for each Q nodes column
for each_column in self.q_nodes_columns:
self._logger.debug("Start searching on column " + str(each_column))
q_nodes_list = self.supplied_dataframe.iloc[:, each_column].tolist()
p_count = collections.defaultdict(int)
p_nodes_needed = []
# old method, the generated results are not very good
"""
http_address = 'http://minds03.isi.edu:4444/get_properties'
headers = {"Content-Type": "application/json"}
requests_data = str(q_nodes_list)
requests_data = requests_data.replace("'", '"')
r = requests.post(http_address, data=requests_data, headers=headers)
results = r.json()
for each_p_list in results.values():
for each_p in each_p_list:
p_count[each_p] += 1
"""
# TODO: temporary change to call wikidata service, may change back in the future
# Q node format: (wd:Q23)(wd:Q42)
q_node_query_part = ""
# sort the Q nodes so that repeated queries produce the same order and therefore the same cache hash
unique_qnodes = set(q_nodes_list)
# updated v2020.1.7, use blacklist to filter q nodes
unique_qnodes = unique_qnodes - DownloadManager.fetch_blacklist_nodes()
unique_qnodes = list(unique_qnodes)
unique_qnodes.sort()
# updated v2020.1.6: do not skip even when only a few unique Q nodes are present
if len(unique_qnodes) == 0:
self._logger.warning("No Q nodes detected on column No.{} need to search, skip.".format(str(each_column)))
continue
if len(unique_qnodes) > config.max_q_node_query_size:
unique_qnodes = random.sample(unique_qnodes, config.max_q_node_query_size)
for each in unique_qnodes:
if len(each) > 0:
q_node_query_part += "(wd:" + each + ")"
sparql_query = "select distinct ?item ?property where \n{\n VALUES (?item) {" + q_node_query_part \
+ " }\n ?item ?property ?value .\n ?wd_property wikibase:directClaim ?property ." \
+ " values ( ?type ) \n {\n ( wikibase:Quantity )\n" \
+ " ( wikibase:Time )\n ( wikibase:Monolingualtext )\n }" \
+ " ?wd_property wikibase:propertyType ?type .\n}\norder by ?item ?property "
results = self.wikidata_cache_manager.get_result(sparql_query)
if results is None:
# a None response means the Wikidata query failed
self._logger.error("Can't get wikidata search results for column No." + str(each_column) + "(" +
self.supplied_dataframe.columns[each_column] + ")")
continue
self._logger.debug("Response from server for column No." + str(each_column) + "(" +
self.supplied_dataframe.columns[each_column] + ")" +
" received, start parsing the returned data from server.")
# count occurrences and keep the P nodes whose appearance rate is higher than the threshold
for each in results:
if "property" not in each:
self._logger.error("Wikidata query returned wrong results!!! Please check!!!")
raise ValueError("Wikidata query returned wrong results!!! Please check!!!")
p_count[each['property']['value'].split("/")[-1]] += 1
for key, val in p_count.items():
if float(val) / len(unique_qnodes) >= search_threshold:
p_nodes_needed.append(key)
wikidata_search_result = {"p_nodes_needed": p_nodes_needed,
"target_q_node_column_name": self.supplied_dataframe.columns[each_column]}
wikidata_results.append(DatamartSearchResult(search_result=wikidata_search_result,
supplied_data=supplied_data,
query_json=query,
search_type="wikidata")
)
self._logger.debug("Running search on wikidata finished.")
return wikidata_results
except Exception as e:
self._logger.error("Searching with wikidata failed!")
self._logger.debug(e, exc_info=True)
finally:
return wikidata_results
def _search_datamart(self) -> typing.List["DatamartSearchResult"]:
"""
function used for searching in datamart with blaze graph database
:return: List[DatamartSearchResult]
"""
self._logger.debug("Start searching on datamart...")
search_result = []
variables_search = self.search_query[self.current_searching_query_index].variables_search
keywords_search = self.search_query[self.current_searching_query_index].keywords_search
# NOTE: title is not used; it may be removed later
variables, title = dict(), dict()
variables_temp = dict() # this temp is specially used to store variable for time query
if self.augment_with_time:
time_information = self._find_time_ranges()
if len(time_information) == 0:
self._logger.warning("Required to search with time but no time column found from supplied data!")
return []
for each_variable in self.search_query[self.current_searching_query_index].variables:
# updated v2019.12.11: only run a time-only search when augment_with_time is set to False
if each_variable.key.startswith(TIME_COLUMN_MARK):
if self.augment_with_time:
self._logger.warning("Not search with time only if augment_with_time is set to True")
return []
elif self.consider_time is False:
self._logger.warning("Not search with time only if consider_time is set to False")
return []
else:
variables_temp[each_variable.key.split("____")[1]] = each_variable.values
start_time, end_time, granularity = each_variable.values.split("____")
variables_search = {
"temporal_variable":
{
"start": start_time,
"end": end_time,
"granularity": granularity
}
}
else:
# updated v2019.12.18: when considering wikifier columns only, do not search on other columns
if self.consider_wikifier_columns_only and each_variable.key not in self.q_node_column_names:
self._logger.warning(
"Set to consider wikifier columns only, will not search for column {}".format(each_variable.key))
return []
variables[each_variable.key] = each_variable.values
query = {"keywords": self.search_query[self.current_searching_query_index].keywords,
"variables": variables,
"keywords_search": keywords_search,
"variables_search": variables_search,
}
if self.augment_with_time:
query["variables_time"] = time_information
query_results = self.augmenter.query_by_sparql(query=query,
dataset=self.supplied_data,
consider_wikifier_columns_only=self.consider_wikifier_columns_only,
augment_with_time=self.augment_with_time,
limit_amount=self.search_limit_amount)
if len(variables_temp) != 0:
query["variables"] = variables_temp
for i, each_result in enumerate(query_results):
# self._logger.debug("Get returned No." + str(i) + " query result as ")
# self._logger.debug(str(each_result))
# the special way to calculate the score of temporal variable search
if "start_time" in each_result.keys() and "end_time" in each_result.keys():
if self.augment_with_time:
tv = time_information
else:
tv = query["variables_search"]["temporal_variable"]
start_date = pd.to_datetime(tv["start"]).timestamp()
end_date = pd.to_datetime(tv["end"]).timestamp() # query time
start_time =
|
pd.to_datetime(each_result['start_time']['value'])
|
pandas.to_datetime
|
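A minimal, self-contained sketch of how pandas.to_datetime and Timestamp.timestamp() can feed a temporal-overlap score like the one being assembled above; the dates are invented for illustration and this is not the project's own scoring code.
import pandas as pd
# Hypothetical query window and dataset coverage (invented dates, not from the snippet above).
query_start = pd.to_datetime("2018-01-01").timestamp()
query_end = pd.to_datetime("2020-12-31").timestamp()
dataset_start = pd.to_datetime("2019-06-01").timestamp()
dataset_end = pd.to_datetime("2021-03-01").timestamp()
# Fraction of the query window covered by the dataset's time range.
overlap = max(0.0, min(query_end, dataset_end) - max(query_start, dataset_start))
score = overlap / (query_end - query_start)
print(round(score, 3))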
import requests
import datetime as dt
import pandas
import html
STOCK = "TSLA"
COMPANY_NAME = "Tesla"
STOCK_ENDPOINT = "https://www.alphavantage.co/query"
STOCK_PARAMS = {
"function": "TIME_SERIES_DAILY",
"symbol": STOCK,
"apikey": "<KEY>",
}
response = requests.get(STOCK_ENDPOINT, STOCK_PARAMS)
response.raise_for_status()
df =
|
pandas.DataFrame()
|
pandas.DataFrame
|
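A standalone sketch of the pandas.DataFrame call completed above; the nested-dict layout and the "4. close" field are assumptions about how a daily price payload is commonly shaped, not the verified Alpha Vantage schema.
import pandas as pd
# Hypothetical daily payload shaped like {date: {field: value}} (an assumption, not real API output).
raw = {
    "2024-01-02": {"1. open": "250.0", "4. close": "255.3"},
    "2024-01-03": {"1. open": "255.0", "4. close": "251.1"},
}
df = pd.DataFrame(raw).T                # one row per trading day
df.index = pd.to_datetime(df.index)     # parse the date strings into a DatetimeIndex
df = df.astype(float)                   # numeric columns for analysis
print(df["4. close"].pct_change())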
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.est_cost_params import create_state_matrix
from ruspy.estimation.est_cost_params import derivative_loglike_cost_params
from ruspy.estimation.estimation import estimate
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.cost_functions import cubic_costs
from ruspy.model_code.cost_functions import cubic_costs_dev
TEST_FOLDER = TEST_RESOURCES_DIR + "replication_test/"
@pytest.fixture(scope="module")
def inputs():
out = {}
disc_fac = 0.9999
num_states = 90
scale = 1e-8
init_dict = {
"model_specifications": {
"discount_factor": disc_fac,
"number_states": num_states,
"maint_cost_func": "cubic",
"cost_scale": scale,
},
"optimizer": {"approach": "NFXP", "algorithm": "scipy_lbfgsb"},
}
df =
|
pd.read_pickle(TEST_FOLDER + "group_4.pkl")
|
pandas.read_pickle
|
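A small, self-contained illustration of pandas.read_pickle, the call completed above; the frame and file name below are hypothetical and only demonstrate the round trip.
import pandas as pd
# Hypothetical frame and file name; only the to_pickle/read_pickle round trip is being shown.
frame = pd.DataFrame({"state": [0, 1, 2], "decision": [0, 0, 1]})
frame.to_pickle("example_group.pkl")
restored = pd.read_pickle("example_group.pkl")
assert restored.equals(frame)           # pickling preserves dtypes and the index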
import pandas as pd
from collections import defaultdict
import os
import copy
from .ev_parser import EvParserBase
from .utils import parse_time, validate_path
class Region2DParser(EvParserBase):
"""Class for parsing EV 2D region (EVR) files.
Using this class directly is not recommended; use Regions2D instead.
"""
def __init__(self, input_file=None):
super().__init__(input_file, 'EVR')
self.raw_range = None
self.min_depth = None # Set to replace -9999.9900000000 range values which are EVR min range
self.max_depth = None # Set to replace 9999.9900000000 range values which are EVR max range
def _parse(self, fid, convert_time=False, convert_range_edges=False, offset=0):
"""Reads an open file and returns the file metadata and region information"""
def _region_metadata_to_dict(line):
"""Assigns a name to each value in the metadata line for each region"""
top_y = self.swap_range_edge(line[9]) if convert_range_edges else line[9]
bottom_y = self.swap_range_edge(line[12]) if convert_range_edges else line[12]
top_y = float(top_y) + offset
bottom_y = float(bottom_y) + offset
left_x = parse_time(f'D{line[7]}T{line[8]}') if convert_time else f'D{line[7]}T{line[8]}'
right_x = parse_time(f'D{line[10]}T{line[11]}') if convert_time else f'D{line[10]}T{line[11]}'
return {
'structure_version': line[0], # 13 currently
'point_count': line[1], # Number of points in the region
'selected': line[3], # Always 0
'creation_type': line[4], # How the region was created
'dummy': line[5], # Always -1
'bounding_rectangle_calculated': line[6], # 1 if next 4 fields valid. 0 otherwise
# Date encoded as CCYYMMDD and times in HHmmSSssss
# Where CC=Century, YY=Year, MM=Month, DD=Day, HH=Hour, mm=minute, SS=second, ssss=0.1 milliseconds
'bounding_rectangle_left_x': left_x, # Time and date of bounding box left x
'bounding_rectangle_top_y': top_y, # Top of bounding box
'bounding_rectangle_right_x': right_x, # Time and date of bounding box right x
'bounding_rectangle_bottom_y': bottom_y, # Bottom of bounding box
}
def _points_to_dict(line):
"""Takes a line with point information and creates a tuple (x, y) for each point"""
points = {}
for point_num, idx in enumerate(range(0, len(line), 3)):
x = f'D{line[idx]}T{line[idx + 1]}'
if convert_time:
x = parse_time(x)
y = line[idx + 2]
if convert_range_edges:
if y == '9999.9900000000' and self.max_depth is not None:
y = float(self.max_depth)
elif y == '-9999.9900000000' and self.min_depth is not None:
y = float(self.min_depth)
points[point_num] = [x, y]
return points
# Read header containing metadata about the EVR file
file_type, file_format_number, echoview_version = self.read_line(fid, True)
file_metadata = {
'file_name': os.path.splitext(os.path.basename(self.input_file))[0],
'file_type': file_type,
'file_format_number': file_format_number,
'echoview_version': echoview_version
}
regions = defaultdict(dict)
n_regions = int(self.read_line(fid))
# Loop over all regions in file
for r in range(n_regions):
fid.readline() # blank line separates each region
region_metadata = self.read_line(fid, True)
rid = region_metadata[2] # Region ID (unique for each region)
regions[rid]['metadata'] = _region_metadata_to_dict(region_metadata)
# Add notes to region data
n_note_lines = int(self.read_line(fid))
regions[rid]['notes'] = [self.read_line(fid) for _ in range(n_note_lines)]
# Add detection settings to region data
n_detection_setting_lines = int(self.read_line(fid))
regions[rid]['detection_settings'] = [self.read_line(fid) for _ in range(n_detection_setting_lines)]
# Add classification to region data
regions[rid]['metadata']['region_classification'] = self.read_line(fid)
# Add point x and y
points_line = self.read_line(fid, True)
# For type: 0=bad (No data), 1=analysis, 3=fishtracks, 4=bad (empty water)
regions[rid]['metadata']['type'] = points_line.pop()
regions[rid]['points'] = _points_to_dict(points_line)
regions[rid]['metadata']['name'] = self.read_line(fid)
return file_metadata, regions
def to_dataframe(self, **kwargs):
# Parse EVR file if it hasn't already been done
if not self.output_data:
self.parse_file(**kwargs)
df = pd.DataFrame()
# Save file metadata for each point
metadata = pd.Series(self.output_data['metadata'])
row = []
# Loop over each region
for rid, region in self.output_data['regions'].items():
# Save region information for each point
region_metadata = pd.Series(region['metadata'])
region_notes = pd.Series({'notes': region['notes']})
detection_settings =
|
pd.Series({'detection_settings': region['detection_settings']})
|
pandas.Series
|
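A hedged sketch of how per-region pandas.Series objects like the one completed above can be flattened into a single DataFrame; the region dictionary and field names are illustrative, not the parser's actual output.
import pandas as pd
# Hypothetical minimal region structure (an assumption for illustration only).
regions_example = {"1": {"notes": ["ok"], "points": {0: ["D20200101T120000", 5.0]}}}
rows = []
for rid, region in regions_example.items():
    base = pd.Series({"region_id": rid, "notes": region["notes"]})
    for point_num, (x, y) in region["points"].items():
        rows.append(pd.concat([base, pd.Series({"point": point_num, "x": x, "y": y})]))
df = pd.DataFrame(rows).reset_index(drop=True)   # one row per point, region fields repeated
print(df)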
import pandas as pd
from pandas.tseries.offsets import DateOffset
import configparser
import fire
import os
import math
import numpy as np
import qlib
from qlib.data import D
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
from sklearn.metrics.pairwise import cosine_similarity
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.util import getLatestFile, getFolderNameInConfig
def analyzeHistoricalValue(ifUseNewIssues = True, ifUseOldIssues = True, ifUseWatchList = False, ifUseAdjustFactorToLatestDay = False, ifPrintFundCode = False):
'''
Args:
ifUseNewIssues: whether to include funds whose history is shorter than daysRangeToAnalyze
ifUseOldIssues: whether to include funds whose history is longer than daysRangeToAnalyze
ifUseWatchList: whether to plot only the funds listed in config/watchlist.txt
ifUseAdjustFactorToLatestDay: whether to use the adjustFactorToLatestDay generated by trainGBDT.py
ifPrintFundCode: whether to print fund codes on the figure; if so, the image will be larger
'''
print ("------------------------ Begin to analyze historical value... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
minDaysRange = int(cf.get("Parameter", "minDaysRange"))
daysRangeInOneYear = int(cf.get("Parameter", "daysRangeInOneYear"))
if ifUseAdjustFactorToLatestDay:
dfAdjustFactorToLatestDay = pd.read_csv(cf.get("Analyze", "pathOfDfAdjustFactorToLatestDay"), dtype={'Unnamed: 0':object})
# read watchlist
watchlist = []
for line in open("./config/watchlist.txt", "r"): # ['110011', '161028', '110020', '180003', '006479', '007994', '001015']
watchlist.append(line.split("\n")[0])
# we should ignore some strange funds
ignorelist = []
for line in open("./config/ignorelist.txt", "r"): # ['009317', '009763', '009764']
ignorelist.append(line.split("\n")[0])
# qlib init
qlib.init(provider_uri='data/bin')
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
daysRangeToAnalyze = (lastDayToAnalyze - firstDayToAnalyze).days # 1094
count = 0
riskListForOldIssues = []
returnListForOldIssues = []
fundCodeListForOldIssues = []
riskListForNewIssues = []
returnListForNewIssues = []
fundCodeListForNewIssues = []
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0] # 000001
# exclude some funds
if fundCode in ignorelist:
continue
if ifUseWatchList and fundCode not in watchlist:
continue
if count % 100 == 0:
print ("\ncount = %s\tfundCode = %s" % (count, fundCode)) # 180003
try:
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue',
'($AccumulativeNetAssetValue - Ref($AccumulativeNetAssetValue, 1)) / Ref($AccumulativeNetAssetValue, 1)'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
'AccumulativeNetAssetValue',
'GrowthRatio'
]
#df = df.unstack(level=0)
df["datetime"] = df.index.levels[1]
# discard values before the date when GrowthRatio is too large (abs >= 1.0)
df["AbsoluteGrowthRatio"] = df["GrowthRatio"].abs()
if df[df["AbsoluteGrowthRatio"] > 1].shape[0] > 0:
df = df.loc[0:df[df["AbsoluteGrowthRatio"] > 1].first_valid_index() - 1]
# reset the index
df = df.dropna(axis=0, subset=['datetime', 'GrowthRatio']).reset_index(drop=True)
# like http://fundf10.eastmoney.com/jjjz_010476.html, the return in 30 days is 26%, so the annualized return is too high
if df.shape[0] <= minDaysRange:
continue
# count the days between first day and last day
day = df['datetime']
# TODO: how about fund 519858, which traded on 2018-01-28 (a Sunday)
firstDayInThisFund = day[day.first_valid_index()] # 2018-02-12 00:00:00, 2018-02-10 is a Saturday
lastDayInThisFund = day[day.last_valid_index()] # 2021-02-10 00:00:00
daysRange = (lastDayInThisFund - firstDayInThisFund).days # 1094
# get the value in important days
earliestNetValue = df[df['datetime'] == firstDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 3.49
lastestNetValue = df[df['datetime'] == lastDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 4.046
# standardize the risk over one year
# assume the value is a list like (0, 1, 0, 1, ...), so the growth ratio is a list like (1, -1, 1, -1, ...)
# set ddof to 0 to normalize the risk by n, not (n - 1), so the std is 1 and does not depend on daysRange
riskCurrent = df["GrowthRatio"].std(ddof=0)
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRange*daysRangeInOneYear
if not ifUseNewIssues:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
continue
else:
# use the latest value to reflect the true percentage gain
# this is worthwhile if the fund has risen rapidly recently but showed little change over a long preceding period
if ifUseAdjustFactorToLatestDay:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
# if the fund code locates in dfAdjustFactorToLatestDay, adjust the latest value and days range
adjustedFactor = dfAdjustFactorToLatestDay[fundCode]
adjustedFactor = adjustedFactor[adjustedFactor.first_valid_index()] # 0.987561058590916
lastestNetValue = lastestNetValue * adjustedFactor
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRangeToAnalyze*daysRangeInOneYear
# new issues
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
riskListForNewIssues.append(riskCurrent)
returnListForNewIssues.append(returnCurrent)
fundCodeListForNewIssues.append(fundCode)
else:
riskListForOldIssues.append(riskCurrent)
returnListForOldIssues.append(returnCurrent)
fundCodeListForOldIssues.append(fundCode)
count += 1
except Exception as e:
print ("fundCode = %s\terror = %s" % (fundCode, e))
continue
if not ifUseWatchList and ifPrintFundCode:
plt.figure(figsize=(10, 10))
if ifUseOldIssues:
plt.scatter(riskListForOldIssues, returnListForOldIssues, c='k')
if ifUseNewIssues:
plt.scatter(riskListForNewIssues, returnListForNewIssues, c='k')
plt.xlabel("Risk")
plt.ylabel("Annualized return")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
if ifPrintFundCode:
if ifUseOldIssues:
for i in range(len(fundCodeListForOldIssues)):
x = riskListForOldIssues[i]
y = returnListForOldIssues[i]
fundCode = fundCodeListForOldIssues[i]
plt.text(x, y, fundCode, fontsize=10)
if ifUseNewIssues:
for i in range(len(fundCodeListForNewIssues)):
x = riskListForNewIssues[i]
y = returnListForNewIssues[i]
fundCode = fundCodeListForNewIssues[i]
plt.text(x, y, fundCode, fontsize=10)
nameOfPicture = "risk_return"
nameOfPicture = nameOfPicture + "_watchlist" if ifUseWatchList else nameOfPicture + "_noWatchlist"
nameOfPicture = nameOfPicture + "_useNewIssues" if ifUseNewIssues else nameOfPicture + "_notUseNewIssues"
nameOfPicture = nameOfPicture + "_useOldIssues" if ifUseOldIssues else nameOfPicture + "_notUseOldIssues"
nameOfPicture = nameOfPicture + "_useAdjustFactor" if ifUseAdjustFactorToLatestDay else nameOfPicture + "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfPicture)
print ("------------------------ Done. ------------------------")
def getAverageSlopeForFundsInSameRange(ifUseAdjustFactorToLatestDay=True):
'''
In the return-risk figure the return is roughly proportional to the risk in most cases,
so the slope (return/risk) can be used as a feature of each fund. To summarize funds
in the same range, the average slope is used to represent them.
'''
print ("------------------------ Begin to get average slope for funds in same range... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
minDaysRange = int(cf.get("Parameter", "minDaysRange"))
daysRangeInOneYear = int(cf.get("Parameter", "daysRangeInOneYear"))
# qlib init
qlib.init(provider_uri='data/bin')
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
daysRangeToAnalyze = (lastDayToAnalyze - firstDayToAnalyze).days # 1094
divideNumber = int(cf.get("Analyze", "divideNumber"))
# if use adjustFactorToLatestDay generated by trainGBDT.py
if ifUseAdjustFactorToLatestDay:
dfAdjustFactorToLatestDay = pd.read_csv(cf.get("Analyze", "pathOfDfAdjustFactorToLatestDay"), dtype={'Unnamed: 0':object})
dictOfSlopeInCountNetValue = {}
dictOfReturnInCountNetValue = {}
dictOfRiskInCountNetValue = {}
count = 0
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0] # 000001
if count % 100 == 0:
print ("count = %s\tfundCode = %s" % (count, fundCode)) # 180003
try:
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue',
'($AccumulativeNetAssetValue - Ref($AccumulativeNetAssetValue, 1)) / Ref($AccumulativeNetAssetValue, 1)'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
'AccumulativeNetAssetValue',
'GrowthRatio'
]
#df = df.unstack(level=0)
df["datetime"] = df.index.levels[1]
# discard values before the date when GrowthRatio is too large (abs >= 1.0)
df["AbsoluteGrowthRatio"] = df["GrowthRatio"].abs()
if df[df["AbsoluteGrowthRatio"] > 1].shape[0] > 0:
df = df.loc[0:df[df["AbsoluteGrowthRatio"] > 1].first_valid_index() - 1]
# reset the index
df = df.dropna(axis=0, subset=['datetime', 'GrowthRatio']).reset_index(drop=True)
# like http://fundf10.eastmoney.com/jjjz_010476.html, the return in 30 days is 26%, so the annualized return is too high
if df.shape[0] <= minDaysRange:
continue
# count the days between first day and last day
day = df['datetime']
# TODO: how about fund 519858, which traded on 2018-01-28 (a Sunday)
firstDayInThisFund = day[day.first_valid_index()] # 2018-02-12 00:00:00, 2018-02-10 is a Saturday
lastDayInThisFund = day[day.last_valid_index()] # 2021-02-10 00:00:00
daysRange = (lastDayInThisFund - firstDayInThisFund).days # 1094
# get the value in important days
earliestNetValue = df[df['datetime'] == firstDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 3.49
lastestNetValue = df[df['datetime'] == lastDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 4.046
# standardize the risk over one year
# assume the value is a list like (0, 1, 0, 1, ...), so the growth ratio is a list like (1, -1, 1, -1, ...)
# set ddof to 0 to normalize the risk by n, not (n - 1), so the std is 1 and does not depend on daysRange
riskCurrent = df["GrowthRatio"].std(ddof=0)
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRange*daysRangeInOneYear
# use the latest value to reflect the true percentage gain
# this is worthwhile if the fund has risen rapidly recently but showed little change over a long preceding period
if ifUseAdjustFactorToLatestDay:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
# if the fund code locates in dfAdjustFactorToLatestDay, adjust the latest value and days range
adjustedFactor = dfAdjustFactorToLatestDay[fundCode]
adjustedFactor = adjustedFactor[adjustedFactor.first_valid_index()] # 0.987561058590916
lastestNetValue = lastestNetValue * adjustedFactor
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRangeToAnalyze*daysRangeInOneYear
slope = returnCurrent / riskCurrent # 28.136361711631576
# TODO: exclude 005337
if math.isnan(slope):
continue
# bucket the funds by period rather than by a single day
approximateDaysRange = daysRange // divideNumber * divideNumber
if approximateDaysRange not in dictOfSlopeInCountNetValue.keys():
dictOfSlopeInCountNetValue[approximateDaysRange] = []
dictOfSlopeInCountNetValue[approximateDaysRange].append(slope)
if approximateDaysRange not in dictOfReturnInCountNetValue.keys():
dictOfReturnInCountNetValue[approximateDaysRange] = []
dictOfReturnInCountNetValue[approximateDaysRange].append(returnCurrent)
if approximateDaysRange not in dictOfRiskInCountNetValue.keys():
dictOfRiskInCountNetValue[approximateDaysRange] = []
dictOfRiskInCountNetValue[approximateDaysRange].append(riskCurrent)
count += 1
except Exception as e:
print ("fundCode = %s\terror = %s" % (fundCode, e))
continue
# ------------------------ Plot Return/Risk ------------------------
plt.xlabel("Count of trading days")
plt.ylabel("Return/Risk")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
for key in dictOfSlopeInCountNetValue.keys():
n = len(dictOfSlopeInCountNetValue[key]) # Number of observations
mean = sum(dictOfSlopeInCountNetValue[key]) / n # Mean of the data
deviations = [(x - mean) ** 2 for x in dictOfSlopeInCountNetValue[key]] # Square deviations
standardDeviation = math.sqrt(sum(deviations) / n) # standard deviation
plt.errorbar(key, mean, standardDeviation, c='k', marker='+')
nameOfReturnRisk = "averageSlopeForReturnRisk_%s" % divideNumber
if ifUseAdjustFactorToLatestDay:
nameOfReturnRisk += "_useAdjustFactor"
else:
nameOfReturnRisk += "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfReturnRisk)
# ------------------------ Plot Return ------------------------
plt.clf()
plt.xlabel("Count of trading days")
plt.ylabel("Return")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
listOfMean = []
for key in dictOfReturnInCountNetValue.keys():
n = len(dictOfReturnInCountNetValue[key]) # Number of observations
mean = sum(dictOfReturnInCountNetValue[key]) / n # Mean of the data
listOfMean.append(mean)
deviations = [(x - mean) ** 2 for x in dictOfReturnInCountNetValue[key]] # Square deviations
standardDeviation = math.sqrt(sum(deviations) / n) # standard deviation
plt.errorbar(key, mean, standardDeviation, c='k', marker='+')
nameOfReturn = "averageReturn_%s" % divideNumber
# get the standard deviation of mean
standardDeviationOfReturn = np.std(listOfMean, ddof = 0)
print ("standardDeviationOfReturn = %s" % standardDeviationOfReturn)
if ifUseAdjustFactorToLatestDay:
nameOfReturn += "_useAdjustFactor"
else:
nameOfReturn += "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfReturn)
# ------------------------ Plot Risk ------------------------
plt.clf()
plt.xlabel("Count of trading days")
plt.ylabel("Risk")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
for key in dictOfRiskInCountNetValue.keys():
n = len(dictOfRiskInCountNetValue[key]) # Number of observations
mean = sum(dictOfRiskInCountNetValue[key]) / n # Mean of the data
deviations = [(x - mean) ** 2 for x in dictOfRiskInCountNetValue[key]] # Square deviations
standardDeviation = math.sqrt(sum(deviations) / n) # standard deviation
plt.errorbar(key, mean, standardDeviation, c='k', marker='+')
nameOfRisk = "averageRisk_%s" % divideNumber
if ifUseAdjustFactorToLatestDay:
nameOfRisk += "_useAdjustFactor"
else:
nameOfRisk += "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfRisk)
print ("------------------------ Done. ------------------------")
def getDfMerge():
print ("------------------------ Begin to get dfMerge... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
count = 0
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0]
if count <= 700:
count += 1
continue
if count % 100 == 0:
print ("count = %s\tfundCode = %s" % (count, fundCode))
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
"AccumulativeNetAssetValue_%s" % fundCode
]
#df = df.unstack(level=0)
try:
df["datetime"] = df.index.levels[1]
except:
continue
# reset the index
df = df.dropna(axis=0, subset=['datetime']).reset_index(drop=True)
try:
dfMerge = pd.merge(dfMerge, df, on=['datetime'], how='outer')
except:
dfMerge = df
count += 1
dfMerge.to_csv("data/dfMerge.csv")
print ("------------------------ Done. ------------------------")
return dfMerge
def getCorrelationMatrixForOneFund(ifGetCorrFromFile = True, ifGetDfMergeFromFile = True, fundCodeToAnalyze="110011"):
print ("------------------------ Begin to get Pearson's correlation matrix for fund '%s'... ------------------------" % fundCodeToAnalyze)
# qlib init
qlib.init(provider_uri='data/bin')
if ifGetCorrFromFile:
if not os.path.exists("data/corr.csv"):
ifGetCorrFromFile = False
if not ifGetCorrFromFile:
if ifGetDfMergeFromFile:
if not os.path.exists("data/dfMerge.csv"):
ifGetDfMergeFromFile = False
if ifGetDfMergeFromFile:
dfMerge = pd.read_csv("data/dfMerge.csv", index_col=0)
else:
dfMerge = getDfMerge()
dfMerge = dfMerge.drop(labels='datetime',axis=1)
# count correlation
corr = dfMerge.corr()
corr.to_csv("data/corr.csv")
else:
corr =
|
pd.read_csv("data/corr.csv", index_col=0)
|
pandas.read_csv
|
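A minimal sketch of the corr.csv round trip used above: compute a Pearson correlation matrix with DataFrame.corr, persist it with to_csv, and read it back with pandas.read_csv while keeping the index column. The fund values below are made up.
import pandas as pd
# Invented fund values; only the corr()/to_csv()/read_csv() round trip is being demonstrated.
values = pd.DataFrame({"fund_a": [1.00, 1.10, 1.30], "fund_b": [2.00, 2.10, 2.05]})
corr = values.corr()                                  # Pearson correlation by default
corr.to_csv("corr_example.csv")
corr_back = pd.read_csv("corr_example.csv", index_col=0)
print(corr_back.loc["fund_a", "fund_b"])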
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)  # api: pandas.util.testing.box_expected
import numpy as np
import nibabel as nib
import pandas as pd
from glob import glob
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import settings as s
source_dir=s.projectfolder+'7NETS_vertex/4_estimatedModels' #entagled
#source_dir=s.projectfolder+'9_estimatedModels_functional_cosine'#connectivity strength
#source_dir=s.projectfolder+'12_PositionVar_estimatedModels' #position
files=glob(os.path.join(source_dir, "*.csv.gz"))
count=0
nonzero=[]
for i in range(len(files)):
data=pd.read_csv(files[i],compression='gzip')
status=data['Status Code'][0]
if status!=0:
#print(files[i])
#print(status)
count=count+1
tmpfile=os.path.split(files[i])
nonzero.append(tmpfile[1])
print(count)
Nonzero=np.array(nonzero,dtype=object)
Nonzero=np.expand_dims(Nonzero,axis=1)
table = pd.DataFrame(Nonzero)  # api: pandas.DataFrame
from pyprophet.data_handling import check_sqlite_table
import pandas as pd
import os
import sqlite3
import click
from .report import plot_scores
def precursor_report(con, max_rs_peakgroup_qvalue):
idx_query = '''
CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID);
CREATE INDEX IF NOT EXISTS idx_precursor_glycopeptide_mapping_precursor_id ON PRECURSOR_GLYCOPEPTIDE_MAPPING (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_precursor_glycopeptide_mapping_peptide_id ON PRECURSOR_GLYCOPEPTIDE_MAPPING (GLYCOPEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_glycopeptide_glycopeptide_id ON GLYCOPEPTIDE (ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID);
CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID);
CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID);
CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID);
'''
if check_sqlite_table(con, "FEATURE_MS1"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);"
if check_sqlite_table(con, "FEATURE_MS2"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);"
if check_sqlite_table(con, "SCORE_MS2"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);"
if check_sqlite_table(con, "SCORE_MS2_PART_PEPTIDE"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_part_peptide_feature_id ON SCORE_MS2_PART_PEPTIDE (FEATURE_ID);"
if check_sqlite_table(con, "SCORE_MS2_PART_GLYCAN"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_part_glycan_feature_id ON SCORE_MS2_PART_GLYCAN (FEATURE_ID);"
if max_rs_peakgroup_qvalue is not None:
qvalue_filter = 'WHERE SCORE_MS2.QVALUE < %s' % max_rs_peakgroup_qvalue
else:
qvalue_filter = ''
query = '''
SELECT RUN.ID AS id_run,
GLYCOPEPTIDE.ID AS id_glycopeptide,
PEPTIDE.ID AS id_peptide,
PRECURSOR.ID AS transition_group_id,
PRECURSOR.DECOY AS decoy,
GLYCOPEPTIDE.DECOY_PEPTIDE AS decoy_peptide,
GLYCOPEPTIDE.DECOY_GLYCAN AS decoy_glycan,
RUN.ID AS run_id,
RUN.FILENAME AS filename,
FEATURE.EXP_RT AS RT,
FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt,
FEATURE.DELTA_RT AS delta_rt,
FEATURE.NORM_RT AS iRT,
PRECURSOR.LIBRARY_RT AS assay_iRT,
FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT,
FEATURE.ID AS id,
PEPTIDE.UNMODIFIED_SEQUENCE AS Sequence,
PEPTIDE.MODIFIED_SEQUENCE AS FullPeptideName,
GLYCAN.GLYCAN_STRUCT AS GlycanStruct,
GLYCAN.GLYCAN_COMPOSITION AS GlycanComposition,
GLYCOPEPTIDE.GLYCAN_SITE AS GlycanSite,
PRECURSOR.CHARGE AS Charge,
PRECURSOR.PRECURSOR_MZ AS mz,
FEATURE_MS2.AREA_INTENSITY AS Intensity,
FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area,
FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex,
FEATURE.LEFT_WIDTH AS leftWidth,
FEATURE.RIGHT_WIDTH AS rightWidth,
SCORE_MS2.RANK AS peak_group_rank,
SCORE_MS2.SCORE AS d_score,
SCORE_MS2.PEP AS pep,
SCORE_MS2.QVALUE AS m_score,
SCORE_MS2_PART_PEPTIDE.SCORE AS d_score_peptide,
SCORE_MS2_PART_PEPTIDE.PEP AS pep_peptide,
SCORE_MS2_PART_GLYCAN.SCORE AS d_score_glycan,
SCORE_MS2_PART_GLYCAN.PEP AS pep_glycan
FROM PRECURSOR
INNER JOIN PRECURSOR_GLYCOPEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_GLYCOPEPTIDE_MAPPING.PRECURSOR_ID
INNER JOIN GLYCOPEPTIDE ON PRECURSOR_GLYCOPEPTIDE_MAPPING.GLYCOPEPTIDE_ID = GLYCOPEPTIDE.ID
INNER JOIN GLYCOPEPTIDE_PEPTIDE_MAPPING ON GLYCOPEPTIDE.ID = GLYCOPEPTIDE_PEPTIDE_MAPPING.GLYCOPEPTIDE_ID
INNER JOIN PEPTIDE ON GLYCOPEPTIDE_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID
INNER JOIN GLYCOPEPTIDE_GLYCAN_MAPPING ON GLYCOPEPTIDE.ID = GLYCOPEPTIDE_GLYCAN_MAPPING.GLYCOPEPTIDE_ID
INNER JOIN GLYCAN ON GLYCOPEPTIDE_GLYCAN_MAPPING.GLYCAN_ID = GLYCAN.ID
INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID
LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID
LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_MS2_PART_PEPTIDE ON SCORE_MS2_PART_PEPTIDE.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_MS2_PART_GLYCAN ON SCORE_MS2_PART_GLYCAN.FEATURE_ID = FEATURE.ID
%s
ORDER BY transition_group_id,
peak_group_rank;
''' % (qvalue_filter)
con.executescript(idx_query)
data = pd.read_sql_query(query, con)
con.executescript('''
CREATE INDEX IF NOT EXISTS idx_glycopeptide_glycosite_mapping_glycosite_id ON GLYCOPEPTIDE_GLYCOSITE_MAPPING (GLYCOSITE_ID);
CREATE INDEX IF NOT EXISTS idx_glycosite_glycosite_id ON GLYCOSITE (ID);
CREATE INDEX IF NOT EXISTS idx_glycopeptide_glycosite_mapping_glycopeptide_id ON GLYCOPEPTIDE_GLYCOSITE_MAPPING (GLYCOPEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_glycosite_protein_mapping_protein_id ON GLYCOSITE_PROTEIN_MAPPING (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_protein_protein_id ON PROTEIN (ID);
CREATE INDEX IF NOT EXISTS idx_glycosite_protein_mapping_glycosite_id ON GLYCOSITE_PROTEIN_MAPPING (GLYCOSITE_ID);
''')
data_protein_glycosite = pd.read_sql_query('''
SELECT GLYCOPEPTIDE_ID AS id_glycopeptide,
GROUP_CONCAT(PROTEIN.PROTEIN_ACCESSION,';') AS ProteinName,
GROUP_CONCAT(GLYCOSITE.PROTEIN_GLYCOSITE,';') AS ProteinGlycoSite
FROM GLYCOPEPTIDE_GLYCOSITE_MAPPING
INNER JOIN GLYCOSITE ON GLYCOPEPTIDE_GLYCOSITE_MAPPING.GLYCOSITE_ID = GLYCOSITE.ID
INNER JOIN GLYCOSITE_PROTEIN_MAPPING ON GLYCOSITE.ID = GLYCOSITE_PROTEIN_MAPPING.GLYCOSITE_ID
INNER JOIN PROTEIN ON GLYCOSITE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID
GROUP BY GLYCOPEPTIDE_ID;
''', con)
data = pd.merge(data, data_protein_glycosite, how='inner', on=['id_glycopeptide'])
return data
def transition_report(con, max_transition_pep):
if max_transition_pep is not None:
pep_filter = 'AND SCORE_TRANSITION.PEP < %s' % max_transition_pep
else:
pep_filter = ''
if check_sqlite_table(con, "SCORE_TRANSITION"):
idx_transition_query = '''
CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id ON FEATURE_TRANSITION (TRANSITION_ID);
CREATE INDEX IF NOT EXISTS idx_transition_transition_id ON TRANSITION (ID);
CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id_feature_id ON FEATURE_TRANSITION (TRANSITION_ID, FEATURE_ID);
CREATE INDEX IF NOT EXISTS idx_score_transition_transition_id_feature_id ON SCORE_TRANSITION (TRANSITION_ID, FEATURE_ID);
CREATE INDEX IF NOT EXISTS idx_feature_transition_feature_id ON FEATURE_TRANSITION (FEATURE_ID);
'''
transition_query = '''
SELECT FEATURE_TRANSITION.FEATURE_ID AS id,
GROUP_CONCAT(AREA_INTENSITY,';') AS aggr_Peak_Area,
GROUP_CONCAT(APEX_INTENSITY,';') AS aggr_Peak_Apex,
GROUP_CONCAT(TRANSITION.ID || "_" || TRANSITION.TYPE || TRANSITION.ORDINAL || "_" || TRANSITION.CHARGE,';') AS aggr_Fragment_Annotation
FROM FEATURE_TRANSITION
INNER JOIN TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID
INNER JOIN SCORE_TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = SCORE_TRANSITION.TRANSITION_ID AND FEATURE_TRANSITION.FEATURE_ID = SCORE_TRANSITION.FEATURE_ID
WHERE TRANSITION.DECOY == 0 %s
GROUP BY FEATURE_TRANSITION.FEATURE_ID
''' % pep_filter
else:
idx_transition_query = '''
CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id ON FEATURE_TRANSITION (TRANSITION_ID);
CREATE INDEX IF NOT EXISTS idx_transition_transition_id ON TRANSITION (ID);
CREATE INDEX IF NOT EXISTS idx_feature_transition_feature_id ON FEATURE_TRANSITION (FEATURE_ID);
'''
transition_query = '''
SELECT FEATURE_ID AS id,
GROUP_CONCAT(AREA_INTENSITY,';') AS aggr_Peak_Area,
GROUP_CONCAT(APEX_INTENSITY,';') AS aggr_Peak_Apex,
GROUP_CONCAT(TRANSITION.ID || "_" || TRANSITION.TYPE || TRANSITION.ORDINAL || "_" || TRANSITION.CHARGE,';') AS aggr_Fragment_Annotation
FROM FEATURE_TRANSITION
INNER JOIN TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID
GROUP BY FEATURE_ID
'''
con.executescript(idx_transition_query)
data = pd.read_sql_query(transition_query, con)
return data
def glycopeptide_report(con, max_global_glycopeptide_qvalue):
if max_global_glycopeptide_qvalue is not None:
qvalue_filter = 'AND SCORE_GLYCOPEPTIDE.QVALUE < %s' % \
max_global_glycopeptide_qvalue
else:
qvalue_filter = ''
data = None
for context in ['run-specific', 'experiment-wide', 'global']:
context_suffix = '_' + context.replace('-', '_')
if context == 'global':
run_id = ''
else:
run_id = 'RUN_ID AS id_run,'
for part in ['peptide', 'glycan', 'total']:
if part == 'total':
part_suffix = ''
table_part_suffix = ''
m_score = ', QVALUE AS m_score_glycopeptide%s%s' % (part_suffix, context_suffix)
m_score_filter = qvalue_filter
else:
part_suffix = '_' + part
table_part_suffix = '_PART_' + part.upper()
m_score = ''
m_score_filter = ''
if not check_sqlite_table(con, "SCORE_GLYCOPEPTIDE" + table_part_suffix):
continue
data_glycopeptide = pd.read_sql_query('''
SELECT %(run_id)s
GLYCOPEPTIDE_ID AS id_glycopeptide,
PEP AS pep_glycopeptide%(part_suffix)s%(context_suffix)s
%(m_score)s
FROM SCORE_GLYCOPEPTIDE%(table_part_suffix)s
WHERE CONTEXT == '%(context)s'
%(m_score_filter)s;
''' % {
'run_id': run_id,
'part_suffix': part_suffix,
'table_part_suffix': table_part_suffix,
'context_suffix': context_suffix,
'context': context,
'm_score': m_score,
'm_score_filter': m_score_filter
}, con)
if len(data_glycopeptide.index) > 0:
if data is None:
data = data_glycopeptide
else:
if 'id_run' in data.columns and 'id_run' in data_glycopeptide.columns:
on = ['id_run', 'id_glycopeptide']
else:
on = ['id_glycopeptide']
data = pd.merge(data, data_glycopeptide, on=on)
return data
def glycoform_report(con,
match_precursor,
max_glycoform_pep,
max_glycoform_qvalue,
max_rs_peakgroup_qvalue):
if not check_sqlite_table(con, "SCORE_GLYCOFORM"):
raise click.ClickException("No glycoform scores.")
idx_query = ''
if check_sqlite_table(con, "FEATURE_MS1"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);"
if check_sqlite_table(con, "SCORE_MS1"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms1_feature_id ON SCORE_MS1 (FEATURE_ID);"
score_ms1_pep = "SCORE_MS1.PEP"
link_ms1 = "LEFT JOIN SCORE_MS1 ON SCORE_MS1.FEATURE_ID = FEATURE.ID"
else:
score_ms1_pep = "NULL"
link_ms1 = ""
if check_sqlite_table(con, "SCORE_MS2"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);"
if check_sqlite_table(con, "SCORE_GLYCOFORM"):
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_glycoform_feature_id ON SCORE_GLYCOFORM (FEATURE_ID);"
idx_query += "CREATE INDEX IF NOT EXISTS idx_score_glycoform_glycopeptide_id ON SCORE_GLYCOFORM (GLYCOPEPTIDE_ID);"
if match_precursor == 'exact':
glycofrom_match_precursor = ''
match_precursor_filter = 'GLYCOPEPTIDE.ID = GLYCOPEPTIDE_GLYCOFORM.ID'
transition_group_id = 'PRECURSOR.ID'
elif match_precursor == 'glycan_composition':
glycofrom_match_precursor = ''
match_precursor_filter = 'GLYCAN.GLYCAN_COMPOSITION = GLYCAN_GLYCOFORM.GLYCAN_COMPOSITION'
transition_group_id = 'PRECURSOR.ID'
else:
glycofrom_match_precursor = 'GLYCOPEPTIDE.ID = GLYCOPEPTIDE_GLYCOFORM.ID AS glycofrom_match_precursor,'
match_precursor_filter = '1 = 1'
transition_group_id = '''
PRECURSOR.ID || '_' ||
PEPTIDE_GLYCOFORM.MODIFIED_SEQUENCE || '_' ||
GLYCOPEPTIDE_GLYCOFORM.GLYCAN_SITE || ',' ||
GLYCAN_GLYCOFORM.GLYCAN_STRUCT
'''
if max_rs_peakgroup_qvalue is not None:
ms2_qvalue_filter = 'AND SCORE_MS2.QVALUE < %s' % max_rs_peakgroup_qvalue
else:
ms2_qvalue_filter = ''
if max_glycoform_pep is not None:
glycoform_pep_filter = 'AND SCORE_GLYCOFORM.PEP < %s' % max_glycoform_pep
else:
glycoform_pep_filter = ''
if max_glycoform_qvalue is not None:
glycoform_qvalue_filter = 'AND SCORE_GLYCOFORM.QVALUE < %s' % max_glycoform_qvalue
else:
glycoform_qvalue_filter = ''
query = '''
SELECT RUN.ID AS id_run,
GLYCOPEPTIDE.ID AS id_glycopeptide,
PEPTIDE.ID AS id_peptide,
%(transition_group_id)s AS transition_group_id,
PRECURSOR.DECOY AS decoy,
GLYCOPEPTIDE.DECOY_PEPTIDE AS decoy_peptide,
GLYCOPEPTIDE.DECOY_GLYCAN AS decoy_glycan,
RUN.ID AS run_id,
RUN.FILENAME AS filename,
FEATURE.EXP_RT AS RT,
FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt,
FEATURE.DELTA_RT AS delta_rt,
FEATURE.NORM_RT AS iRT,
PRECURSOR.LIBRARY_RT AS assay_iRT,
FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT,
FEATURE.ID AS id,
PEPTIDE_GLYCOFORM.UNMODIFIED_SEQUENCE AS Sequence,
PEPTIDE_GLYCOFORM.MODIFIED_SEQUENCE AS FullPeptideName,
GLYCAN_GLYCOFORM.GLYCAN_STRUCT AS GlycanStruct,
GLYCAN_GLYCOFORM.GLYCAN_COMPOSITION AS GlycanComposition,
GLYCOPEPTIDE_GLYCOFORM.GLYCAN_SITE AS GlycanSite,
%(glycofrom_match_precursor)s
PRECURSOR.CHARGE AS Charge,
PRECURSOR.PRECURSOR_MZ AS mz,
FEATURE_MS2.AREA_INTENSITY AS Intensity,
FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area,
FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex,
FEATURE.LEFT_WIDTH AS leftWidth,
FEATURE.RIGHT_WIDTH AS rightWidth,
%(score_ms1_pep)s AS ms1_pep,
SCORE_MS2.PEP AS ms2_pep,
SCORE_GLYCOFORM.PRECURSOR_PEAKGROUP_PEP AS precursor_pep,
SCORE_GLYCOFORM.PEP AS glycoform_pep,
SCORE_GLYCOFORM.QVALUE AS m_score,
SCORE_MS2.RANK AS peak_group_rank,
SCORE_MS2.SCORE AS d_score,
SCORE_MS2.QVALUE AS ms2_m_score,
SCORE_MS2_PART_PEPTIDE.SCORE AS d_score_peptide,
SCORE_MS2_PART_PEPTIDE.PEP AS ms2_pep_peptide,
SCORE_MS2_PART_GLYCAN.SCORE AS d_score_glycan,
SCORE_MS2_PART_GLYCAN.PEP AS ms2_pep_glycan
FROM PRECURSOR
INNER JOIN PRECURSOR_GLYCOPEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_GLYCOPEPTIDE_MAPPING.PRECURSOR_ID
INNER JOIN GLYCOPEPTIDE ON PRECURSOR_GLYCOPEPTIDE_MAPPING.GLYCOPEPTIDE_ID = GLYCOPEPTIDE.ID
INNER JOIN GLYCOPEPTIDE_PEPTIDE_MAPPING ON GLYCOPEPTIDE.ID = GLYCOPEPTIDE_PEPTIDE_MAPPING.GLYCOPEPTIDE_ID
INNER JOIN PEPTIDE ON GLYCOPEPTIDE_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID
INNER JOIN GLYCOPEPTIDE_GLYCAN_MAPPING ON GLYCOPEPTIDE.ID = GLYCOPEPTIDE_GLYCAN_MAPPING.GLYCOPEPTIDE_ID
INNER JOIN GLYCAN ON GLYCOPEPTIDE_GLYCAN_MAPPING.GLYCAN_ID = GLYCAN.ID
INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID
LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID
LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
%(link_ms1)s
LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_MS2_PART_PEPTIDE ON SCORE_MS2_PART_PEPTIDE.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_MS2_PART_GLYCAN ON SCORE_MS2_PART_GLYCAN.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_GLYCOFORM ON SCORE_GLYCOFORM.FEATURE_ID = FEATURE.ID
INNER JOIN GLYCOPEPTIDE AS GLYCOPEPTIDE_GLYCOFORM ON SCORE_GLYCOFORM.GLYCOPEPTIDE_ID = GLYCOPEPTIDE_GLYCOFORM.ID
INNER JOIN GLYCOPEPTIDE_PEPTIDE_MAPPING AS GLYCOPEPTIDE_PEPTIDE_MAPPING_GLYCOFORM ON GLYCOPEPTIDE_GLYCOFORM.ID = GLYCOPEPTIDE_PEPTIDE_MAPPING_GLYCOFORM.GLYCOPEPTIDE_ID
INNER JOIN PEPTIDE AS PEPTIDE_GLYCOFORM ON GLYCOPEPTIDE_PEPTIDE_MAPPING_GLYCOFORM.PEPTIDE_ID = PEPTIDE_GLYCOFORM.ID
INNER JOIN GLYCOPEPTIDE_GLYCAN_MAPPING AS GLYCOPEPTIDE_GLYCAN_MAPPING_GLYCOFORM ON GLYCOPEPTIDE_GLYCOFORM.ID = GLYCOPEPTIDE_GLYCAN_MAPPING_GLYCOFORM.GLYCOPEPTIDE_ID
INNER JOIN GLYCAN AS GLYCAN_GLYCOFORM ON GLYCOPEPTIDE_GLYCAN_MAPPING_GLYCOFORM.GLYCAN_ID = GLYCAN_GLYCOFORM.ID
WHERE %(match_precursor_filter)s
%(ms2_qvalue_filter)s
%(glycoform_pep_filter)s
%(glycoform_qvalue_filter)s
ORDER BY transition_group_id,
peak_group_rank;
''' % {
'transition_group_id': transition_group_id,
'glycofrom_match_precursor': glycofrom_match_precursor,
'score_ms1_pep': score_ms1_pep,
'link_ms1': link_ms1,
'match_precursor_filter': match_precursor_filter,
'ms2_qvalue_filter': ms2_qvalue_filter,
'glycoform_pep_filter': glycoform_pep_filter,
'glycoform_qvalue_filter': glycoform_qvalue_filter
}
con.executescript(idx_query)
data = pd.read_sql_query(query, con)
con.executescript('''
CREATE INDEX IF NOT EXISTS idx_glycopeptide_glycosite_mapping_glycosite_id ON GLYCOPEPTIDE_GLYCOSITE_MAPPING (GLYCOSITE_ID);
CREATE INDEX IF NOT EXISTS idx_glycosite_glycosite_id ON GLYCOSITE (ID);
CREATE INDEX IF NOT EXISTS idx_glycopeptide_glycosite_mapping_glycopeptide_id ON GLYCOPEPTIDE_GLYCOSITE_MAPPING (GLYCOPEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_glycosite_protein_mapping_protein_id ON GLYCOSITE_PROTEIN_MAPPING (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_protein_protein_id ON PROTEIN (ID);
CREATE INDEX IF NOT EXISTS idx_glycosite_protein_mapping_glycosite_id ON GLYCOSITE_PROTEIN_MAPPING (GLYCOSITE_ID);
''')
data_protein_glycosite = pd.read_sql_query('''
SELECT GLYCOPEPTIDE_ID AS id_glycopeptide,
GROUP_CONCAT(PROTEIN.PROTEIN_ACCESSION,';') AS ProteinName,
GROUP_CONCAT(GLYCOSITE.PROTEIN_GLYCOSITE,';') AS ProteinGlycoSite
FROM GLYCOPEPTIDE_GLYCOSITE_MAPPING
INNER JOIN GLYCOSITE ON GLYCOPEPTIDE_GLYCOSITE_MAPPING.GLYCOSITE_ID = GLYCOSITE.ID
INNER JOIN GLYCOSITE_PROTEIN_MAPPING ON GLYCOSITE.ID = GLYCOSITE_PROTEIN_MAPPING.GLYCOSITE_ID
INNER JOIN PROTEIN ON GLYCOSITE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID
GROUP BY GLYCOPEPTIDE_ID;
''', con)
data = pd.merge(data, data_protein_glycosite, how='inner', on=['id_glycopeptide'])
return data
def export_tsv(infile, outfile, format='legacy_merged', outcsv=False,
transition_quantification=True, max_transition_pep=0.7,
glycoform=False, glycoform_match_precursor='glycan_composition',
max_glycoform_pep=None,
max_glycoform_qvalue=0.01,
max_rs_peakgroup_qvalue=0.05,
glycopeptide=True, max_global_glycopeptide_qvalue=0.01):
osw = sqlite3.connect(infile)
click.echo("Info: Reading peak group-level results.")
if not glycoform:
data = precursor_report(
osw,
max_rs_peakgroup_qvalue=max_rs_peakgroup_qvalue
)
else:
data = glycoform_report(
osw,
match_precursor=glycoform_match_precursor,
max_glycoform_pep=max_glycoform_pep,
max_glycoform_qvalue=max_glycoform_qvalue,
max_rs_peakgroup_qvalue=max_rs_peakgroup_qvalue
)
if transition_quantification:
click.echo("Info: Reading transition-level results.")
data_transition = transition_report(
osw,
max_transition_pep=max_transition_pep
)
if data_transition is not None and len(data_transition.index) > 0:
data = pd.merge(data, data_transition, how='left', on=['id'])
if glycopeptide:
click.echo("Info: Reading glycopeptide-level results.")
data_glycopeptide = glycopeptide_report(
osw,
max_global_glycopeptide_qvalue=max_global_glycopeptide_qvalue
)
if data_glycopeptide is not None and len(data_glycopeptide.index) > 0:
if 'id_run' in data_glycopeptide.columns:
data = pd.merge(data, data_glycopeptide, how='inner', on=['id_run','id_glycopeptide'])
else:
data = pd.merge(data, data_glycopeptide, how='inner', on=['id_glycopeptide'])
if outcsv:
sep = ","
else:
sep = "\t"
if format == 'legacy_split':
data = data.drop(['id_run', 'id_glycopeptide', 'id_peptide'], axis=1)
data.groupby('filename').apply(lambda x: x.to_csv(
os.path.basename(x['filename'].values[0]) + '.tsv',
sep=sep, index=False
))
elif format == 'legacy_merged':
data.drop(['id_run', 'id_glycopeptide', 'id_peptide'], axis=1) \
.to_csv(outfile, sep=sep, index=False)
elif format == 'matrix':
data = data.iloc[data.groupby(['run_id', 'transition_group_id']) \
.apply(lambda x: x['m_score'].idxmin())]
data = data[['transition_group_id',
'decoy', 'decoy_peptide', 'decoy_glycan',
'Sequence', 'FullPeptideName',
'GlycanStruct', 'GlycanComposition', 'GlycanSite',
'Charge',
'ProteinName', 'ProteinGlycoSite', 'filename', 'Intensity']]
data = data.pivot_table(
index=list(data.columns \
.difference(['filename', 'Intensity'], sort=False)),
columns='filename', values='Intensity'
)
data.to_csv(outfile, sep=sep, index=True)
osw.close()
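# Illustrative only (not from the original tool): a minimal sketch of how export_tsv
# might be invoked from other code; the file names and thresholds below are hypothetical.
def _example_export_tsv():
    """Export a merged legacy TSV from a hypothetical scored OSW file."""
    export_tsv(
        "results_scored.osw",          # hypothetical input OSW file
        "results_glycopeptides.tsv",   # hypothetical output path
        format="legacy_merged",
        transition_quantification=True,
        max_rs_peakgroup_qvalue=0.01,
    )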
def export_score_plots(infile):
con = sqlite3.connect(infile)
if check_sqlite_table(con, "SCORE_MS2") and \
check_sqlite_table(con, "SCORE_MS2_PART_PEPTIDE") and \
check_sqlite_table(con, "SCORE_MS2_PART_GLYCAN"):
outfile = infile.split(".osw")[0] + "_ms2_score_plots.pdf"
table_ms2 = pd.read_sql_query('''
SELECT *,
RUN_ID || '_' || PRECURSOR_ID AS GROUP_ID
FROM FEATURE_MS2
INNER JOIN
(SELECT RUN_ID,
ID,
PRECURSOR_ID,
EXP_RT
FROM FEATURE) AS FEATURE ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
INNER JOIN
(SELECT ID,
CHARGE AS VAR_PRECURSOR_CHARGE,
DECOY
FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN
(SELECT PRECURSOR_ID AS ID,
DECOY_PEPTIDE,
DECOY_GLYCAN
FROM PRECURSOR_GLYCOPEPTIDE_MAPPING
INNER JOIN GLYCOPEPTIDE
ON PRECURSOR_GLYCOPEPTIDE_MAPPING.GLYCOPEPTIDE_ID == GLYCOPEPTIDE.ID) AS DECOY
ON FEATURE.PRECURSOR_ID = DECOY.ID
INNER JOIN
(SELECT PRECURSOR_ID AS ID,
COUNT(*) AS VAR_TRANSITION_NUM_SCORE
FROM TRANSITION_PRECURSOR_MAPPING
INNER JOIN TRANSITION ON TRANSITION_PRECURSOR_MAPPING.TRANSITION_ID = TRANSITION.ID
WHERE DETECTING==1
GROUP BY PRECURSOR_ID) AS VAR_TRANSITION_SCORE ON FEATURE.PRECURSOR_ID = VAR_TRANSITION_SCORE.ID
INNER JOIN SCORE_MS2 ON FEATURE.ID = SCORE_MS2.FEATURE_ID
INNER JOIN
(SELECT FEATURE_ID,
SCORE AS SCORE_PEPTIDE
FROM SCORE_MS2_PART_PEPTIDE) AS SCORE_MS2_PART_PEPTIDE
ON FEATURE.ID = SCORE_MS2_PART_PEPTIDE.FEATURE_ID
INNER JOIN
(SELECT FEATURE_ID,
SCORE AS SCORE_GLYCAN
FROM SCORE_MS2_PART_GLYCAN) AS SCORE_MS2_PART_GLYCAN
ON FEATURE.ID = SCORE_MS2_PART_GLYCAN.FEATURE_ID
WHERE RANK == 1
ORDER BY RUN_ID,
PRECURSOR.ID ASC,
FEATURE.EXP_RT ASC;
''', con)
# api: pandas.read_sql_query
"""some objects and functions supporting html actions and routes"""
import pickle
import numpy as np
import pandas as pd
import matplotlib as plt
from scipy.spatial import distance
# from sqlalchemy import func, distinct
# from sqlalchemy.sql import expression #(.exists, .select, ...)
from .spotify_client import *
# unpickle a trained kmeans algorithm and cluster-distances
pickled_kmeans = r"kmeans_pipe.sav"
pickled_index = r"df_index.sav"
pickled_locs = r"song_space_locs.sav"
kmeans_pipe = pickle.load(open(pickled_kmeans, 'rb'))
df_index = pickle.load(open(pickled_index, 'rb'))
song_space_locs = pickle.load(open(pickled_locs, 'rb'))
def suggest_ids(song_name, artist_name, count=100):
"""Compares a track to ~440,000 others, based on 13 numeric audio features, and returns the spotify_ids of 35 songs with similar cluster-distance coordinates; it would be cool if a button press would display the next closest set. It would be cooler if matplotlib displayed a 3D plot, with 3 drop-down menus for choosing any 3 features (of 13) for plot axes (or a 3D tSNE plot, not with audio features but with projections to abstract 3D space); and if the color of input song were bright color, similar to neighbors displayed in table, but different from the faded grey others"""
song_id, artist_id = retrieve_spotify_ids(song_name, artist_name)
features = retrieve_audio_features(song_id)
feats = ['danceability','energy','key','loudness','mode','speechiness','acousticness','instrumentalness','liveness','valence','tempo','duration_ms','time_signature']
model_input = [features[0][feat] for feat in feats]
song_space_base = kmeans_pipe.transform([model_input])
dists = distance.cdist(song_space_locs, song_space_base, 'cityblock')
dists = pd.DataFrame(dists, index=df_index)  # api: pandas.DataFrame
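# The remainder of suggest_ids is truncated above. The helper below is an illustrative
# sketch (not from the original source) of how the closest tracks could be picked from
# the cityblock distances computed above; it assumes dists is indexed by spotify_id.
def nearest_track_ids(dists, count=35):
    """Return the spotify_ids of the `count` closest tracks, smallest distance first."""
    # dists has a single distance column (label 0) after the DataFrame conversion above.
    return dists.sort_values(by=0).head(count).index.tolist()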
import json
import os
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import pandas as pd
from pandas.tseries.offsets import DateOffset
def from_download(tok, start_date, end_date, offset_days, series_list):
"""Download and assemble dataset of demand data per balancing authority for desired
date range.
:param str tok: token obtained by registering with EIA.
:param pandas.Timestamp/numpy.datetime64/datetime.datetime start_date: start date.
:param pandas.Timestamp/numpy.datetime64/datetime.datetime end_date: end date.
:param list series_list: list of demand series names provided by EIA, e.g.,
['EBA.AVA-ALL.D.H', 'EBA.AZPS-ALL.D.H'].
:param int offset_days: number of business days for data to stabilize.
:return: (*pandas.DataFrame*) -- data frame with UTC timestamp as indices and
BA series name as column names.
"""
timespan = pd.date_range(
start_date, end_date - DateOffset(days=offset_days), tz="UTC", freq="H"
)
df_all = pd.DataFrame(index=timespan)
for ba in series_list:
print("Downloading", ba)
d = EIAgov(tok, [ba])
df = d.get_data()
if df is not None:
df.index = pd.to_datetime(df["Date"])
df.drop(columns=["Date"], inplace=True)
df_all = pd.concat([df_all, df], axis=1)
return df_all
def from_excel(directory, series_list, start_date, end_date):
"""Assemble EIA balancing authority (BA) data from pre-downloaded Excel
spreadsheets. The spreadsheets contain data from July 2015 to present.
:param str directory: location of Excel files.
:param list series_list: list of BA initials, e.g., ['PSE', 'BPAT', 'CISO'].
:param datetime.datetime start_date: desired start of dataset.
:param datetime.datetime end_date: desired end of dataset.
:return: (*pandas.DataFrame*) -- data frame with UTC timestamp as indices and
BA series name as column names.
"""
timespan = pd.date_range(start_date, end_date, freq="H")
df_all = pd.DataFrame(index=timespan)
for ba in series_list:
print(ba)
filename = ba + ".xlsx"
df = pd.read_excel(
io=os.path.join(directory, filename), header=0, usecols="B,U"
)
df.index = pd.to_datetime(df["UTC Time"])
# Fill missing times
df = df.resample("H").asfreq()
df.drop(columns=["UTC Time"], inplace=True)
df.rename(columns={"Published D": ba}, inplace=True)
df_all = pd.concat([df_all, df], join="inner", axis=1)
return df_all
def get_ba_demand(ba_code_list, start_date, end_date, api_key):
"""Download the demand between two dates for a list of balancing authorities.
:param list ba_code_list: list of BA codes to download from EIA.
:param pandas.Timestamp/numpy.datetime64/datetime.datetime start_date: beginning
bound for the demand data frame.
:param pandas.Timestamp/numpy.datetime64/datetime.datetime end_date: end bound for
the demand data frame.
:param string api_key: api key to fetch data.
:return: (*pandas.DataFrame*) -- data frame with columns of demand by BA.
"""
series_list = [f"EBA.{ba}-ALL.D.H" for ba in ba_code_list]
df = from_download(
api_key, start_date, end_date, offset_days=0, series_list=series_list
)
df.columns = [ba.replace("EBA.", "").replace("-ALL.D.H", "") for ba in df.columns]
return df
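# Illustrative usage (not from the original source); the API key and balancing
# authority codes below are placeholders.
def _example_get_ba_demand():
    """Fetch one month of hourly demand for two hypothetical BAs."""
    return get_ba_demand(
        ba_code_list=["CISO", "BPAT"],          # hypothetical BA codes
        start_date=pd.Timestamp("2020-01-01"),
        end_date=pd.Timestamp("2020-02-01"),
        api_key="YOUR_EIA_API_KEY",             # placeholder token
    )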
class EIAgov(object):
"""Copied from `this link <https://quantcorner.wordpress.com/\
2014/11/18/downloading-eias-data-with-python/>`_.
:param str token: EIA token.
:param list series: id code(s) of the series to be downloaded.
"""
def __init__(self, token, series):
self.token = token
self.series = series
def raw(self, ser):
"""Download json files from EIA.
:param str ser: id code of the series to be downloaded.
:raises keyError: when URL or file are either not found or not valid.
"""
url = (
"http://api.eia.gov/series/?api_key="
+ self.token
+ "&series_id="
+ ser.upper()
)
try:
response = urlopen(url)
raw_byte = response.read()
raw_string = str(raw_byte, "utf-8-sig")
jso = json.loads(raw_string)
return jso
except HTTPError as e:
print("HTTP error type.")
print("Error code: ", e.code)
except URLError as e:
print("URL type error.")
print("Reason: ", e.reason)
def get_data(self):
"""Convert json files into data frame.
:return: (*pandas.DataFrame*) -- data frame.
"""
date_ = self.raw(self.series[0])
if "data" in date_.keys() and "error" in date_["data"].keys():
e = date_["data"]["error"]
print(f"ERROR: {self.series[0]} not found. {e}")
return None
if len(date_["series"]) == 0:
print(f"ERROR: {self.series[0]} was found but has no data")
return None
date_series = date_["series"][0]["data"]
endi = len(date_series)
date = []
for i in range(endi):
date.append(date_series[i][0])
df = pd.DataFrame(data=date)  # api: pandas.DataFrame
import pandas as pd
import numpy as np
def build_items(master_red: pd.DataFrame, master_ubicaciones: pd.DataFrame, master_demanda, master_producto):
"""
Creates an items df with 5 columns specifying tiempo, producto, nodo, tipo, and valor. Imported
material is ignored for now, since the location table would need changes to register CGNA_PLANT as
CGNA_PLANT_DISTR.
:param master_producto:
:param master_demanda:
:param master_ubicaciones:
:param master_red:
:return:
"""
# In fact, the static and dynamic constraint section should be built first, since it does not depend on product.
# Determine the time horizon
MONTHS = sorted(master_demanda['fecha'].unique())
# All unique nodes of the network
nodos = pd.concat([master_red.loc[:, 'id_locacion_origen'], master_red.loc[:, 'id_locacion_destino']],
ignore_index=True).unique()
# Create the final DF with the structure defined in the documentation: `tiempo`, `producto`, `nodo`, `tipo`, `valor`
item_df = pd.DataFrame(columns=['tiempo', 'producto', 'nodo', 'tipo', 'valor'])
for t in MONTHS:
# DYNAMIC AND STATIC CONSTRAINTS: extract the dynamic and static constraints and put them in `item_df` format
nodos_restr = master_ubicaciones.loc[:, ['id_locacion', 'capacidad_din', 'capacidad_est']]
nodos_restr = pd.melt(nodos_restr, id_vars=['id_locacion'], value_vars=['capacidad_din', 'capacidad_est'])
nodos_restr.columns = item_df.columns[-3:]
# Drop the rows where `nodos_restr['valor']` is NaN
nodos_restr = nodos_restr.dropna(subset=['valor'])
# Add time `t` and product `NaN` to these constraints so they can be concatenated to `item_df`
nodos_restr['tiempo'] = t
nodos_restr['producto'] = np.nan
# PRODUCTS: select the products (families) from the demand master for the month in question
PRODUCTS = master_demanda.loc[master_demanda['fecha'] == t, 'familia'].unique()
for k in PRODUCTS:
# PRODUCTION: look up the product's origin site and its max production in the product master.
# There should be only ONE origin
nodos_prod = master_producto.loc[master_producto['familia'] == k, ['familia',
'ubicacion_producto', 'produccion_max']]
# Rename and add the type and time columns
nodos_prod.columns = ['producto', 'nodo', 'valor']
nodos_prod['tipo'] = 'produccion'
nodos_prod['tiempo'] = t
# DEMAND: find all clients for product k at time t. Clients are treated as cities
clientes_demanda = master_demanda.loc[(master_demanda['fecha'] == t) & (master_demanda['familia'] == k),
['id_ciudad', 'cantidad']]
# Rename and create the remaining columns so it matches the `item_df` structure
clientes_demanda.columns = ['nodo', 'valor']
clientes_demanda['tiempo'] = t
clientes_demanda['producto'] = k
clientes_demanda['tipo'] = 'demanda'
# FLOW: the remaining nodes are flow nodes. They are the set difference between all network nodes,
# the production node, and the demand node. Remember to drop CLIENTE from the unique nodes,
# since in ITEMS it is already represented as `clientes_demanda`
nodos_flujo = list(set(nodos) - ({'CLIENTE'} | set(nodos_prod['nodo'])))
nodos_flujo = pd.DataFrame(data={'tiempo': t, 'producto': k, 'nodo': nodos_flujo,
'tipo': 'flujo', 'valor': 0})
# ITEMS: concatenate the per-product sections onto `item_df`
item_df = pd.concat([item_df, nodos_prod, nodos_flujo, clientes_demanda], ignore_index=True)
# ITEMS: concatenate the static and dynamic constraints onto `item_df`
item_df = pd.concat([item_df, nodos_restr], ignore_index=True)
return item_df
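# For reference, an illustrative sketch (not from the original source) of the shape
# build_items returns: one row per (tiempo, producto, nodo, tipo) with its valor.
# All values below are made up.
_EXAMPLE_ITEM_DF = pd.DataFrame({
    'tiempo': ['2021-01', '2021-01', '2021-01'],
    'producto': [np.nan, 'FAMILIA_A', 'FAMILIA_A'],
    'nodo': ['CGNA_PLANT', 'CGNA_PLANT', 'CIUDAD_01'],
    'tipo': ['capacidad_est', 'produccion', 'demanda'],
    'valor': [5000, 1200, 300],
})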
def build_activities(master_red, master_tarifario, master_demanda, master_ubicaciones):
"""
Builds the Activities table with 6 columns: 'tiempo', 'producto', 'transporte', 'origen', 'destino', 'costo'.
The origins and destinations can be id_locaciones for links between nodes of the Esenttia infrastructure,
or id_ciudades for deliveries to clients. This table captures every distribution and storage activity
of the network, along with its cost.
:param master_ubicaciones:
:param master_demanda:
:param master_red:
:param master_tarifario:
:return:
"""
# Determine how many months there are for t
MONTHS = sorted(master_demanda['fecha'].unique())
# Open the infrastructure network, select the relevant columns ['origen', 'destino']
master_red = master_red.loc[:, ['id_locacion_origen', 'id_locacion_destino']]
# Open the tariff master, select the relevant columns
master_tarifario = master_tarifario[['id_ciudad_origen', 'id_ciudad_destino', 'capacidad', 'costo']]
# Create the final DF with the structure defined in the documentation
actividad_df = pd.DataFrame(columns=['tiempo', 'producto', 'transporte', 'origen', 'destino', 'costo'])
for t in MONTHS:
# PRODUCTS: select the products (families) from the demand master for month `t`
PRODUCTS = master_demanda.loc[master_demanda['fecha'] == t, 'familia'].unique()
for k in PRODUCTS:
# STORAGE: create a storage activity from the nodes whose capacidad_est in the locations master
# is non-zero, i.e. not NaN
nodos_alm = master_ubicaciones.loc[~master_ubicaciones['capacidad_est'].isna(),
['id_locacion', 'costo_almacenamiento']]
# To distinguish storage (movement along the time dimension) from the other activities, append 'ALMACENAMIENTO'
nodos_alm['id_locacion'] = nodos_alm['id_locacion'] + '_ALMACENAMIENTO'
# Rename columns
nodos_alm.columns = ['origen', 'costo']
# Add the destination column (a copy of the origin column) plus the product, time, and transport columns
nodos_alm['destino'] = nodos_alm['origen'].copy()
nodos_alm['tiempo'] = t
nodos_alm['producto'] = k
nodos_alm['transporte'] = np.nan
# TRANSPORT: replace CLIENTE in master_red with `id_ciudad` from `master_demanda`. Build a demand DF,
# then join it with master_red according to the sites that can supply CLIENTE
clientes_demanda = master_demanda.loc[(master_demanda['fecha'] == t) & (master_demanda['familia'] == k),
'id_ciudad'].to_frame()
clientes_demanda['key'] = 'CLIENTE'
# Split master_red into rows whose destination is CLIENTE and rows whose destination is not
master_red_cliente = master_red.loc[master_red['id_locacion_destino'] == 'CLIENTE', :]
master_red_no_cliente = master_red.loc[~(master_red['id_locacion_destino'] == 'CLIENTE'), :]
# Join `master_red_cliente` with `clientes_demanda`
master_red_cliente = master_red_cliente.merge(clientes_demanda, left_on=['id_locacion_destino'],
right_on=['key'], how='inner')
master_red_cliente = master_red_cliente.drop(columns=['id_locacion_destino', 'key'])
master_red_cliente = master_red_cliente.rename(columns={'id_ciudad': 'id_locacion_destino'})
# Re-join master_red_cliente with master_red
master_red_clean = pd.concat([master_red_no_cliente, master_red_cliente], ignore_index=True)
# Join between the tariff master and the network master
# An inner join is used because, if there are no vehicles to transport, the arc cannot exist in `master_red`.
nodos_trans = master_red_clean.merge(master_tarifario,
left_on=['id_locacion_origen', 'id_locacion_destino'],
right_on=['id_ciudad_origen', 'id_ciudad_destino'], how='inner')
# Rename specific columns so they match the `actividad_df` format
nodos_trans = nodos_trans.rename(columns={'id_locacion_origen': 'origen',
'id_locacion_destino': 'destino',
'capacidad': 'transporte'})
# Keep only the relevant columns
nodos_trans = nodos_trans.loc[:, ['transporte', 'origen', 'destino', 'costo']]
# Create the remaining columns to match the `actividad_df` structure
nodos_trans['tiempo'] = t
nodos_trans['producto'] = k
# ACTIVITIES: concatenate the transport and storage nodes onto `actividad_df`
actividad_df = pd.concat([actividad_df, nodos_trans, nodos_alm], ignore_index=True)  # api: pandas.concat
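# Illustrative only (not from the original source): how the two builders might be wired
# together; the CSV file names are hypothetical placeholders.
def _example_build_tables():
    """Load hypothetical master tables and build the items and activities DataFrames."""
    master_red = pd.read_csv('master_red.csv')
    master_ubicaciones = pd.read_csv('master_ubicaciones.csv')
    master_demanda = pd.read_csv('master_demanda.csv')
    master_producto = pd.read_csv('master_producto.csv')
    master_tarifario = pd.read_csv('master_tarifario.csv')
    items = build_items(master_red, master_ubicaciones, master_demanda, master_producto)
    activities = build_activities(master_red, master_tarifario, master_demanda, master_ubicaciones)
    return items, activities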
import os
import shutil
import glob
import sys
from optparse import OptionParser
import argparse
import pandas as pd
class Options(argparse.ArgumentParser):
def __init__(self, prog="sequana_summary"):
usage = """Welcome to SEQUANA - Summary standalone
sequana_summary --file file.fastq.gz
sequana_summary --glob "file*.fastq"
sequana_summary --glob "file*.bed"
AUTHORS: <NAME>, <NAME>
Documentation: http://sequana.readthedocs.io
Issues: http://github.com/sequana/sequana
"""
description = """DESCRIPTION:
prints basic stats about a set of input files.
The format of the input files must be homogeneous with one of the
following extensions:
- fastq or fastq.gz
- bed (coverage BED files)
"""
super(Options, self).__init__(usage=usage, prog=prog,
description=description)
# options to fill the config file
self.add_argument("-m", "--multiple", action="store_true", default=False)
self.add_argument("-q", "--quiet", action="store_true", default=False)
self.add_argument("-f", "--file", dest="file", type=str,
required=False, help="""one filename (either FastQ or BED file; see
DESCRIPTION)""")
self.add_argument("-g", "--glob", dest="glob", type=str,
required=False, help="""a glob/pattern of files. Must use quotes
e.g. "*.fastq.gz" (See --file or DESCRIPTION for details)""")
self.add_argument("-n", "--sample", default=1000000000000000, type=int,
help="""If input FastQ files, analyse entire file. You may restrict
analysis to set of reads""")
self.add_argument("-t", "--thread", default=4, type=int,
help="""Several files may be processed in parallel. By default 4
threads are used""")
def get_fastq_stats(filename, sample=1e16):
from sequana import FastQC
ff = FastQC(filename, max_sample=sample, verbose=False)
stats = ff.get_stats()
return stats
def get_bed_stats(filename):
from sequana import GenomeCov
import pandas as pd
bed = GenomeCov(filename)
stats = bed.get_stats("dataframe")
return stats[list(stats.keys())[0]]
def get_bam_stats(filename):
from sequana import BAM
import pandas as pd
bam = BAM(filename)
stats = bam.get_stats()
df = pd.Series(stats)  # api: pandas.Series
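# Illustrative sketch (not part of the original standalone): dispatching an input file
# to the matching stats helper by extension, under the assumption that the extension
# alone identifies the format.
def get_stats_for_file(filename, sample=1e16):
    """Route a file to the matching stats reader based on its extension."""
    if filename.endswith((".fastq", ".fastq.gz")):
        return get_fastq_stats(filename, sample=sample)
    elif filename.endswith(".bed"):
        return get_bed_stats(filename)
    elif filename.endswith(".bam"):
        return get_bam_stats(filename)
    raise ValueError("Unsupported extension: %s" % filename)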
"""
author: <NAME>
references:
1.https://arxiv.org/pdf/1505.04597.pdf (the original research paper)
2.https://machinelearningmastery.com/convolutional-layers-for-deep-learning-neural-networks/
3.https://towardsdatascience.com/unet-line-by-line-explanation-9b191c76baf5
4.https://github.com/ashishrana160796/nalu-cell-counting/blob/master/exploring-cell-counting/model.py
The convolutional neural network, or CNN,
is a kind of neural network model designed
to work with two-dimensional image data.
It makes use of a convolutional layer that
gives the network its name. This layer
performs an operation called a convolution,
which is essentially taking the dot product
of a set of weights, or filters, and an
array derived from an input image.
U-Net, introduced in 2015, was an innovative
approach to addressing the issue of image
segmentation...
We use tensorflow, a machine learning library,
and keras, a neural network library, to help
make it possible.
"""
# For type hinting
from typing import List, Tuple
# For image preprocessing
import numpy as np
import pandas as pd
# For accessing the dataset
from cell_counter.import_dataset import get_dataset_info, load_images_from_dataframe
# For creating and using CNN
import tensorflow as tf
# For unet
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
def unet_preprocess_data(
path: str = None, num: int = 2500, df = pd.DataFrame(), split=0.1
) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
"""
Reduce the resolution, and normalize the images in the dataset.
Modification is in-place.
Parameters:
path (str): Path to images.
num (int): Total number of images to import from the dataset.
df (pd.DataFrame): Images to use, if any.
split (float): Proportion of images to use for testing.
Returns:
Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]: The
dataset, including the preprocessed images.
"""
if len(df.columns) == 0:
# Filter to only use images without blur
df = get_dataset_info(path)
df = df[df["blur"] == 1]
# Randomly select 'num' from the remaining images, without replacement
df = df.sample(n=num, replace=False)
# Return images and labels from dataframe
(training_images, training_labels), (
testing_images, testing_labels,
) = load_images_from_dataframe(df, path=path, resolution=(128, 128), split=split)
scale = 1 / float(255)
for index, image in enumerate(training_images):
training_images[index] = image * scale
for index, image in enumerate(testing_images):
testing_images[index] = image * scale
return (training_images, training_labels), (
testing_images,
testing_labels,
)
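# Illustrative usage (not from the original source); the dataset path is a placeholder.
def _example_preprocess():
    """Load and normalise 500 hypothetical images with a 10% test split."""
    return unet_preprocess_data(
        path="data/cell_images",   # placeholder dataset path
        num=500,
        split=0.1,
    )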
# The original U-Net implementation.
# from https://github.com/zhixuhao/unet/blob/master/model.py
def build_unet(pretrained_weights=None):
"""
Returns a unet model for use on a preprocessed dataset.
Returns:
keras Model: The generated U-Net model.
"""
preprocessed_image_shape = (128, 128, 1)
inputs = Input(preprocessed_image_shape)
conv1 = Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
# =========================================================================
conv2 = Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
# =========================================================================
conv3 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
# =========================================================================
conv4 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# =========================================================================
conv5 = Conv2D(1024, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
# =========================================================================
up6 = Conv2D(512, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv6)
# =========================================================================
up7 = Conv2D(256, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv7)
# =========================================================================
up8 = Conv2D(128, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv8)
# =========================================================================
up9 = Conv2D(64, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv9)
# =========================================================================
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
# model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model
# The modified U-Net architecture, which truncates the decoding
# part (where upsampling occurs) and uses output from the encoder
# section (where downsampling occurs) as input to a simple CNN
# in order to get a count of the segmented objects.
#
# based on https://github.com/zhixuhao/unet/blob/master/model.py
def build_modified_unet():
"""
Returns a unet model for use on a preprocessed dataset.
Returns:
keras Model class: The generated U-Net model.
"""
preprocessed_image_shape = (128, 128, 1)
inputs = Input(preprocessed_image_shape)
conv1 = Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
# =========================================================================
conv2 = Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
# =========================================================================
conv3 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
# =========================================================================
conv4 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# =========================================================================
conv5 = Conv2D(1024, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
# =========================================================================
up6 = Conv2D(512, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv6)
# =========================================================================
up7 = Conv2D(256, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv7)
# =========================================================================
# Append simple cnn to reduce the final output to appropriate dimensions as
# suggested by my team leader, <NAME>. Implemented in
# nalu_fcrn_cellcounter.py by <NAME>. Modified to fit U-Net.
cnn_filter = 16
cnn_pool_size = 2
kernal = (3, 3)
dropout_rate = 0.2
outputs_ = Conv2D(cnn_filter, kernal, activation="relu")(conv7)
outputs_ = MaxPooling2D(pool_size=cnn_pool_size)(outputs_)
outputs_ = Dropout(dropout_rate)(outputs_)
outputs_ = Conv2D(
2 * cnn_filter, kernal, activation="relu")(outputs_)
outputs_ = MaxPooling2D(pool_size=cnn_pool_size)(outputs_)
outputs_ = Conv2D(
2 * cnn_filter, kernal, activation="relu")(outputs_)
outputs_ = Flatten()(outputs_)
outputs_ = Dense(64)(outputs_)
outputs_ = Dense(1, activation="relu")(outputs_)
model = Model(inputs=inputs, outputs=outputs_)
return model
def compile_unet(model):
"""
Compiles the unet model.
Parameters: the model.
model: The unet to be compiled.
"""
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["mse"])
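# Illustrative only (not from the original source): building and compiling the modified
# U-Net counting model before training with model.fit().
def _example_build_and_compile():
    """Return a compiled counting model ready for training."""
    model = build_modified_unet()
    compile_unet(model)
    return model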
def run_unet(
model,
path: str = None,
image_number: int = 2000,
checkpointing: bool = True,
checkpoint_path: str = None,
epochs: int = 10,
validation_split: float = 0.0,
verbose: int = 2,
df: pd.DataFrame = pd.DataFrame()  # api: pandas.DataFrame
import os
import fnmatch
from utils_thai import file2date
from bs4 import BeautifulSoup
from utils_scraping import parse_file, pptx2chartdata, sanitize_filename
from covid_data import briefing_case_types, briefing_deaths, briefing_deaths_provinces, briefing_deaths_summary, briefing_documents, get_tests_by_area_chart_pptx, test_dav_files, vac_manuf_given, vac_slides_files, vaccination_daily, vaccination_reports_files2, vaccination_tables, get_tests_by_area_pdf
import pandas as pd
import pytest
from utils_pandas import export, import_csv
import dateutil
# def write_csv(df, input, parser_name):
# export(df, f"{input.rsplit('.', 1)[0]}.{parser_name}", csv_only=True)
# def find_files(dir, pat):
# dir_path = os.path.dirname(os.path.realpath(__file__))
# dir_path = os.path.join(dir_path, dir)
# for root, dir, files in os.walk(dir_path):
# for file in fnmatch.filter(files, pat):
# base, ext = file.rsplit(".", 1)
# testdf = None
# csvs = fnmatch.filter(files, f"{base}*.csv")
# if not csvs:
# yield os.path.join(root, file), testdf, None
# continue
# for check in csvs:
# _, func, ext = check.rsplit(".", 2)
# try:
# testdf = import_csv(check.rsplit(".", 1)[0], dir=root, index=["Date"])
# except pd.errors.EmptyDataError:
# pass
# yield os.path.join(root, file), testdf, func
def dl_files(target_dir, dl_gen, check=False):
"find csv files and match them to dl files, either by filename or date"
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, target_dir)
downloads = {}
for url, date, get_file in dl_gen(check):
fname = sanitize_filename(url.rsplit("/", 1)[-1])
fname, _ = fname.rsplit(".", 1) # remove ext
if date is not None:
sdate = str(date.date())
downloads[sdate] = (sdate, get_file)
# put in file so test is identified if no date
downloads[fname] = (str(date.date()) if date is not None else fname, get_file)
tests = []
missing = False
for root, dir, files in os.walk(dir_path):
for test in fnmatch.filter(files, "*.json"):
base, ext = test.rsplit(".", 1)
# special format of name with .2021-08-01 to help make finding test files easier
if "." in base:
rest, dateish = base.rsplit(".", 1)
if file2date(dateish):
base = rest
# throw away date since rest is file to check against
try:
testdf = pd.read_json(os.path.join(root, test), orient="table")
except ValueError:
testdf = None
# try:
# testdf = import_csv(check.rsplit(".", 1)[0], dir=root, index=["Date"])
# except pd.errors.EmptyDataError:
# testdf = None
date, get_file = downloads.get(base, (None, None))
if get_file is None:
missing = True
tests.append((date, testdf, get_file))
if missing and not check:
# files not cached yet so try again
return dl_files(target_dir, dl_gen, check=True)
else:
return tests
# @pytest.mark.parametrize("input, testdf, parser", find_files("testing_moph", "*.pptx"))
# def test_pptx(input, testdf, parser):
# data = pd.DataFrame()
# raw = pd.DataFrame()
# for chart, title, series, pagenum in pptx2chartdata(input):
# data, raw = get_tests_by_area_chart_pptx(input, title, series, data, raw)
# pd.testing.assert_frame_equal(testdf, data)
# 021-07-05 0.0
# 2021-07-06 0.0
# 2021-07-07 0.0
# 2021-07-08 0.0
# 2021-07-09 0.0
# 2021-07-10 0.0
# 2021-07-11 0.0
@pytest.mark.parametrize("fname, testdf, get_file", dl_files("vaccination_daily", vaccination_reports_files2))
def test_vac_reports(fname, testdf, get_file):
assert get_file is not None
file = get_file() # Actually download
assert file is not None
df = pd.DataFrame(columns=["Date"]).set_index(["Date"])
for page in parse_file(file):
df = vaccination_daily(df, None, file, page)
# df.to_json(f"tests/vaccination_daily/{fname}.{str(df.index.max().date())}.json", orient='table', indent=2)
pd.testing.assert_frame_equal(testdf, df, check_dtype=False)
@pytest.mark.parametrize("fname, testdf, get_file", dl_files("vaccination_tables", vaccination_reports_files2))
def test_vac_tables(fname, testdf, get_file):
assert get_file is not None
file = get_file() # Actually download
assert file is not None
df = pd.DataFrame(columns=["Date"]).set_index(["Date"])
for page in parse_file(file):
df = vaccination_tables(df, None, page, file)
# df.to_json(f"tests/vaccination_tables/{fname}.{str(df.index.max()[0].date())}.json", orient='table', indent=2)
pd.testing.assert_frame_equal(testdf, df, check_dtype=False)
@pytest.mark.parametrize("fname, testdf, get_file", dl_files("vac_manuf_given", vac_slides_files))
def test_vac_manuf_given(fname, testdf, get_file):
assert get_file is not None
file = get_file() # Actually download
assert file is not None
df = pd.DataFrame(columns=["Date"]).set_index(["Date"])
for i, page in enumerate(parse_file(file), 1):
df = vac_manuf_given(df, page, file, i)
# df.to_json(f"tests/vac_manuf_given/{fname}.{str(df.index.max().date())}.json", orient='table', indent=2)
pd.testing.assert_frame_equal(testdf, df, check_dtype=False)
def find_testing_pptx(check):
return [(file, None, dl) for file, dl in test_dav_files(ext=".pptx")]
def find_testing_pdf(check):
return [(file, None, dl) for file, dl in test_dav_files(ext=".pdf")]
@pytest.mark.parametrize("fname, testdf, dl", dl_files("testing_moph", find_testing_pptx))
def test_get_tests_by_area_chart_pptx(fname, testdf, dl):
data, raw = pd.DataFrame(), pd.DataFrame()
assert dl is not None
file = dl()
assert file is not None
for chart, title, series, pagenum in pptx2chartdata(file):
data, raw = get_tests_by_area_chart_pptx(file, title, series, data, raw)
# raw.to_json(f"tests/testing_moph/{fname}.json", orient='table', indent=2)
pd.testing.assert_frame_equal(testdf, raw, check_dtype=False)
@pytest.mark.parametrize("fname, testdf, dl", dl_files("testing_moph", find_testing_pdf))
def test_get_tests_by_area_chart_pdf(fname, testdf, dl):
data, raw = pd.DataFrame(), pd.DataFrame()
import pandas as pd
import glob
import numpy as np
from ..processing.base_processing import path_features , path_predictions, path_inputs, read_ethnicity_data
from ..processing.abdominal_composition_processing import read_abdominal_data
from ..processing.brain_processing import read_grey_matter_volumes_data, read_subcortical_volumes_data, read_brain_data, read_brain_dMRI_weighted_means_data
from ..processing.heart_processing import read_heart_data, read_heart_size_data, read_heart_PWA_data
from ..processing.body_composition_processing import read_body_composition_data
from ..processing.bone_composition_processing import read_bone_composition_data
from ..processing.ecg_processing import read_ecg_at_rest_data
from ..processing.anthropometry_processing import read_anthropometry_impedance_data, read_anthropometry_body_size_data, read_anthropometry_data
from ..processing.biochemestry_processing import read_blood_biomarkers_data, read_urine_biomarkers_data, read_blood_count_data, read_blood_data, read_urine_and_blood_data
from ..processing.eye_processing import read_eye_autorefraction_data, read_eye_acuity_data, read_eye_intraocular_pressure_data, read_eye_data
from ..processing.spirometry_processing import read_spirometry_data
from ..processing.blood_pressure_processing import read_blood_pressure_data
from ..processing.arterial_stiffness_processing import read_arterial_stiffness_data
from ..processing.mix_processing import read_vascular_all_data, read_all_brain_and_cognitive, read_heart_MRI_data, read_heart_all_data, read_biochemistry_data, read_musculoskeletal_data
from ..processing.carotid_ultrasound_processing import read_carotid_ultrasound_data
from ..processing.bone_densitometry_processing import read_bone_densitometry_data
from ..processing.hand_grip_strength_processing import read_hand_grip_strength_data
from ..processing.hearing_tests_processing import read_hearing_test_data
from ..processing.cognitive_tests_processing import read_reaction_time_data, read_matrix_pattern_completion_data, read_tower_rearranging_data, \
read_symbol_digit_substitution_data, read_paired_associative_learning_data, \
read_prospective_memory_data, read_numeric_memory_data, read_fluid_intelligence_data, read_trail_making_data , \
read_pairs_matching_data, read_all_cognitive_data
from ..processing.physical_activity_processing import read_physical_activity_data
map_dataset_to_field_and_dataloader = {
'BrainGreyMatterVolumes' : (1101, read_grey_matter_volumes_data),
'BrainSubcorticalVolumes': (1102, read_subcortical_volumes_data),
'BraindMRIWeightedMeans' : (135, read_brain_dMRI_weighted_means_data),
'BrainMRIAllBiomarkers' : (100, read_brain_data),
'CognitiveReactionTime' : (100032, read_reaction_time_data),
'CognitiveMatrixPatternCompletion' : (501, read_matrix_pattern_completion_data),
'CognitiveTowerRearranging' : (503, read_tower_rearranging_data),
'CognitiveSymbolDigitSubstitution' : (502, read_symbol_digit_substitution_data),
'CognitivePairedAssociativeLearning' : (506, read_paired_associative_learning_data),
'CognitiveProspectiveMemory' : (100031, read_prospective_memory_data),
'CognitiveNumericMemory' : (100029, read_numeric_memory_data),
'CognitiveFluidIntelligence' : (100027, read_fluid_intelligence_data),
'CognitiveTrailMaking' : (505, read_trail_making_data),
'CognitivePairsMatching' : (100030, read_pairs_matching_data),
'CognitiveAllBiomarkers' : ('Custom', read_all_cognitive_data),
'BrainAndCognitive' : ('Custom', read_all_brain_and_cognitive),
'EyeAutorefraction' : (100014, read_eye_autorefraction_data),
'EyeAcuity' : (100017, read_eye_acuity_data),
'EyeIntraocularPressure' : (100015, read_eye_intraocular_pressure_data),
'EyesAllBiomarkers' : (100013, read_eye_data),
# Hearing
'HearingTest' : (100049, read_hearing_test_data),
# Lungs
'Spirometry' : (100020, read_spirometry_data),
# Vascular
'BloodPressure' : (100011, read_blood_pressure_data),
'CarotidUltrasound' : (101, read_carotid_ultrasound_data),
'ArterialStiffness' : (100007, read_arterial_stiffness_data),
'VascularAllBiomarkers' : ('Custom', read_vascular_all_data),
# Heart
'HeartAllBiomarkers' : ('Custom', read_heart_all_data),
'HeartSize' : (133, read_heart_size_data),
'HeartPWA' : (128, read_heart_PWA_data),
'HeartMRIAll' : ('Custom', read_heart_MRI_data),
'ECGAtRest' : (104, read_ecg_at_rest_data),
# Musculoskeletal
'AnthropometryImpedance' : (100008, read_anthropometry_impedance_data),
'AnthropometryBodySize' : (100010, read_anthropometry_body_size_data),
'BoneDensitometryOfHeel' : (100018, read_bone_densitometry_data),
'HandGripStrength' : (100019, read_hand_grip_strength_data),
'MusculoskeletalAllBiomarkers' : ('Custom', read_musculoskeletal_data),
#Biochemistry
'BloodBiochemistry' : (17518, read_blood_biomarkers_data),
'UrineBiochemistry' : (100083, read_urine_biomarkers_data),
'Biochemistry' : ('Custom', read_biochemistry_data),
#ImmuneSystem
'BloodCount' : (100081, read_blood_count_data), # Need to do blood infection
'PhysicalActivity' : ('Custom', read_physical_activity_data),
#'Demographics' : ('Demographics',read_demographics_data)
}
dict_dataset_to_organ_and_view = {
## Brain
'BrainGreyMatterVolumes' : ('Brain', 'MRI', 'GreyMatterVolumes'),
'BrainSubcorticalVolumes': ('Brain', 'MRI', 'SubcorticalVolumes'),
'BraindMRIWeightedMeans' : ('Brain', 'MRI', 'dMRIWeightedMeans'),
'BrainMRIAllBiomarkers' : ('Brain', 'MRI', 'AllBiomarkers'),
'CognitiveReactionTime' : ('Brain', 'Cognitive', 'ReactionTime'),
'CognitiveMatrixPatternCompletion' : ('Brain', 'Cognitive', 'MatrixPatternCompletion'),
'CognitiveTowerRearranging' : ('Brain', 'Cognitive', 'TowerRearranging'),
'CognitiveSymbolDigitSubstitution' : ('Brain', 'Cognitive', 'SymbolDigitSubstitution'),
'CognitivePairedAssociativeLearning' : ('Brain', 'Cognitive', 'PairedAssociativeLearning'),
'CognitiveProspectiveMemory' : ('Brain', 'Cognitive', 'ProspectiveMemory'),
'CognitiveNumericMemory' : ('Brain', 'Cognitive', 'NumericMemory'),
'CognitiveFluidIntelligence' : ('Brain', 'Cognitive', 'FluidIntelligence'),
'CognitiveTrailMaking' : ('Brain', 'Cognitive', 'TrailMaking'),
'CognitivePairsMatching' : ('Brain', 'Cognitive', 'PairsMatching'),
'CognitiveAllBiomarkers' : ('Brain', 'Cognitive', 'AllScalars'),
'BrainAndCognitive' : ('Brain', 'All', 'Scalars'),
## Eyes
'EyeAutorefraction' : ('Eyes', 'Autorefraction', 'Scalars'),
'EyeAcuity' : ('Eyes', 'Acuity', 'Scalars'),
'EyeIntraocularPressure' : ('Eyes', 'IntraocularPressure', 'Scalars'),
'EyesAllBiomarkers' : ('Eyes', 'All', 'Scalars'),
# Hearing
'HearingTest' : ('Hearing', 'HearingTest', 'Scalars'),
# Lungs
'Spirometry' : ('Lungs', 'Spirometry', 'Scalars'),
# Vascular
'BloodPressure' : ('Arterial', 'BloodPressure', 'Scalars'),
'CarotidUltrasound' : ('Arterial', 'Carotids', 'Scalars'),
'ArterialStiffness' : ('Arterial', 'PWA', 'Scalars'),
'VascularAllBiomarkers' : ('Arterial', 'All', 'Scalars'),
# Heart
'HeartAllBiomarkers' : ('Heart', 'All', 'Scalars'),
'HeartSize' : ('Heart', 'MRI', 'Size'),
'HeartPWA' : ('Heart', 'MRI', 'PWA'),
'HeartMRIAll' : ('Heart', 'MRI', 'AllScalars'),
'ECGAtRest' : ('Heart', 'ECG', 'Scalars'),
# Musculoskeletal
'AnthropometryImpedance' : ('Musculoskeletal', 'Scalars', 'Impedance'),
'AnthropometryBodySize' : ('Musculoskeletal', 'Scalars', 'Anthropometry'),
'BoneDensitometryOfHeel' : ('Musculoskeletal', 'Scalars', 'HeelBoneDensitometry'),
'HandGripStrength' : ('Musculoskeletal', 'Scalars', 'HandGripStrength'),
'MusculoskeletalAllBiomarkers' : ('Musculoskeletal', 'Scalars', 'AllScalars'),
#Biochemistry
'BloodBiochemistry' : ('Biochemistry', 'Blood', 'Scalars'),
'UrineBiochemistry' : ('Biochemistry', 'Urine', 'Scalars'),
'Biochemistry' : ('Biochemistry', 'All', 'Scalars'),
#ImmuneSystem
'BloodCount' : ('ImmuneSystem', 'BloodCount', 'Scalars'), # Need to do blood infection
'PhysicalActivity' : ('PhysicalActivity', 'FullWeek', 'Scalars'),
'Demographics' : ('Demographics', 'All', 'Scalars')
}
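# Illustrative sketch (not part of the module): how the two mappings above are
# typically used together -- the first resolves a dataset name to a numeric
# field id (or 'Custom') plus its reader function, the second to a three-level
# label. 'AnthropometryBodySize' is just an example key.
def _example_lookup(dataset_name="AnthropometryBodySize"):
    field_id, reader = map_dataset_to_field_and_dataloader[dataset_name]
    organ, view, subview = dict_dataset_to_organ_and_view[dataset_name]
    return field_id, reader, (organ, view, subview)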
def load_data(dataset, **kwargs):
if 'Cluster' in dataset :
df = pd.read_csv(dataset).set_index('id')
organ, view = 'Cluster', 'main'
elif '/n' not in dataset:
if dataset == 'Demographics':
df = pd.read_csv('/n/groups/patel/samuel/sex_age_eid_ethnicity.csv').set_index('id')
elif dataset == 'PhysicalActivity' :
path_dataset = path_inputs + dataset + '.csv'
df = pd.read_csv(path_dataset)
import numpy as np
import pandas as pd
from lib import pandas_option as pd_op
from lib import repository
def save_entry(date, side, price):
sql = "insert into backtest_entry values('{date}','{side}',{price},0)" \
.format(date=date, side=side, price=price)
repository.execute(database=database, sql=sql, log=False)
volume_ma = 10
diff_ratio = 2
back_min = 5
print("----------------------------------------------")
print("volume_ma", volume_ma, "diff_ratio", diff_ratio, "back_min", back_min)
asset = 1000000
database = "tradingbot"
sql = "truncate backtest_entry"
repository.execute(database=database, sql=sql, log=False)
pd_op.display_max_columns()
pd_op.display_round_down()
sql = """
select
b2.Date as date,
b3.Close as fr_Price,
b2.Close as to_Price,
b1.Volume as v1,
b2.Volume as v2,
b1.Date as bb1,
b2.Date as bb2,
b3.Date as bb3
from
bitflyer_btc_ohlc_1M b1
inner join
bitflyer_btc_ohlc_1M b2
on (b1.Date + interval 1 minute) = b2.Date
inner join
bitflyer_btc_ohlc_1M b3
on (b3.Date + interval {back_min} minute) = b2.Date
order by
Date
""".format(back_min=back_min)
be = repository.read_sql(database=database, sql=sql)
df = be["v1"]
sma = df.rolling(volume_ma).mean()[:volume_ma]
be["v1_ma"] = pd.concat([sma, df[volume_ma:]]).ewm(
span=volume_ma, adjust=False).mean()
df = be["v2"]
sma = df.rolling(volume_ma).mean()[:volume_ma]
be["v2_ma"] =
|
pd.concat([sma, df[volume_ma:]])
|
pandas.concat
|
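# Illustrative sketch of the volume smoothing above: the first `volume_ma`
# points are seeded with a simple moving average, after which an exponentially
# weighted mean (span=volume_ma) takes over. Standalone helper with made-up
# input, not used by the backtest itself.
def sma_seeded_ewm(series, window):
    seed = series.rolling(window).mean()[:window]
    return pd.concat([seed, series[window:]]).ewm(span=window, adjust=False).mean()
# e.g. sma_seeded_ewm(pd.Series([1, 2, 3, 4, 5, 6]), 3)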
# Imports: standard library
import os
import logging
from abc import ABC
from typing import Any, Dict, List, Tuple
from datetime import datetime
# Imports: third party
import numpy as np
import pandas as pd
import unidecode
# Imports: first party
from ml4c3.utils import get_unix_timestamps
from definitions.edw import EDW_FILES, MED_ACTIONS
from definitions.globals import TIMEZONE
from tensorize.edw.data_objects import (
Event,
Procedure,
Medication,
StaticData,
Measurement,
)
# pylint: disable=too-many-branches, dangerous-default-value
class Reader(ABC):
"""
Parent class for our Readers class.
As an abstract class, it can't be directly instanced. Its children
should be used instead.
"""
@staticmethod
def _ensure_contiguous(data: np.ndarray) -> np.ndarray:
if len(data) > 0:
dtype = Any
try:
data = data.astype(float)
if all(x.is_integer() for x in data):
dtype = int
else:
dtype = float
except ValueError:
dtype = "S"
try:
data = np.ascontiguousarray(data, dtype=dtype)
except (UnicodeEncodeError, SystemError):
logging.info("Unknown character. Not ensuring contiguous array.")
new_data = []
for element in data:
new_data.append(unidecode.unidecode(str(element)))
data = np.ascontiguousarray(new_data, dtype="S")
except ValueError:
logging.exception(
f"Unknown method to convert np.ndarray of "
f"{dtype} objects to numpy contiguous type.",
)
raise
return data
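# Illustrative sketch (not part of the module): expected coercion from
# Reader._ensure_contiguous -- numeric-looking strings become int or float
# arrays, anything non-numeric falls back to a contiguous bytes array.
def _example_ensure_contiguous():
    assert Reader._ensure_contiguous(np.array(["1", "2"])).dtype.kind == "i"
    assert Reader._ensure_contiguous(np.array(["1.5", "2"])).dtype.kind == "f"
    assert Reader._ensure_contiguous(np.array(["a", "b"])).dtype.kind == "S"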
class EDWReader(Reader):
"""
Implementation of the Reader for EDW data.
Usage:
>>> reader = EDWReader('MRN')
>>> hr = reader.get_measurement('HR')
"""
def __init__(
self,
path: str,
mrn: str,
csn: str,
med_file: str = EDW_FILES["med_file"]["name"],
move_file: str = EDW_FILES["move_file"]["name"],
adm_file: str = EDW_FILES["adm_file"]["name"],
demo_file: str = EDW_FILES["demo_file"]["name"],
vitals_file: str = EDW_FILES["vitals_file"]["name"],
lab_file: str = EDW_FILES["lab_file"]["name"],
surgery_file: str = EDW_FILES["surgery_file"]["name"],
other_procedures_file: str = EDW_FILES["other_procedures_file"]["name"],
transfusions_file: str = EDW_FILES["transfusions_file"]["name"],
events_file: str = EDW_FILES["events_file"]["name"],
medhist_file: str = EDW_FILES["medhist_file"]["name"],
surghist_file: str = EDW_FILES["surghist_file"]["name"],
socialhist_file: str = EDW_FILES["socialhist_file"]["name"],
):
"""
Init EDW Reader.
:param path: absolute path of files.
:param mrn: MRN of the patient.
:param csn: CSN of the patient visit.
:param med_file: file containing the medicines data from the patient.
Can be inferred if None.
:param move_file: file containing the movements of the patient
(admission, transfer and discharge) from the patient.
Can be inferred if None.
:param demo_file: file containing the demographic data from
the patient. Can be inferred if None.
:param vitals_file: file containing the vital signals from
the patient. Can be inferred if None.
:param lab_file: file containing the laboratory signals from
the patient. Can be inferred if None.
:param adm_file: file containing the admission data from
the patient. Can be inferred if None.
:param surgery_file: file containing the surgeries performed to
the patient. Can be inferred if None.
:param other_procedures_file: file containing procedures performed to
the patient. Can be inferred if None.
:param transfusions_file: file containing the transfusions performed to
the patient. Can be inferred if None.
:param events_file: file containing the events during
the patient stay. Can be inferred if None.
:param medhist_file: file containing the medical history information of the
patient. Can be inferred if None.
:param surghist_file: file containing the surgical history information of the
patient. Can be inferred if None.
:param socialhist_file: file containing the social history information of the
patient. Can be inferred if None.
"""
self.path = path
self.mrn = mrn
self.csn = csn
self.move_file = self.infer_full_path(move_file)
self.demo_file = self.infer_full_path(demo_file)
self.vitals_file = self.infer_full_path(vitals_file)
self.lab_file = self.infer_full_path(lab_file)
self.med_file = self.infer_full_path(med_file)
self.adm_file = self.infer_full_path(adm_file)
self.surgery_file = self.infer_full_path(surgery_file)
self.other_procedures_file = self.infer_full_path(other_procedures_file)
self.transfusions_file = self.infer_full_path(transfusions_file)
self.events_file = self.infer_full_path(events_file)
self.medhist_file = self.infer_full_path(medhist_file)
self.surghist_file = self.infer_full_path(surghist_file)
self.socialhist_file = self.infer_full_path(socialhist_file)
self.timezone = TIMEZONE
def infer_full_path(self, file_name: str) -> str:
"""
Infer a file name from MRN and type of data.
Used if a file is not specified on the input.
:param file_name: <str> 8 possible options:
'medications.csv', 'demographics.csv', 'labs.csv',
'flowsheet.csv', 'admission-vitals.csv',
'surgery.csv','procedures.csv', 'transfusions.csv'
:return: <str> the inferred path
"""
if not file_name.endswith(".csv"):
file_name = f"{file_name}.csv"
full_path = os.path.join(self.path, self.mrn, self.csn, file_name)
return full_path
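# Illustrative worked example (comment only, ids made up): with
# path="/data/edw", mrn="12345678", csn="87654321",
# infer_full_path("labs") returns "/data/edw/12345678/87654321/labs.csv".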
def list_vitals(self) -> List[str]:
"""
List all the vital signs taken from the patient.
:return: <List[str]> List with all the available vital signals
from the patient.
"""
signal_column = EDW_FILES["vitals_file"]["columns"][0]
vitals_df = pd.read_csv(self.vitals_file)
# Remove measurements out of dates
time_column = EDW_FILES["vitals_file"]["columns"][3]
admit_column = EDW_FILES["adm_file"]["columns"][3]
discharge_column = EDW_FILES["adm_file"]["columns"][4]
admission_df = pd.read_csv(self.adm_file)
init_date = admission_df[admit_column].values[0]
end_date = admission_df[discharge_column].values[0]
vitals_df = vitals_df[vitals_df[time_column] >= init_date]
if str(end_date) != "nan":
vitals_df = vitals_df[vitals_df[time_column] <= end_date]
return list(vitals_df[signal_column].astype("str").str.upper().unique())
def list_labs(self) -> List[str]:
"""
List all the lab measurements taken from the patient.
:return: <List[str]> List with all the available lab measurements
from the patient.
"""
signal_column = EDW_FILES["lab_file"]["columns"][0]
labs_df = pd.read_csv(self.lab_file)
return list(labs_df[signal_column].astype("str").str.upper().unique())
def list_medications(self) -> List[str]:
"""
List all the medications given to the patient.
:return: <List[str]> List with all the medications on
the patients record.
"""
signal_column = EDW_FILES["med_file"]["columns"][0]
status_column = EDW_FILES["med_file"]["columns"][1]
med_df = pd.read_csv(self.med_file)
med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
return list(med_df[signal_column].astype("str").str.upper().unique())
def list_surgery(self) -> List[str]:
"""
List all the types of surgery performed to the patient.
:return: <List[str]> List with all the event types associated
with the patient.
"""
return self._list_procedures(self.surgery_file, "surgery_file")
def list_other_procedures(self) -> List[str]:
"""
List all the types of procedures performed to the patient.
:return: <List[str]> List with all the event types associated
with the patient.
"""
return self._list_procedures(
self.other_procedures_file,
"other_procedures_file",
)
def list_transfusions(self) -> List[str]:
"""
List all the transfusions types that have been done on the patient.
:return: <List[str]> List with all the transfusions type of
the patient.
"""
return self._list_procedures(self.transfusions_file, "transfusions_file")
@staticmethod
def _list_procedures(file_name, file_key) -> List[str]:
"""
Filter and list all the procedures in the given file.
"""
signal_column, status_column, start_column, end_column = EDW_FILES[file_key][
"columns"
]
data = pd.read_csv(file_name)
data = data[data[status_column].isin(["Complete", "Completed"])]
data = data.dropna(subset=[start_column, end_column])
return list(data[signal_column].astype("str").str.upper().unique())
def list_events(self) -> List[str]:
"""
List all the event types during the patient stay.
:return: <List[str]> List with all the events type.
"""
signal_column, _ = EDW_FILES["events_file"]["columns"]
data = pd.read_csv(self.events_file)
return list(data[signal_column].astype("str").str.upper().unique())
def get_static_data(self) -> StaticData:
"""
Get the static data from the EDW csv file (admission + demographics).
:return: <StaticData> wrapped information
"""
movement_df = pd.read_csv(self.move_file)
admission_df = pd.read_csv(self.adm_file)
demographics_df = pd.read_csv(self.demo_file)
# Obtain patient's movement (location and when they move)
department_id = np.array(movement_df["DepartmentID"], dtype=int)
department_nm = np.array(movement_df["DepartmentDSC"], dtype="S")
room_bed = np.array(movement_df["BedLabelNM"], dtype="S")
move_time = np.array(movement_df["TransferInDTS"], dtype="S")
# Convert weight from ounces to pounds
weight = float(admission_df["WeightPoundNBR"].values[0]) / 16
# Convert height from feet & inches to meters
height = self._convert_height(admission_df["HeightTXT"].values[0])
admin_type = admission_df["HospitalAdmitTypeDSC"].values[0]
# Find possible diagnosis at admission
diag_info = admission_df["AdmitDiagnosisTXT"].dropna().drop_duplicates()
if list(diag_info):
diag_info = diag_info.astype("str")
admin_diag = diag_info.str.cat(sep="; ")
else:
admin_diag = "UNKNOWN"
admin_date = admission_df["HospitalAdmitDTS"].values[0]
birth_date = demographics_df["BirthDTS"].values[0]
race = demographics_df["PatientRaceDSC"].values[0]
sex = demographics_df["SexDSC"].values[0]
end_date = admission_df["HospitalDischargeDTS"].values[0]
# Check whether it exists a deceased date or not
end_stay_type = (
"Alive"
if str(demographics_df["DeathDTS"].values[0]) == "nan"
else "Deceased"
)
# Find local time, if patient is still in hospital, take today's date
if str(end_date) != "nan":
offsets = self._get_local_time(admin_date[:-1], end_date[:-1])
else:
today_date = datetime.today().strftime("%Y-%m-%d %H:%M:%S.%f")
offsets = self._get_local_time(admin_date[:-1], today_date)
offsets = list(set(offsets)) # Take unique local times
local_time = np.empty(0)
for offset in offsets:
local_time = np.append(local_time, f"UTC{int(offset/3600)}:00")
local_time = local_time.astype("S")
# Find medical, surgical and social history of patient
medical_hist = self._get_med_surg_hist("medhist_file")
surgical_hist = self._get_med_surg_hist("surghist_file")
tobacco_hist, alcohol_hist = self._get_social_hist()
return StaticData(
department_id,
department_nm,
room_bed,
move_time,
weight,
height,
admin_type,
admin_diag,
admin_date,
birth_date,
race,
sex,
end_date,
end_stay_type,
local_time,
medical_hist,
surgical_hist,
tobacco_hist,
alcohol_hist,
)
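# Illustrative worked example for the local_time labels built above
# (comment only): an offset of -18000 seconds from _get_local_time gives
# f"UTC{int(-18000 / 3600)}:00" == "UTC-5:00".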
def get_med_doses(self, med_name: str) -> Medication:
"""
Get all the doses of the input medication given to the patient.
:param medication_name: <string> name of the medicine
:return: <Medication> wrapped list of medications doses
"""
(
signal_column,
status_column,
time_column,
route_column,
weight_column,
dose_column,
dose_unit_column,
infusion_column,
infusion_unit_column,
duration_column,
duration_unit_column,
) = EDW_FILES["med_file"]["columns"]
source = EDW_FILES["med_file"]["source"]
med_df = pd.read_csv(self.med_file)
med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
med_df = med_df.sort_values(time_column)
if med_name not in med_df[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{med_name} was not found in {self.med_file}.")
idx = np.where(med_df[signal_column].astype("str").str.upper() == med_name)[0]
route = np.array(med_df[route_column])[idx[0]]
wt_base_dose = bool(
np.array(med_df[weight_column])[idx[0]] == "Y"
)
if med_df[duration_column].isnull().values[idx[0]]:
start_date = self._get_unix_timestamps(np.array(med_df[time_column])[idx])
action = np.array(med_df[status_column], dtype="S")[idx]
if (
np.array(med_df[status_column])[idx[0]] in [MED_ACTIONS[0]]
or med_df[infusion_column].isnull().values[idx[0]]
):
dose = np.array(med_df[dose_column], dtype="S")[idx]
units = np.array(med_df[dose_unit_column])[idx[0]]
else:
dose = np.array(med_df[infusion_column])[idx]
units = np.array(med_df[infusion_unit_column])[idx[0]]
else:
dose = np.array([])
units = np.array(med_df[infusion_unit_column])[idx[0]]
start_date = np.array([])
action = np.array([])
for _, row in med_df.iloc[idx, :].iterrows():
dose = np.append(dose, [row[infusion_column], 0])
time = self._get_unix_timestamps(np.array([row[time_column]]))[0]
conversion = 1
if row[duration_unit_column] == "Seconds":
conversion = 1
elif row[duration_unit_column] == "Minutes":
conversion = 60
elif row[duration_unit_column] == "Hours":
conversion = 3600
start_date = np.append(
start_date,
[time, time + float(row[duration_column]) * conversion],
)
action = np.append(action, [row[status_column], "Stopped"])
dose = self._ensure_contiguous(dose)
start_date = self._ensure_contiguous(start_date)
action = self._ensure_contiguous(action)
return Medication(
med_name,
dose,
units,
start_date,
action,
route,
wt_base_dose,
source,
)
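# Illustrative worked example for the duration branch above (comment only,
# numbers made up): a row with infusion rate 5, duration 2, unit "Hours" and
# start time t0 is expanded to dose [5, 0], start_date [t0, t0 + 2 * 3600] and
# action [<row status>, "Stopped"], i.e. an explicit stop event is appended.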
def get_vitals(self, vital_name: str) -> Measurement:
"""
Get the vital signals from the EDW csv file 'flowsheet'.
:param vital_name: <string> name of the signal
:return: <Measurement> wrapped measurement signal
"""
vitals_df = pd.read_csv(self.vitals_file)
# Remove measurements out of dates
time_column = EDW_FILES["vitals_file"]["columns"][3]
admit_column = EDW_FILES["adm_file"]["columns"][3]
discharge_column = EDW_FILES["adm_file"]["columns"][4]
admission_df = pd.read_csv(self.adm_file)
init_date = admission_df[admit_column].values[0]
end_date = admission_df[discharge_column].values[0]
vitals_df = vitals_df[vitals_df[time_column] >= init_date]
if str(end_date) != "nan":
vitals_df = vitals_df[vitals_df[time_column] <= end_date]
return self._get_measurements(
"vitals_file",
vitals_df,
vital_name,
self.vitals_file,
)
def get_labs(self, lab_name: str) -> Measurement:
"""
Get the lab measurement from the EDW csv file 'labs'.
:param lab_name: <string> name of the signal
:return: <Measurement> wrapped measurement signal
"""
labs_df = pd.read_csv(self.lab_file)
return self._get_measurements("lab_file", labs_df, lab_name, self.lab_file)
def get_surgery(self, surgery_type: str) -> Procedure:
"""
Get all the surgery information of the input type performed to the
patient.
:param surgery_type: <string> type of surgery
:return: <Procedure> wrapped list surgeries of the input type
"""
return self._get_procedures("surgery_file", self.surgery_file, surgery_type)
def get_other_procedures(self, procedure_type: str) -> Procedure:
"""
Get all the procedures of the input type performed to the patient.
:param procedure_type: <string> type of procedure
:return: <Procedure> wrapped list procedures of the input type
"""
return self._get_procedures(
"other_procedures_file",
self.other_procedures_file,
procedure_type,
)
def get_transfusions(self, transfusion_type: str) -> Procedure:
"""
Get all the input transfusions type that were done to the patient.
:param transfusion_type: <string> Type of transfusion.
:return: <Procedure> Wrapped list of transfusions of the input type.
"""
return self._get_procedures(
"transfusions_file",
self.transfusions_file,
transfusion_type,
)
def get_events(self, event_type: str) -> Event:
"""
Get all the input event type during the patient stay.
:param event_type: <string> Type of event.
:return: <Event> Wrapped list of events of the input type.
"""
signal_column, time_column = EDW_FILES["events_file"]["columns"]
data = pd.read_csv(self.events_file)
data = data.dropna(subset=[time_column])
data = data.sort_values([time_column])
if event_type not in data[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{event_type} was not found in {self.events_file}.")
idx = np.where(data[signal_column].astype("str").str.upper() == event_type)[0]
time = self._get_unix_timestamps(np.array(data[time_column])[idx])
time = self._ensure_contiguous(time)
return Event(event_type, time)
def _get_local_time(self, init_date: str, end_date: str) -> np.ndarray:
"""
Obtain local time from init and end dates.
:param init_date: <str> String with initial date.
:param end_date: <str> String with end date.
:return: <np.ndarray> List of offsets from UTC (it may be two in
case the time shift between summer/winter occurs while the
patient is in the hospital).
"""
init_dt = datetime.strptime(init_date, "%Y-%m-%d %H:%M:%S.%f")
end_dt = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S.%f")
offset_init = self.timezone.utcoffset( # type: ignore
init_dt,
is_dst=True,
).total_seconds()
offset_end = self.timezone.utcoffset( # type: ignore
end_dt,
is_dst=True,
).total_seconds()
return np.array([offset_init, offset_end], dtype=float)
def _get_unix_timestamps(self, time_stamps: np.ndarray) -> np.ndarray:
"""
Convert readable time stamps to unix time stamps.
:param time_stamps: <np.ndarray> Array with all readable time stamps.
:return: <np.ndarray> Array with Unix time stamps.
"""
try:
arr_timestamps = pd.to_datetime(time_stamps)
except pd.errors.ParserError as error:
raise ValueError("Array contains non datetime values.") from error
# Convert readable local timestamps in local seconds timestamps
local_timestamps = (
np.array(arr_timestamps, dtype=np.datetime64)
- np.datetime64("1970-01-01T00:00:00")
) / np.timedelta64(1, "s")
# Find local time shift to UTC
if not (pd.isnull(local_timestamps[0])):
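# Illustrative worked example for the conversion above (comment only):
# (np.datetime64("2021-03-01T12:00:00") - np.datetime64("1970-01-01T00:00:00"))
# / np.timedelta64(1, "s") == 1614600000.0, i.e. seconds since the Unix epoch.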
import pandas
s = pandas.Series([51, 27, "galleta", 48.1231, 15])
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
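# Illustrative sketch (not part of the original tests): from_codes pairs
# integer codes with a category list and uses -1 for missing values, e.g.
# Categorical.from_codes([0, 1, 0, -1], ["a", "b"]) gives [a, b, a, NaN].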
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
        # NaNs are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
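        # Summary of the behaviour exercised above: missing values can be
        # represented either by code -1 (the current scheme) or, in the
        # deprecated form, by NaN being an actual category, which is why a
        # FutureWarning is expected whenever NaN appears in `categories`.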
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
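        # Illustrative note (consistent with the assertions above): `.codes`
        # is exposed as a read-only array, so rebinding it or writing into the
        # returned buffer raises, while the Categorical itself and the private
        # `_codes` array remain writeable, e.g.
        #   c.codes[4] = 1     # raises
        #   c._codes[4] = 2    # allowed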
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
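        # As checked above, min()/max() follow the declared category order
        # rather than lexical or numeric order, and with numeric_only=True
        # NaN entries are skipped instead of propagating.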
def test_unique(self):
        # when ordered=False, unique() returns the categories in the order the
        # values first appear
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
        # NaN stays in the returned values but is dropped from the categories
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
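        # As the cases above show, mode() ignores NaN entries entirely and can
        # return several values (or none), while always keeping the original
        # categories and ordering.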
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
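        # Note: shift() keeps the categories unchanged and fills the vacated
        # positions with NaN, and shift(0) returns an equal Categorical, as
        # asserted above.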
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
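        # Assumed interpretation, consistent with the checks above: for object
        # categories memory_usage(deep=True) also counts the string payloads
        # and therefore exceeds nbytes, while for numeric categories the
        # shallow and deep figures coincide.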
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
        # For a scalar key, Categorical returns an np.array (like pd.Series),
        # unlike np.ndarray.searchsorted(), which returns a scalar
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        exp = np.array([3, 5])  # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
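        # Illustrative sketch of the semantics tested above: searchsorted
        # reports insertion points within the ordered values, e.g.
        #   pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk'],
        #                  ordered=True).searchsorted('bread')  # -> array([1])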
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
        with tm.assert_produces_warning(FutureWarning):
            res = cat.levels
        self.assert_numpy_array_equal(res, exp)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
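        # The >254-column frame above is assumed to exercise the fallback in
        # which itertuples() yields plain tuples instead of namedtuples, since
        # namedtuple field counts are capped on older Python versions.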
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
            # The "distributed" default index is generated from Spark's
            # monotonically increasing IDs, so its values are not the
            # sequential 0..n-1 that pandas produces; only the contents are
            # compared after dropping the index on the pandas side.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
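        # ("compute.ordered_head" is assumed to make head() operate on the
        # natural row ordering, so its result is deterministic and directly
        # comparable with pandas, at the cost of extra work.)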
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
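        # (Lowering "compute.isin_limit" below the number of dropped labels is
        # assumed to switch the filtering from Column.isin to the alternative
        # join-based path, so the same expectation is re-checked there.)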
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
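# approx=True delegates to Spark's approx_count_distinct (HyperLogLog-based), so under the
# default relative standard deviation the estimate may drift from the true count (103 here);
# tightening rsd to 0.01 brings it back to the exact 100.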
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert that axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
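# Every API stubbed out on _MissingPandasLikeDataFrame should raise PandasNotImplementedError,
# whether it is marked as unsupported or as deprecated.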
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# pandas appears to mishandle an `np.array` argument here, so the pandas side uses the plain list
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
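# isin's handling of None/NaN changed around pandas 1.2, so older pandas versions are compared
# against hard-coded expected frames below.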
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
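# check() canonicalises both results (sort by all columns, reset the index) because row order is
# not guaranteed for the Spark-backed frame.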
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
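# (numeric clipping leaves string columns untouched, so the frame is expected back unchanged)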
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Just make sure these calls run; the sampled results are non-deterministic, so they cannot be compared.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
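# Only frac-based sampling is supported; sample(n=...) is expected to raise NotImplementedError.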
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with a Series whose name overlaps an existing column
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with a DataFrame whose columns overlap the left frame's
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
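# update() mutates the frame in place and returns None; sort before comparing since row order is
# not guaranteed.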
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Compare dtypes only; reset_index(drop=True) discards the (differing) column labels
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# The results don't share the same column names yet, so the remaining dtype checks stay as TODOs
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
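# (almost=True tolerates the small dtype/precision differences the aggregations can introduce)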
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
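# Regression check: the pivoted result should keep the same ("A", "B") MultiIndex
# as pandas, which is why the index and its repr are compared explicitly below.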
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
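# stack() pivots the innermost column level into the index: with single-level
# columns the result is a Series, with MultiIndex columns it remains a DataFrame.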
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
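# unstack() is the inverse of stack(): it moves the innermost index level into the
# columns, so chaining .unstack().unstack() lifts both index levels.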
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what if with random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
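# almost=True: the distributed cumprod may accumulate tiny floating-point error
# (for instance if it is computed via log/exp internally, which is an assumption
# here), so exact equality is not required.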
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
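# keep="first" retains the first occurrence, keep="last" the last one, and
# keep=False drops every duplicated row.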
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
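# reindex() conforms the frame to the given index/columns, inserting NaN for
# labels that are missing unless a fill_value is provided.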
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
def test_melt(self):
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
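# melt() unpivots columns into variable/value pairs; results are sorted before
# comparison since row order coming back from Spark is not guaranteed.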
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
TEN = 10.0
TWELVE = 20.0
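# TEN and TWELVE are simply arbitrary float labels for the MultiIndex columns
# below (note that TWELVE is bound to 20.0).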
columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
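# all() reduces every column to a single boolean; None values are skipped by
# default, and only axis=0 is supported (see the NotImplementedError check below).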
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
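# rank()'s `method` controls tie handling: "average" (the default), "min", "max",
# "first" and "dense"; ascending=False ranks from largest to smallest.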
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
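# round() accepts an int, a dict keyed by column label, or a Series of decimals;
# columns missing from the mapping are left unrounded.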
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
# Build the expected result manually since pandas 0.23 does not support the `fill_value` argument.
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
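# ffill() propagates the last valid observation forward; limit caps the number of
# consecutive NaNs filled, and inplace=True must also be visible through the
# column view `psser` checked below.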
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
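# filter() selects labels with exactly one of `items`, `like` or `regex`;
# axis=0 (or "index") filters the index and axis=1 (or "columns") the columns.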
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
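# transform() applies the function column-wise and must preserve length. The
# compute.shortcut_limit block repeats the checks with the limit below the 600
# rows used here, which is assumed to force the non-shortcut execution path.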
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
def test_apply(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.apply(1)
with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"):
def f1(_) -> ps.DataFrame[int]:
pass
psdf.apply(f1, axis=0)
with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"):
def f2(_) -> ps.Series[int]:
pass
psdf.apply(f2, axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
def test_apply_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use the default index for
# pandas API on Spark. Here we ignore both differences.
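# For example, `ps.DataFrame[int, int]` yields the default column names "c0" and
# "c1", which is why the comparison below is done column by column on sorted values.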
actual = psdf.apply(identify1, axis=1)
expected = pdf.apply(identify1, axis=1)
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.apply(identify2, axis=1)
expected = pdf.apply(identify2, axis=1)
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_apply_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
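# apply_batch() hands the function whole pandas DataFrames (one per internal
# batch, an implementation detail assumed here) and concatenates the returned
# frames, so `lambda pdf, a: pdf + a` with args=(1,) behaves like `pdf + 1`.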
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(),
(pdf + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.apply_batch(1)
with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"):
def f2(_) -> ps.Series[int]:
pass
psdf.pandas_on_spark.apply_batch(f2)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.apply_batch(lambda pdf: 1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_apply_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use the default index for
# pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.apply_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.apply_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify3)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
# For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
import numpy.typing as ntp
psdf = ps.from_pandas(pdf)
def identify4(
x,
) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore[name-defined]
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=idx,
)
psdf = ps.from_pandas(pdf)
def identify4(x) -> ps.DataFrame[[int, str], [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.index.names = ["number", "color"]
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
def identify5(
x,
) -> ps.DataFrame[
[("number", int), ("color", str)], [("a", int), ("b", List[int])] # noqa: F405
]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify5)
self.assert_eq(actual, pdf)
def test_transform_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(),
(pdf.c + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(),
(pdf.b + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.transform_batch(1)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.transform_batch(lambda pdf: 1)
with self.assertRaisesRegex(
ValueError, "transform_batch cannot produce aggregated results"
):
psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_transform_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use the default index for
# pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.transform_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.transform_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_transform_batch_same_anchor(self):
psdf = ps.range(10)
psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(pdf) -> ps.Series[np.int64]:
return pdf.id + 1
psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(ser) -> ps.Series[np.int64]:
return ser + 1
psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
def test_empty_timestamp(self):
pdf = pd.DataFrame(
{
"t": [
datetime(2019, 1, 1, 0, 0, 0),
datetime(2019, 1, 2, 0, 0, 0),
datetime(2019, 1, 3, 0, 0, 0),
]
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]])
self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes)
def test_to_spark(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"):
psdf.to_spark(index_col="a")
with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"):
psdf.to_spark(index_col=["x", "y", "z"])
def test_keys(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.keys(), pdf.keys())
def test_quantile(self):
pdf, psdf = self.df_pair
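# quantile() on pandas-on-Spark is approximate (tuned by the `accuracy` parameter
# exercised below), and every q must lie in the interval [0, 1].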
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5))
self.assert_eq(
psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75])
)
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.quantile(0.5, axis=1)
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q=["a"])
with self.assertRaisesRegex(
ValueError, r"percentiles should all be in the interval \[0, 1\]"
):
psdf.quantile(q=[1.1])
self.assert_eq(
psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False)
)
self.assert_eq(
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
pdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
)
# multi-index column
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
pdf = pd.DataFrame({"x": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile(0.5, numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False)
def test_pct_change(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]},
index=np.random.rand(4),
)
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False)
self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False)
def test_where(self):
pdf, psdf = self.df_pair
# pandas requires the `axis` argument when `other` is a Series.
# `axis` is not fully supported yet in pandas-on-Spark.
self.assert_eq(
psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0)
)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.where(1)
def test_mask(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.mask(1)
def test_query(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)})
psdf = ps.from_pandas(pdf)
exprs = ("A > B", "A < C", "C == B")
for expr in exprs:
self.assert_eq(psdf.query(expr), pdf.query(expr))
# test `inplace=True`
for expr in exprs:
dummy_psdf = psdf.copy()
dummy_pdf = pdf.copy()
pser = dummy_pdf.A
psser = dummy_psdf.A
dummy_pdf.query(expr, inplace=True)
dummy_psdf.query(expr, inplace=True)
self.assert_eq(dummy_psdf, dummy_pdf)
self.assert_eq(psser, pser)
# invalid values for `expr`
invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]])
for expr in invalid_exprs:
with self.assertRaisesRegex(
TypeError,
"expr must be a string to be evaluated, {} given".format(type(expr).__name__),
):
psdf.query(expr)
# invalid values for `inplace`
invalid_inplaces = (1, 0, "True", "False")
for inplace in invalid_inplaces:
with self.assertRaisesRegex(
TypeError,
'For argument "inplace" expected type bool, received type {}.'.format(
type(inplace).__name__
),
):
psdf.query("a < b", inplace=inplace)
# not supported for MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"):
psdf.query("('A', 'Z') > ('B', 'X')")
def test_take(self):
pdf = pd.DataFrame(
{"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)}
)
psdf = ps.from_pandas(pdf)
# axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
pdf.columns = columns
# MultiIndex columns with axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psdf.take(1))
self.assertRaises(TypeError, lambda: psdf.take("1"))
self.assertRaises(TypeError, lambda: psdf.take({1, 2}))
self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None}))
def test_axes(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.axes, psdf.axes)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.axes, psdf.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_eval(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
psdf = ps.from_pandas(pdf)
# operation between columns (returns Series)
self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B"))
self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A"))
# assignment (returns DataFrame)
self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B"))
self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A"))
# operation between scalars (returns scalar)
self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1"))
# complicated operations with assignment
self.assert_eq(
pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
)
# inplace=True (only support for assignment)
pdf.eval("C = A + B", inplace=True)
psdf.eval("C = A + B", inplace=True)
self.assert_eq(pdf, psdf)
pser = pdf.A
psser = psdf.A
pdf.eval("A = B + C", inplace=True)
psdf.eval("A = B + C", inplace=True)
self.assert_eq(pdf, psdf)
self.assert_eq(pser, psser)
# doesn't support for multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")])
psdf.columns = columns
self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b"))
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.to_markdown(), psdf.to_markdown())
def test_cache(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
with psdf.spark.cache() as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(
repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True))
)
def test_persist(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
storage_levels = [
StorageLevel.DISK_ONLY,
StorageLevel.MEMORY_AND_DISK,
StorageLevel.MEMORY_ONLY,
StorageLevel.OFF_HEAP,
]
for storage_level in storage_levels:
with psdf.spark.persist(storage_level) as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level))
self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY"))
def test_squeeze(self):
axises = [None, 0, 1, "rows", "index", "columns"]
# Multiple columns
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Multiple columns with MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value
pdf = pd.DataFrame([[1]], columns=["a"], index=["x"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value with MultiIndex column
columns = pd.MultiIndex.from_tuples([("A", "Z")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values
pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values with MultiIndex column
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
def test_rfloordiv(self):
pdf = pd.DataFrame(
{"angles": [0, 3, 4], "degrees": [360, 180, 360]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
psdf = ps.from_pandas(pdf)
expected_result = pdf.rfloordiv(10)
self.assert_eq(psdf.rfloordiv(10), expected_result)
def test_truncate(self):
pdf1 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[1000, 550, 400, 0, -1, -20, -500],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf = ps.DataFrame(
{"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]},
index=[550, 400, 0],
)
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")])
pdf1.columns = columns
psdf1.columns = columns
pdf2.columns = columns
psdf2.columns = columns
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf.columns = columns
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# Exceptions
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, 100, 400, 0, -1, 550, -20],
)
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate()
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
msg = "Truncate: -20 must be after 400"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate(400, -20)
msg = "Truncate: B must be after C"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate("C", "B", axis=1)
def test_explode(self):
pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1})
pdf.index.name = "index"
pdf.columns.name = "columns"
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf.index = midx
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"])
pdf.columns = columns
psdf.columns = columns
expected_result1 = pdf.explode(("A", "Z"))
expected_result2 = pdf.explode(("B", "X"))
expected_result3 = pdf.A.explode("Z")
self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True)
self.assert_eq(psdf.explode(("B", "X")), expected_result2)
self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names)
self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names)
self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
self.assertRaises(ValueError, lambda: psdf.explode("A"))
def test_spark_schema(self):
psdf = ps.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
columns=["a", "b", "c", "d", "e", "f"],
)
actual = psdf.spark.schema()
expected = (
StructType()
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
actual = psdf.spark.schema("index")
expected = (
StructType()
.add("index", "long", False)
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
def test_print_schema(self):
psdf = ps.DataFrame(
{"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")},
columns=["a", "b", "c"],
)
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
psdf.spark.print_schema()
actual = out.getvalue().strip()
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
out = StringIO()
sys.stdout = out
psdf.spark.print_schema(index_col="index")
actual = out.getvalue().strip()
self.assertTrue("index: long" in actual, actual)
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
finally:
sys.stdout = prev
def test_explain_hint(self):
psdf1 = ps.DataFrame(
{"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]},
columns=["lkey", "value"],
)
psdf2 = ps.DataFrame(
{"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]},
columns=["rkey", "value"],
)
merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey")
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
merged.spark.explain()
actual = out.getvalue().strip()
self.assertTrue("Broadcast" in actual, actual)
finally:
sys.stdout = prev
def test_mad(self):
pdf = pd.DataFrame(
{
"A": [1, 2, None, 4, np.nan],
"B": [-0.1, 0.2, -0.3, np.nan, 0.5],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
with self.assertRaises(ValueError):
psdf.mad(axis=2)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
def test_abs(self):
pdf = pd.DataFrame({"a": [-2, -1, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(abs(psdf), abs(pdf))
self.assert_eq(np.abs(psdf), np.abs(pdf))
def test_iteritems(self):
pdf = pd.DataFrame(
{"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]},
index=["panda", "polar", "koala"],
columns=["species", "population"],
)
psdf = ps.from_pandas(pdf)
for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), psdf.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_tail(self):
pdf = pd.DataFrame({"x": range(1000)})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.tail(), psdf.tail())
self.assert_eq(pdf.tail(10), psdf.tail(10))
self.assert_eq(pdf.tail(-990), psdf.tail(-990))
self.assert_eq(pdf.tail(0), psdf.tail(0))
self.assert_eq(pdf.tail(-1001), psdf.tail(-1001))
self.assert_eq(pdf.tail(1001), psdf.tail(1001))
self.assert_eq((pdf + 1).tail(), (psdf + 1).tail())
self.assert_eq((pdf + 1).tail(10), (psdf + 1).tail(10))
self.assert_eq((pdf + 1).tail(-990), (psdf + 1).tail(-990))
self.assert_eq((pdf + 1).tail(0), (psdf + 1).tail(0))
self.assert_eq((pdf + 1).tail(-1001), (psdf + 1).tail(-1001))
self.assert_eq((pdf + 1).tail(1001), (psdf + 1).tail(1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psdf.tail("10")
def test_last_valid_index(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
self.assert_eq(pdf[[]].last_valid_index(), psdf[[]].last_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
def test_last(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last("1D"), psdf.last("1D"))
self.assert_eq(pdf.last(DateOffset(days=1)), psdf.last(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'last' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).last("1D")
def test_first(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first("1D"), psdf.first("1D"))
self.assert_eq(pdf.first(DateOffset(days=1)), psdf.first(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'first' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).first("1D")
def test_first_valid_index(self):
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
self.assert_eq(pdf[[]].first_valid_index(), psdf[[]].first_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=[
datetime(2021, 1, 1),
datetime(2021, 2, 1),
datetime(2021, 3, 1),
datetime(2021, 4, 1),
],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
def test_product(self):
pdf = pd.DataFrame(
{"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50], "C": ["a", "b", "c", "d", "e"]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric columns
pdf = pd.DataFrame({"key": ["a", "b", "c"], "val": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# All NaN columns
pdf = pd.DataFrame(
{
"A": [np.nan, np.nan, np.nan, np.nan, np.nan],
"B": [10, 20, 30, 40, 50],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
def test_from_dict(self):
data = {"row_1": [3, 2, 1, 0], "row_2": [10, 20, 30, 40]}
pdf = pd.DataFrame.from_dict(data)
psdf = ps.DataFrame.from_dict(data)
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, dtype="int8")
psdf = ps.DataFrame.from_dict(data, dtype="int8")
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
psdf = ps.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
self.assert_eq(pdf, psdf)
def test_pad(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.pad(), psdf.pad())
# Test `inplace=True`
pdf.pad(inplace=True)
psdf.pad(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [None, 3, 3, 3],
"B": [2.0, 4.0, 4.0, 3.0],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.pad())
# Test `inplace=True`
psdf.pad(inplace=True)
self.assert_eq(expected, psdf)
def test_backfill(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.backfill(), psdf.backfill())
# Test `inplace=True`
pdf.backfill(inplace=True)
psdf.backfill(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [3.0, 3.0, None, None],
"B": [2.0, 4.0, 3.0, 3.0],
"C": [1.0, 1.0, 1.0, 1.0],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.backfill())
# Test `inplace=True`
psdf.backfill(inplace=True)
self.assert_eq(expected, psdf)
def test_align(self):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
psdf1 = ps.from_pandas(pdf1)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0, 1]:
psdf_l, psdf_r = psdf1.align(psdf1[["b"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1.align(pdf1[["b"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["a"]].align(psdf1[["b", "a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["a"]].align(pdf1[["b", "a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["b", "a"]].align(psdf1[["a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["b", "a"]].align(pdf1[["a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1.align(psdf1["b"], axis=0)
pdf_l, pdf_r = pdf1.align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psser_b = psdf1[["a"]].align(psdf1["b"], axis=0)
pdf_l, pser_b = pdf1[["a"]].align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psser_b, pser_b)
self.assertRaises(ValueError, lambda: psdf1.align(psdf1, join="unknown"))
self.assertRaises(ValueError, lambda: psdf1.align(psdf1["b"]))
self.assertRaises(TypeError, lambda: psdf1.align(["b"]))
self.assertRaises(NotImplementedError, lambda: psdf1.align(psdf1["b"], axis=1))
pdf2 = pd.DataFrame({"a": [4, 5, 6], "d": ["d", "e", "f"]}, index=[10, 11, 12])
psdf2 = ps.from_pandas(pdf2)
for join in ["outer", "inner", "left", "right"]:
psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=1)
pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=1)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
def test_between_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
with self.assertRaisesRegex(
NotImplementedError, "between_time currently only works for axis=0"
):
psdf.between_time("0:15", "0:45", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.between_time("0:15", "0:45")
def test_at_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
psdf.at_time("0:20")
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts'
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts', column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
with self.assertRaisesRegex(NotImplementedError, "'asof' argument is not supported"):
psdf.at_time("0:15", asof=True)
with self.assertRaisesRegex(NotImplementedError, "at_time currently only works for axis=0"):
psdf.at_time("0:15", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.at_time("0:15")
def test_astype(self):
psdf = self.psdf
msg = "Only a column name can be used for the key in a dtype mappings argument."
with self.assertRaisesRegex(KeyError, msg):
psdf.astype({"c": float})
def test_describe(self):
pdf, psdf = self.df_pair
# numeric columns
self.assert_eq(psdf.describe(), pdf.describe())
psdf.a += psdf.a
pdf.a += pdf.a
self.assert_eq(psdf.describe(), pdf.describe())
# string columns
psdf = ps.DataFrame({"A": ["a", "b", "b", "c"], "B": ["d", "e", "f", "f"]})
pdf = psdf.to_pandas()
self.assert_eq(psdf.describe(), pdf.describe().astype(str))
psdf.A += psdf.A
pdf.A += pdf.A
self.assert_eq(psdf.describe(), pdf.describe().astype(str))
# timestamp columns
psdf = ps.DataFrame(
{
"A": [
pd.Timestamp("2020-10-20"),
pd.Timestamp("2021-06-02"),
pd.Timestamp("2021-06-02"),
pd.Timestamp("2022-07-11"),
],
"B": [
|
pd.Timestamp("2021-11-20")
|
pandas.Timestamp
|
import pandas as pd
from sklearn import datasets
from Algorithms.pca import PCA
X, y = datasets.load_iris(return_X_y=True)
X =
|
pd.DataFrame(data=X, columns=['Sepal length', 'Sepal width', 'Petal length', 'Petal width'])
|
pandas.DataFrame
|
import pandas as pd
import pytest
from viadot.tasks import SalesforceUpsert
@pytest.fixture(scope="session")
def test_df():
data = {
"Id": ["111"],
"LastName": ["<NAME>-External 3"],
"SAPContactId__c": [111],
}
df =
|
pd.DataFrame(data=data)
|
pandas.DataFrame
|
import warnings
from onecodex.lib.enums import AlphaDiversityMetric, Rank, BaseEnum
from onecodex.exceptions import OneCodexException, PlottingException, PlottingWarning
from onecodex.viz._primitives import prepare_props, sort_helper, get_base_classification_url
class PlotType(BaseEnum):
Auto = "auto"
BoxPlot = "boxplot"
Scatter = "scatter"
class VizMetadataMixin(object):
def plot_metadata(
self,
rank=Rank.Auto,
haxis="Label",
vaxis=AlphaDiversityMetric.Shannon,
title=None,
xlabel=None,
ylabel=None,
return_chart=False,
plot_type=PlotType.Auto,
label=None,
sort_x=None,
width=200,
height=400,
facet_by=None,
):
"""Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) to be plotted on
the horizontal axis.
vaxis : `string`, optional
Data to be plotted on the vertical axis. Can be any one of the following:
- A metadata field: the name of a metadata field containing numerical data
- {'simpson', 'observed_taxa', 'shannon'}: an alpha diversity statistic to calculate for each sample
- A taxon name: the name of a taxon in the analysis
- A taxon ID: the ID of a taxon in the analysis
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
plot_type : {'auto', 'boxplot', 'scatter'}
By default, will determine plot type automatically based on the data. Otherwise, specify
one of 'boxplot' or 'scatter' to set the type of plot manually.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
sort_x : `list` or `callable`, optional
Either a list of sorted labels or a function that will be called with a list of x-axis labels
as the only argument, and must return the same list in a user-specified order.
facet_by : `string`, optional
The metadata field used to facet samples by (i.e. to create a separate subplot for each
group of samples).
Examples
--------
Generate a boxplot of the abundance of Bacteroides (genus) of samples grouped by whether the
individuals are allergic to dogs, cats, both, or neither.
>>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides')
"""
# Deferred imports
import altair as alt
import pandas as pd
if rank is None:
raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
if not PlotType.has_value(plot_type):
raise OneCodexException("Plot type must be one of: auto, boxplot, scatter")
if len(self._results) < 1:
raise PlottingException(
"There are too few samples for metadata plots after filtering. Please select 1 or "
"more samples to plot."
)
# alpha diversity is only allowed on vertical axis--horizontal can be magically mapped
metadata_fields = [haxis, "Label"]
if facet_by:
metadata_fields.append(facet_by)
df, magic_fields = self._metadata_fetch(metadata_fields, label=label)
if AlphaDiversityMetric.has_value(vaxis):
df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)
magic_fields[vaxis] = vaxis
df.dropna(subset=[magic_fields[vaxis]], inplace=True)
else:
# if it's not alpha diversity, vertical axis can also be magically mapped
vert_df, vert_magic_fields = self._metadata_fetch([vaxis])
# we require the vertical axis to be numerical otherwise plots get weird
if (
pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]])
or
|
pd.api.types.is_categorical_dtype(vert_df[vert_magic_fields[vaxis]])
|
pandas.api.types.is_categorical_dtype
|
import pandas as pd
import numpy as np
import gensim
from gensim.corpora.dictionary import Dictionary
from datetime import datetime
co = pd.read_pickle('/home/mluser/master8_projects/clustering_vacancies/data/release/df_vacancies_full_ru_22K.pkl')
texts = np.array(co.lemmatized_text_pos_tags.apply(eval))
texts = texts.tolist()
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
print(str(datetime.now()))
lda = gensim.models.LdaModel(corpus, num_topics=500, id2word=dictionary)
print(str(datetime.now()))
#
# lda.get_document_topics()
# print(str(datetime.now()))
# lsi = gensim.models.LsiModel(corpus, num_topics=500, id2word=dictionary)
# print(str(datetime.now()))
# lsi.save('/home/mluser/master8_projects/clustering_vacancies/models/lsi/lsi_500_22K')
# co = pd.read_pickle('/home/mluser/master8_projects/clustering_vacancies/data/release/df_vacancies_full_ru_22K.pkl')
# co['lsi_500_22K'] = co.lemmatized_text_pos_tags.apply(eval).apply(dictionary.doc2bow).apply(lambda x: np.array(lsi[x])[:,1].tolist())
# co = co[['id', 'lsi_500_22K']]
# co.to_pickle('/home/mluser/master8_projects/clustering_vacancies/data/release/df_vacancies_full_ru_22K_lsi_500_22K.pkl')
def to_vec(x):
v = np.array(lda[x])
df = pd.DataFrame(v, columns=['topic', 'prob'])
df.topic = df.topic.astype('int')
df.index = df.topic
re = pd.DataFrame(index=range(0, 500, 1))
re['prob'] = df.prob
re.prob = re.prob.fillna(0.0)
return np.array(re.prob).tolist()
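# Hypothetical usage sketch (not in the original script): to_vec expects a bag-of-words
# vector, so it could be applied per document much like the commented-out LSI pipeline above:
# co['lda_500_22K'] = co.lemmatized_text_pos_tags.apply(eval).apply(dictionary.doc2bow).apply(to_vec)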
co =
|
pd.read_pickle('/home/mluser/master8_projects/clustering_vacancies/data/release/df_vacancies_full_ru_22K.pkl')
|
pandas.read_pickle
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
"""
import pandas as pd
import pytest
from ..decision_maker import DecisionMaker
# Evaluate the constructor
@pytest.mark.parametrize('attr, error',
[(['a', 'b', '', {'a1': 'a'}], ValueError),
(['a', 'b', 'c', {'a1': 'a'}, 0.1], TypeError),
(['a', 'b', 'c', {'a1': 'a'}, -1], ValueError),
(['a', 'b', 'c', {'a1': 'a'}, 0], ValueError)])
# indirect=['person'])
def test_constructor_errors(attr, error):
with pytest.raises(error):
DecisionMaker(*attr)
# Evaluate _compute_landscape
dm = DecisionMaker('John', 'Sales', 'transfer',
{'age': '30s', 'gender': 'M', 'language': 'DE'}, 1)
cands = pd.DataFrame(columns=['age', 'gender', 'language', 'transfer'],
data=[['20s', 'F', 'EN', 0],
['30s', 'F', 'FR', 1],
['40s', 'M', 'IT', 0],
['50s', 'M', 'DE', 1],
['30s', 'M', 'DE', 0]])
@pytest.mark.parametrize('param, error',
[('', ValueError),
('hello', TypeError),
(pd.DataFrame(columns=['A'], data=[1]), ValueError),
(pd.DataFrame(columns=['cand_age'], data=[1]), ValueError),
(
|
pd.DataFrame(columns=['cand_age', 'cand_gender'], data=[[1, 2]])
|
pandas.DataFrame
|
import pandas as pd
def convert_str_to_datetime(df, *, column: str, format: str):
"""
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
"""
df[column] =
|
pd.to_datetime(df[column], format=format)
|
pandas.to_datetime
|
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from tweepy import Cursor
import twitter_credentials
import numpy as np
import pandas as pd
# This application was coded by following the "Tweet Visualization and Sentiment Analysis in Python Tutorial"
# from <NAME> of Lucid Programming
class TwitterClient:
def __init__(self, twitter_user=None):
self.auth = TwitterAuth().authenticate_twitter()
self.twitter_client = API(self.auth)
self.twitter_user = twitter_user
def get_twitter_client_api(self):
return self.twitter_client
def get_user_timeline_tweets(self, num_tweets):
tweets_list = []
for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):
tweets_list.append(tweet)
return tweets_list
class TwitterAuth:
@staticmethod
def authenticate_twitter():
auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)
auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)
return auth
class TwitterStreamer:
"""
A class to stream and process live tweets
"""
def __init__(self):
self.twitter_auth = TwitterAuth()
def stream_tweets(self, fetched_tweets_file, hash_tags):
# Handles connecting to and streaming tweets from the Twitter API
listener = TwitterListener(fetched_tweets_file)
auth = self.twitter_auth.authenticate_twitter()
stream = Stream(auth, listener)
stream.filter(track=hash_tags)
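# Hypothetical usage sketch (file name and hashtags are assumptions, not from the original file):
# TwitterStreamer().stream_tweets("fetched_tweets.json", ["#python", "#pandas"])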
class TwitterListener(StreamListener):
"""
Basic listener class that prints data from the Twitter stream to stdout
"""
def __init__(self, fetched_tweets_file):
self.fetched_tweets_file = fetched_tweets_file
def on_data(self, data):
try:
print(data)
with open(self.fetched_tweets_file, 'a') as tf:
tf.write(data)
return True
except BaseException as e:
print("Error_data: %s" % str(e))
return True
def on_error(self, status):
if status == 420:
# Terminate connection if twitter rate limit is exceeded
return False
print(status)
class TweetAnalyzer:
"""
Used to analyze tweet data and categorize it
"""
def tweets_to_df(self, tweets):
df =
|
pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['tweets'])
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
@pytest.mark.parametrize(
"ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor]
)
def test_ufuncs_binary(ufunc):
# two BooleanArrays
a = pd.array([True, False, None], dtype="boolean")
result = ufunc(a, a)
expected = pd.array(ufunc(a._data, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s, a)
expected = pd.Series(ufunc(a._data, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_series_equal(result, expected)
# Boolean with numpy array
arr = np.array([True, True, False])
result = ufunc(a, arr)
expected = pd.array(ufunc(a._data, arr), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
# BooleanArray with scalar
result = ufunc(a, True)
expected = pd.array(ufunc(a._data, True), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
result = ufunc(True, a)
expected = pd.array(ufunc(True, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
# not handled types
msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__"
with pytest.raises(TypeError, match=msg):
ufunc(a, "test")
@pytest.mark.parametrize("ufunc", [np.logical_not])
def test_ufuncs_unary(ufunc):
a = pd.array([True, False, None], dtype="boolean")
result = ufunc(a)
expected = pd.array(ufunc(a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(ufunc(a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("values", [[True, False], [True, None]])
def test_ufunc_reduce_raises(values):
a = pd.array(values, dtype="boolean")
msg = "The 'reduce' method is not supported"
with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
def test_value_counts_na():
arr = pd.array([True, False, pd.NA], dtype="boolean")
result = arr.value_counts(dropna=False)
expected = pd.Series([1, 1, 1], index=[True, False, pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
expected = pd.Series([1, 1], index=[True, False], dtype="Int64")
tm.assert_series_equal(result, expected)
def test_diff():
a = pd.array(
[True, True, False, False, True, None, True, None, False], dtype="boolean"
)
result = pd.core.algorithms.diff(a, 1)
expected = pd.array(
[None, False, True, False, True, None, None, None, None], dtype="boolean"
)
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = s.diff()
expected =
|
pd.Series(expected)
|
pandas.Series
|
import pyensembl
import Bio.SeqIO
import Bio.Seq
import pandas as pd
import sys
import re
from Bio import pairwise2
def get_transcript_adj_exons(ensembl,gene_id,exon_coord):
try:
transcript_ids=ensembl.transcript_ids_of_gene_id(gene_id)
except:
print('Warning: ' + gene_id + ' not found')
transcript_ids=[]
transcript_list=[]
for tid in transcript_ids:
transcript=ensembl.transcript_by_id(tid)
transcript.exon_intervals.sort()
if exon_coord[0] in transcript.exon_intervals:
idx=transcript.exon_intervals.index(exon_coord[0])
if exon_coord == transcript.exon_intervals[idx:idx+len(exon_coord)]:
transcript_list.append(transcript)
return transcript_list
def has_coding_transcript(transcript_list):
has_coding=False
for transcript in transcript_list:
if transcript.biotype=='protein_coding':
has_coding=True
return has_coding
def get_transcript_contain_exons(ensembl,gene_id,exon_coord):
try:
transcript_ids=ensembl.transcript_ids_of_gene_id(gene_id)
except:
print('Warning: ' + gene_id + ' not found')
transcript_ids=[]
transcript_list=[]
for tid in transcript_ids:
transcript=ensembl.transcript_by_id(tid)
if set(exon_coord).issubset(set(transcript.exon_intervals)):
transcript_list.append(transcript)
return transcript_list
def find_overlap(rangeDF,coord):
return (coord[0]<=rangeDF.loc[:,'end']) & (coord[1]>=rangeDF.loc[:,'start'])
def make_seq_from_coord(ref,contig,coordDF,strand):
seq=''
if not contig in ref:
contig="chr"+str(contig)
if not contig in ref:
print(contig + " not found in ref")
return seq
for index,row in coordDF.iterrows():
if strand=='+':
seq=seq+str(ref[contig].seq[int(row.start)-1:int(row.end)])
else:
seq=str(ref[contig].seq[int(row.start)-1:int(row.end)].reverse_complement())+seq
return seq
def find_seq_diff(seq1,seq2):
align_list=pairwise2.align.globalms(seq1,seq2,2,-1,-10,0)
if len(align_list)==0:
seq1_diff_pos=(0,len(seq1)-1)
seq2_diff_pos=(0,len(seq2)-1)
elif seq1==seq2:
seq1_diff_pos=(float('nan'),float('nan'))
seq2_diff_pos=(float('nan'),float('nan'))
else:
align_data=pairwise2.format_alignment(*align_list[0]).split('\n')
#print(align_data)
seq_comp=pd.DataFrame({"seq1": list(align_data[0]), "seq2": list(align_data[2])})
seq_comp=seq_comp.assign(seq1_pos= seq_comp.seq1.isin(list(Bio.Seq.Alphabet.IUPAC.IUPACProtein.letters)).cumsum())
seq_comp=seq_comp.assign(seq2_pos= seq_comp.seq2.isin(list(Bio.Seq.Alphabet.IUPAC.IUPACProtein.letters)).cumsum())
seq_comp=seq_comp.assign(match=seq_comp.seq1==seq_comp.seq2)
#print(seq_comp.head())
#print(seq_comp[seq_comp.match==False])
first_mismatch=seq_comp.loc[seq_comp.match==False].index[0]
if first_mismatch==0:
seq1_diff_pos=(-1,max(seq_comp.seq1_pos[seq_comp.match==False]))
seq2_diff_pos=(-1,max(seq_comp.seq2_pos[seq_comp.match==False]))
else:
seq1_diff_pos=(seq_comp.seq1_pos[first_mismatch-1],max(seq_comp.seq1_pos[seq_comp.match==False]))
seq2_diff_pos=(seq_comp.seq2_pos[first_mismatch-1],max(seq_comp.seq2_pos[seq_comp.match==False]))
return seq1_diff_pos, seq2_diff_pos
def is_coding_effect(transcript,effect_coord):
coding_effect=dict()
if transcript.biotype!='protein_coding' or not transcript.contains_stop_codon or not transcript.contains_start_codon:
coding_effect['type']='NoncodingOrIncompleteTranscript'
coding_effect['is_coding']=False
else:
coding_coord=pd.DataFrame(transcript.coding_sequence_position_ranges,columns=['start','end'])
coding_effect['coord']=coding_coord.append(pd.Series({'start':min(transcript.stop_codon_positions),'end':max(transcript.stop_codon_positions)}),ignore_index=True).sort_values(by="start")
coding_effect['overlap']=find_overlap(coding_effect['coord'],effect_coord)
if effect_coord[1]<coding_coord.start.min() or effect_coord[0]>coding_coord.end.max():
coding_effect['type']='UTR'
coding_effect['is_coding']=False
else:
coding_effect['is_coding']=True
return coding_effect
def make_prot_from_coord(transcript,coord,ref):
trans=Bio.Seq.translate(make_seq_from_coord(ref,transcript.contig,coord,transcript.strand))
stop_count=trans.count('*')
if trans.startswith('M'):
start_lost=False
else:
start_lost=True
if stop_count==0:
stop_lost=True
else:
stop_lost=False
if stop_lost and not start_lost:
if transcript.strand=='+':
coord.iloc[-1,1]=transcript.exon_intervals[-1][1]
else:
coord.iloc[0,0]=transcript.exon_intervals[0][0]
trans=Bio.Seq.translate(make_seq_from_coord(ref,transcript.contig,coord,transcript.strand))
stop_count=trans.count('*')
if start_lost or stop_count==0:
prot_seq=''
else:
prot_seq=trans.split('*')[0]+'*'
if start_lost:
effect='StartLost'
elif stop_count==0:
effect='StopLost'
elif stop_lost:
effect='PostStop'
elif stop_count==1 and trans.endswith('*'):
effect='InFrame'
else:
effect='PrematureStop'
return prot_seq, effect
def get_event_coords(event_info,event_type):
event_coords=pd.DataFrame(columns=("isoform","start","end"))
if event_type=="IR":
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon1_end"],"end":event_info["exon2_start"]},ignore_index=True)
elif event_type=="ES":
event_coords=event_coords.append({"isoform":"iso1","start":event_info["exon_pre_end"],"end":event_info["exon_aft_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon_pre_end"],"end":event_info["exon_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon_end"],"end":event_info["exon_aft_start"]},ignore_index=True)
elif event_type=="MUT":
event_coords=event_coords.append({"isoform":"iso1","start":event_info["exon_pre_end"],"end":event_info["exon1_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso1","start":event_info["exon1_end"],"end":event_info["exon_aft_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon_pre_end"],"end":event_info["exon2_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon2_end"],"end":event_info["exon_aft_start"]},ignore_index=True)
elif (event_type=="A3" and event_info["strand"]=="+") or (event_type=="A5" and event_info["strand"]=="-"):
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon_const_end"],"end":event_info["exon_alt1_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso1","start":event_info["exon_const_end"],"end":event_info["exon_alt2_start"]},ignore_index=True)
elif (event_type=="A3" and event_info["strand"]=="-") or (event_type=="A5" and event_info["strand"]=="+"):
event_coords=event_coords.append({"isoform":"iso2","start":event_info["exon_alt1_end"],"end":event_info["exon_const_start"]},ignore_index=True)
event_coords=event_coords.append({"isoform":"iso1","start":event_info["exon_alt2_end"],"end":event_info["exon_const_start"]},ignore_index=True)
return event_coords
def jid_to_coords(event_jid):
event_coords=pd.DataFrame(columns=("isoform","start","end"))
iso1_coords=event_jid.split("g.")[1].split(">")[0].split('_')
for coord in iso1_coords:
if not coord=="NONE":
event_coords=event_coords.append({"isoform":"iso1","start":int(coord.split('j')[0]),"end":int(coord.split('j')[1])},ignore_index=True)
iso2_coords=event_jid.split("g.")[1].split(">")[1].split("[")[0].split('_')
for coord in iso2_coords:
if not coord=="NONE":
event_coords=event_coords.append({"isoform":"iso2","start":int(coord.split('j')[0]),"end":int(coord.split('j')[1])},ignore_index=True)
return event_coords
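# Based on the parsing above, a junction id is assumed to look like
# "<prefix>g.<iso1 junctions>><iso2 junctions>[<suffix>]", with each junction written as
# "<start>j<end>" and multiple junctions joined by "_" (or "NONE" when absent); e.g. a hypothetical
# jid_to_coords("g.100j200>100j150_180j200[x]") yields one iso1 row and two iso2 rows.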
def find_matching_transcripts(ensembl,gene_id,event_coords):
try:
transcript_ids=ensembl.transcript_ids_of_gene_id(gene_id[0:15])
except:
print('Warning: ' + gene_id[0:15] + ' not found')
transcript_ids=[]
transcript_table=
|
pd.DataFrame(columns=["coding","matching_isoform"],index=transcript_ids)
|
pandas.DataFrame
|
'''
Class containing the methods used to "clean" the information extracted by the web scraper service
(it is used directly by the analyzer class)
'''
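# Illustrative (hypothetical) usage of the static helpers defined below:
# df_opinions = Csvcleaner.FilterDataOpinautos()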
import pandas as pd
import re
from pathlib import Path
import numpy as np
import unidecode
class Csvcleaner:
@staticmethod
def FilterDataOpinautos():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/opinautos_items.csv").resolve()
file_path_out = (base_path / "../extractors/opinautos_items_filtered.csv").resolve()
df_opinautos = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Marca','Modelo', 'Estrellas','Opinion','Votos','Fecha'])
df_opinautos=Csvcleaner.FilterBrand(df_opinautos,'Marca')# Filter by brand
df_opinautos=Csvcleaner.FilterModel(df_opinautos,'Modelo')# Filter by model
df_opinautos=df_opinautos.loc[df_opinautos['Fecha'].str.contains('z', flags = re.IGNORECASE)].reset_index(drop=True)# Drop rows whose date is in a different format
for index, row in df_opinautos.iterrows():
df_opinautos.iloc[index,4]=df_opinautos.iloc[index,4].replace(u"\r",u" ").replace(u"\n",u" ").strip()# Normalize whitespace in the opinion text
df_opinautos=df_opinautos.loc[df_opinautos['Opinion'].str.len()<3000].reset_index(drop=True) # Limit the number of characters
df_opinautos['Fecha'] = pd.to_datetime(df_opinautos['Fecha'])# Convert the date format
mask = (df_opinautos['Fecha'] > '2019-1-01') & (df_opinautos['Fecha'] <= '2021-1-1')
df_opinautos=df_opinautos.loc[df_opinautos['Nombre'].str.contains('2019', flags = re.IGNORECASE) | df_opinautos['Nombre'].str.contains('2020', flags = re.IGNORECASE)]
df_opinautos=df_opinautos.loc[mask]
df_opinautos.to_csv(file_path_out,index=False)
return df_opinautos
@staticmethod
def FilterDataAutotest():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/autotest_items.csv").resolve()
file_path_out = (base_path / "../extractors/autotest_items_filtered.csv").resolve()
df_autotest = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Marca','Modelo', 'C_General','C_Vida','C_Diseño','C_Manejo','C_Performance','A_favor','En_contra'])
df_autotest=Csvcleaner.FilterBrand(df_autotest,'Marca')# Filter by brand
df_autotest=Csvcleaner.FilterModel(df_autotest,'Modelo')# Filter by model
df_autotest.to_csv(file_path_out,index=False)
return df_autotest
@staticmethod
def FilterDataMotorpasion():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/webextractor/motorpasion_items.csv").resolve()
file_path_out = (base_path / "../extractors/motorpasion_items_filtered.csv").resolve()
df_motor = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Version', 'C_General','C_Acabados','C_Seguridad','C_Equipamiento','C_Infotenimiento',
'C_Comportamiento', 'C_Motor', 'C_Transmision', 'C_Consumo', 'C_Espacio', 'C_Precio', 'Lo_Bueno', 'Lo_Malo'])
df_motor.dropna(subset=['Nombre'], inplace=True)
df_motor=Csvcleaner.FilterBrand(df_motor,'Nombre')# Filter by brand
df_motor=Csvcleaner.FilterModel(df_motor,'Nombre')# Filter by model
df_motor.to_csv(file_path_out,index=False)
return df_motor
@staticmethod
def FilterDataQuecoche():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/webextractor/quecochemecompro_items.csv").resolve()
file_path_out = (base_path / "../extractors/quecochemecompro_items_filtered.csv").resolve()
df_quecoche = pd.read_csv(file_path,encoding='utf-8',
header=0,
names=['Nombre', 'Marca', 'Puntuacion', 'Informativo', 'C_peque_manej', 'C_deportivo', 'C_bueno_barato',
'C_practico', 'C_ecologico', 'C_atractivo', 'Lo_mejor', 'Lo_peor'])
df_quecoche=Csvcleaner.FilterBrand(df_quecoche,'Nombre')# Filter by brand
df_quecoche=Csvcleaner.FilterModel(df_quecoche,'Nombre')# Filter by model
df_quecoche.to_csv(file_path_out,index=False)
return df_quecoche
@staticmethod
def FilterBrand(dataframe, brandField):
dataframe=dataframe.loc[dataframe[brandField].str.contains('nissan', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('chevrolet', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('buick', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('gmc', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('cadillac', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('audi', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('porsche', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('seat', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('volkswagen', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('toyota', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('ram', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('dodge', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('jeep', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('fiat', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('chrysler', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('alfa', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('kia', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('honda', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('mazda', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('hyundai', flags = re.IGNORECASE)|
dataframe[brandField].str.contains('renault', flags = re.IGNORECASE)].reset_index(drop=True)
return dataframe
@staticmethod
def FilterModel(dataframe, ModelField):
dataframe=dataframe.loc[~dataframe[ModelField].str.contains('malib', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('cabstar', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('urvan', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('express', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('silverado', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('caddy', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('crafter', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('transporter', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('hiace', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('promaster', flags = re.IGNORECASE)&
~dataframe[ModelField].str.contains('Ducato', flags = re.IGNORECASE)].reset_index(drop=True)
return dataframe
# TODO: generate the score sheet
@staticmethod
def generateScoreSheet():
base_path = Path(__file__).parent
file_autos_path = (base_path / "../data_csv/autos_data_mod_csv.csv").resolve()
file_autos_path_out = (base_path / "../data_csv/scoreSheet.csv").resolve()
file_quecoche_path = (base_path / "../extractors/quecochemecompro_items_filtered.csv").resolve()
file_autotest_path = (base_path / "../extractors/autotest_items_filtered.csv").resolve()
file_motorpasion_path = (base_path / "../extractors/motorpasion_items_filtered.csv").resolve()
file_opinautos_path = (base_path / "../extractors/opinautos_items_Comprehend_parsed.csv").resolve()
col_list = ["marca", "modelo", "año", "versión"]
dfAutos = pd.read_csv(file_autos_path, encoding='utf-8', usecols=col_list)
dfQuecoche = pd.read_csv(file_quecoche_path, encoding='utf-8')
dfAutoTest = pd.read_csv(file_autotest_path, encoding='utf-8')
dfMotorPasion = pd.read_csv(file_motorpasion_path, encoding='utf-8')
dfOpinautos = pd.read_csv(file_opinautos_path, encoding='utf-8')
columns=['general', 'confort', 'desempeño','tecnología','ostentosidad','deportividad','economía','eficiencia','seguridad','ecología','a_favor','en_contra','cP','cN']
dfAutos[columns] = pd.DataFrame([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]], index=dfAutos.index)
dfAutos['modelo'] = dfAutos['modelo'].apply(Csvcleaner.remove_accents)
dfQuecoche['Nombre'] = dfQuecoche['Nombre'].apply(Csvcleaner.remove_accents)
dfAutoTest['Nombre'] = dfAutoTest['Nombre'].apply(Csvcleaner.remove_accents)
dfMotorPasion['Nombre'] = dfMotorPasion['Nombre'].apply(Csvcleaner.remove_accents)
dfOpinautos['Modelo'] = dfOpinautos['Modelo'].apply(Csvcleaner.remove_accents)
for index, row in dfAutos.iterrows():
general=[]
confort=[]
desempeño=[]
tecnologia=[]
ostentosidad=[]
deportividad=[]
economia=[]
eficiencia=[]
seguridad=[]
ecologia=[]
cp=[]
cn=[]
afavor=''
encontra=''
dfAux=dfQuecoche.loc[dfQuecoche['Nombre'].str.contains(row['marca']+' ', flags = re.IGNORECASE) &
dfQuecoche['Nombre'].str.contains(' '+row['modelo'], flags = re.IGNORECASE)]
if not dfAux.empty:
idxVersion=Csvcleaner.getVersionIndex(dfAux,' '+row['versión'],'Puntuacion')
if not pd.isnull(dfAux.at[idxVersion, 'Puntuacion']):
general.append(float(dfAux.at[idxVersion, 'Puntuacion'].replace(",", ".")))
if not pd.isnull(dfAux.at[idxVersion, 'C_peque_manej']):
confort.append(dfAux.at[idxVersion, 'C_peque_manej'])
if not pd.isnull(dfAux.at[idxVersion, 'C_atractivo']):
confort.append(dfAux.at[idxVersion, 'C_atractivo'])
if not
|
pd.isnull(dfAux.at[idxVersion, 'C_deportivo'])
|
pandas.isnull
|
from pandas.util.py3compat import StringIO
import unittest
import sqlite3
import sys
import numpy as np
import pandas.io.sql as sql
import pandas.util.testing as tm
from pandas import Series, Index
class TestSQLite(unittest.TestCase):
def setUp(self):
self.db = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_sqlite_schema(frame, 'test')
self.db.execute(create_sql)
cur = self.db.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = sql.format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_sqlite_schema(frame, 'test')
self.db.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.db, params=tuple(row))
self.db.commit()
result = sql.read_frame("select * from test", self.db)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
self.db.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
|
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
|
pandas.io.sql.execute
|
from datetime import datetime, timedelta
import textwrap
import requests
import pandas as pd
from dateutil import parser
from requests.adapters import HTTPAdapter
ENDPOINTS = {
"global": "/global",
"coin": "/coins/{}",
"coins": "/coins",
"coin_tweeter": "/coins/{}/twitter",
"coin_events": "/coins/{}/events",
"coin_exchanges": "/coins/{}/exchanges",
"coin_markets": "/coins/{}/markets",
"ohlcv": "/coins/{}/ohlcv/latest",
"ohlcv_hist": "/coins/{}/ohlcv/historical",
"people": "/people/{}",
"tickers": "/tickers",
"ticker_info": "/tickers/{}",
"exchanges": "/exchanges",
"exchange_info": "/exchanges/{}",
"exchange_markets": "/exchanges/{}/markets",
"contract_platforms": "/contracts",
"contract_platform_addresses": "/contracts/{}",
"search": "/search",
}
PAPRIKA_BASE_URL = "https://api.coinpaprika.com/v1"
# Mount a session with an adapter as a workaround for occasional request timeouts
session = requests.Session()
session.mount(PAPRIKA_BASE_URL, HTTPAdapter(max_retries=5))
def make_request(endpoint, payload=None, **kwargs):
"""Helper method that handles request for coinpaprika api.
It prepares URL for given endpoint and payload if it's part of requests
Parameters
----------
endpoint: str,
        the endpoint we want to query, e.g. to get twitter data for a given coin we need to use:
https://api.coinpaprika.com/v1/coins/{}/twitter
payload: dict
        the body of your request. Contains the data sent to the CoinPaprika API when making an API request
kwargs:
additional parameters that will be added to payload
Returns
-------
dict with response data
"""
url = f"{PAPRIKA_BASE_URL}{endpoint}"
if payload is None:
payload = {}
if kwargs:
payload.update(kwargs)
return session.get(url, params=payload).json()
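# Hedged usage sketch (my addition, not part of the original module): it only shows
# how make_request composes PAPRIKA_BASE_URL with an endpoint and query parameters.
# It needs network access, and 'btc-bitcoin' is just an illustrative coin id.
def _demo_make_request():
    coin = make_request(ENDPOINTS["coin"].format("btc-bitcoin"))
    print(coin.get("name"), coin.get("symbol"))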
def get_global_market():
"""Return data frame with most important global crypto statistics like:
market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage, cryptocurrencies_number,
market_cap_ath_value, market_cap_ath_date, volume_24h_ath_value, volume_24h_ath_date,
market_cap_change_24h, volume_24h_change_24h, last_updated,
Returns
-------
pandas.DataFrame
Metric, Value
"""
global_markets = make_request(ENDPOINTS["global"])
global_markets["last_updated"] = datetime.fromtimestamp(
global_markets["last_updated"]
)
for key, date in global_markets.items():
if "date" in key:
try:
global_markets[key] = parser.parse(date).strftime("%Y-%m-%d %H:%M:%S")
except (KeyError, ValueError, TypeError):
...
df = pd.Series(global_markets).to_frame().reset_index()
df.columns = ["Metric", "Value"]
return df
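# Hedged mini-example of the date normalisation above: dateutil parses the ISO
# timestamp and strftime re-renders it as 'YYYY-MM-DD HH:MM:SS'. The timestamp is made up.
def _demo_parse_date_field():
    return parser.parse("2018-04-02T00:00:00Z").strftime("%Y-%m-%d %H:%M:%S")  # -> '2018-04-02 00:00:00'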
def get_list_of_coins():
"""Get list of all available coins on CoinPaprika
Returns
-------
pandas.DataFrame
rank, id, name, symbol, type
"""
coins = make_request(ENDPOINTS["coins"])
df = pd.DataFrame(coins)
df = df[df["is_active"]]
return df[["rank", "id", "name", "symbol", "type"]]
def get_coin(coin_id="eth-ethereum"):
"""Get coin by id
Parameters
----------
coin_id: str
id of coin from coinpaprika e.g. Ethereum - > 'eth-ethereum'
Returns
-------
dict with response
"""
coin = make_request(ENDPOINTS["coin"].format(coin_id))
return coin
def get_coin_twitter_timeline(coin_id="eth-ethereum"):
"""Get twitter timeline for given coin id. Not more than last 50 tweets
Parameters
----------
coin_id: str
id of coin from coinpaprika e.g. Ethereum - > 'eth-ethereum'
Returns
-------
pandas.DataFrame
date, user_name, status, retweet_count, like_count
"""
res = make_request(ENDPOINTS["coin_tweeter"].format(coin_id))
if "error" in res:
print(res)
return pd.DataFrame()
if isinstance(res, list) and len(res) == 0:
return pd.DataFrame()
df = pd.DataFrame(res)[
["date", "user_name", "status", "retweet_count", "like_count"]
]
df = df.applymap(
lambda x: "\n".join(textwrap.wrap(x, width=80)) if isinstance(x, str) else x
)
df["status"] = df["status"].apply(lambda x: x.replace(" ", ""))
df["date"] = df["date"].apply(lambda x: x.replace("T", "\n"))
df["date"] = df["date"].apply(lambda x: x.replace("Z", ""))
return df
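# Hedged mini-example of the cell formatting applied above: a long string is
# wrapped at 80 characters and re-joined with newlines so it fits in a table cell.
def _demo_wrap_cell():
    cell = "tweet " * 30
    return "\n".join(textwrap.wrap(cell, width=80))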
def get_coin_events_by_id(coin_id="eth-ethereum"):
"""
Example of response from API:
{
"id": "17398-cme-april-first-trade",
"date": "2018-04-02T00:00:00Z",
"date_to": "string",
"name": "CME: April First Trade",
"description": "First trade of Bitcoin futures contract for April 2018.",
"is_conference": false,
"link": "http://www.cmegroup.com/trading/equity-index/us-index/bitcoin_product_calendar_futures.html",
"proof_image_link": "https://static.coinpaprika.com/storage/cdn/event_images/16635.jpg"
}
Parameters
----------
coin_id: str
id of coin from coinpaprika e.g. Ethereum - > 'eth-ethereum'
Returns
-------
pandas.DataFrame
id, date , date_to, name, description, is_conference, link, proof_image_link
"""
res = make_request(ENDPOINTS["coin_events"].format(coin_id))
if not res:
return pd.DataFrame()
data = pd.DataFrame(res)
data["description"] = data["description"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=40)) if isinstance(x, str) else x
)
data.drop(["id", "proof_image_link"], axis=1, inplace=True)
for col in ["date", "date_to"]:
data[col] = data[col].apply(lambda x: x.replace("T", "\n"))
data[col] = data[col].apply(lambda x: x.replace("Z", ""))
return data
def get_coin_exchanges_by_id(coin_id="eth-ethereum"):
"""Get all exchanges for given coin id.
Parameters
----------
coin_id: Identifier of Coin from CoinPaprika
Returns
-------
pandas.DataFrame
id, name, adjusted_volume_24h_share, fiats
"""
res = make_request(ENDPOINTS["coin_exchanges"].format(coin_id))
df = pd.DataFrame(res)
df["fiats"] = df["fiats"].copy().apply(lambda x: len([i["symbol"] for i in x if x]))
return df
def get_coin_markets_by_id(coin_id="eth-ethereum", quotes="USD"):
"""
Parameters
----------
    coin_id: CoinPaprika identifier of the coin, e.g. eth-ethereum
quotes: Comma separated list of quotes to return.
Example: quotes=USD,BTC
Allowed values:
BTC, ETH, USD, EUR, PLN, KRW, GBP, CAD, JPY, RUB, TRY, NZD, AUD, CHF, UAH, HKD, SGD, NGN, PHP, MXN, BRL,
THB, CLP, CNY, CZK, DKK, HUF, IDR, ILS, INR, MYR, NOK, PKR, SEK, TWD, ZAR, VND, BOB, COP, PEN, ARS, ISK
Returns
-------
pandas.DataFrame
"""
markets = make_request(ENDPOINTS["coin_markets"].format(coin_id), quotes=quotes)
if "error" in markets:
print(markets)
return pd.DataFrame()
data = []
for r in markets:
dct = {
"exchange": r.get("exchange_name"),
"pair": r.get("pair"),
"trust_score": r.get("trust_score"),
"pct_volume_share": r.get("adjusted_volume_24h_share"),
}
quotes = r.get("quotes")
for k, v in quotes.items():
dct[f"{k.lower()}_price"] = v.get("price")
dct[f"{k.lower()}_volume"] = v.get("volume_24h")
dct["market_url"] = r.get("market_url")
data.append(dct)
return pd.DataFrame(data)
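# Hedged sketch of the flattening performed above: the nested per-quote dict is
# turned into flat '<quote>_price' / '<quote>_volume' columns. The record values are made up.
def _demo_flatten_quotes():
    r = {"exchange_name": "SomeExchange", "pair": "ETH/USD",
         "quotes": {"USD": {"price": 1800.0, "volume_24h": 123456.0}}}
    dct = {"exchange": r.get("exchange_name"), "pair": r.get("pair")}
    for k, v in r["quotes"].items():
        dct[f"{k.lower()}_price"] = v.get("price")
        dct[f"{k.lower()}_volume"] = v.get("volume_24h")
    return pd.DataFrame([dct])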
def get_ohlc_historical(coin_id="eth-ethereum", quotes="USD", days=90):
"""
Open/High/Low/Close values with volume and market_cap.
Request example: https://api.coinpaprika.com/v1/coins/btc-bitcoin/ohlcv/historical?start=2019-01-01&end=2019-01-20
    if the last day is the current day, it can change with every request until the actual close of the day at 23:59:59
Parameters
----------
coin_id: str
Paprika coin identifier e.g. eth-ethereum
quotes: str
returned data quote (available values: usd btc)
days: int
time range for chart in days. Maximum 365
Returns
-------
pandas.DataFrame
"""
if quotes.lower() not in ["usd", "btc"]:
quotes = "USD"
if abs(int(days)) > 365:
days = 365
end = datetime.now().strftime("%Y-%m-%d")
start = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
data = make_request(
ENDPOINTS["ohlcv_hist"].format(coin_id), quotes=quotes, start=start, end=end
)
if "error" in data:
print(data)
return pd.DataFrame()
return pd.DataFrame(data)
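# Hedged usage sketch (my addition; needs network access and the coin id is illustrative):
# fetch the last 30 daily OHLCV rows and show the first few.
def _demo_ohlc_historical():
    df = get_ohlc_historical("btc-bitcoin", quotes="usd", days=30)
    if not df.empty:
        print(df.head())
    return df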
def _get_coins_info_helper(quotes="USD"):
"""Helper method that call /tickers endpoint which returns for all coins quoted in provided currency/crypto
{
"id": "btc-bitcoin",
"name": "Bitcoin",
"symbol": "BTC",
"rank": 1,
"circulating_supply": 17007062,
"total_supply": 17007062,
"max_supply": 21000000,
"beta_value": 0.735327,
"first_data_at": "2010-11-14T07:20:41Z",
"last_updated": "2018-11-14T07:20:41Z",
"quotes" : {
"USD": {
"price": 5162.15941296,
"volume_24h": 7304207651.1585,
"volume_24h_change_24h": -2.5,
"market_cap": 91094433242,
"market_cap_change_24h": 1.6,
"percent_change_15m": 0,
"percent_change_30m": 0,
"percent_change_1h": 0,
"percent_change_6h": 0,
"percent_change_12h": -0.09,
"percent_change_24h": 1.59,
"percent_change_7d": 0.28,
"percent_change_30d": 27.39,
"percent_change_1y": -37.99,
"ath_price": 20089,
"ath_date": "2017-12-17T12:19:00Z",
"percent_from_price_ath": -74.3
}
}
}
Parameters
----------
    quotes: Comma separated quotes to return e.g. quotes=USD,BTC
Returns
-------
pandas.DataFrame
id, name, symbol, rank, circulating_supply, total_supply, max_supply, beta_value, first_data_at,
last_updated, price, volume_24h, volume_24h_change_24h, market_cap, market_cap_change_24h,
percent_change_15m, percent_change_30m, percent_change_1h, percent_change_6h, percent_change_12h,
percent_change_24h, percent_change_7d, percent_change_30d, percent_change_1y,
ath_price, ath_date, percent_from_price_ath
"""
tickers = make_request(ENDPOINTS["tickers"], quotes=quotes)
data =
|
pd.json_normalize(tickers)
|
pandas.json_normalize
|
import sys as _sys
import warnings
from functools import partial
from scipy import sparse
from sklearn.utils.metaestimators import available_if
from sklearn.utils.validation import (
_make_indexable,
_num_features,
)
import numpy as np
from typing import Dict, Any
from . import __version__
try:
import pandas as pd
except:
pass
import copy
from .xydata import XyData
class XyAdapterStub(object):
def __call__(self, klass):
obj = XyAdapterStub()
obj.__class__ = XyAdapterFactory(klass)
return obj
class XyAdapterBase:
def __getstate__(self):
try:
state = super().__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith("sklearn_transformer_extensions."):
return dict(state.items(), _xyadapter_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith("sklearn_transformer_extensions."):
pickle_version = state.pop("_xyadapter_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk. "
"For more info please refer to:\n"
"https://scikit-learn.org/stable/modules/model_persistence"
".html#security-maintainability-limitations".format(
self.__class__.__name__, pickle_version, __version__),
UserWarning,
)
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
def _check_method(method):
def fn(self):
for klass in self.__class__.mro():
if issubclass(klass, XyAdapterBase):
continue
break
return hasattr(klass, method) # type: ignore
return fn
def XyAdapterFactory(klass):
"""An adapter that specializes a given klass object (which expected to
be a scikit-learn transformer or estimator class) so all of klass'
methods like `fit`, `transform`, etc now accept a XyData object in
addition to accepting X and y as separate arguments (default behavior).
Internally, if the input to a method is an XyData object, the adapter
splits the input into features (X) and labels (y) before calling the
corresponding scikit-learn object's method. If the input is not an
XyData object, then the X and y arguments to the function are passed
    through as is, preserving scikit-learn's traditional behavior.
    For transformers, the returned value from the scikit-learn object's
    `fit_transform` and `transform` method calls is combined with the labels
    (if they exist) to create a new XyData object, which is returned. If the original
features (X) was pandas `DataFrame`, the returned transformed features
is also a pandas `DataFrame`. The column names are obtained from
scikit-learn's new `get_feature_names_out` interface. If scikit-learn's
object does not provide this method, then we retain the original
DataFrame's columns.
Parameters
----------
Same as the base class which is expected to be a scikit-learn
transformer or estimator.
Attributes
----------
Same as the base class.
Examples
--------
In this example, we recreate the example from scikit-learn's
LogisticRegression documentation.
>>> from sklearn_transformer_extensions import XyAdapter, XyData
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> import numpy as np
>>> X, y = load_iris(return_X_y=True)
>>> Xy = XyData(X, y)
>>> clf = XyAdapter(LogisticRegression)(random_state=0)
>>> clf.fit(Xy)
LogisticRegression(random_state=0)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :])
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(Xy)
0.97...
"""
# https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
class XyAdapter(klass, XyAdapterBase):
@available_if(_check_method("get_params"))
def get_params(self, deep: bool = True) -> Dict[str, Any]:
# from xgboost/get_params
params = super().get_params(deep)
cp = copy.copy(self)
cp.__class__ = cp.__class__.__bases__[0]
params.update(cp.__class__.get_params(cp, deep))
# if kwargs is a dict, update params accordingly
if hasattr(self, "kwargs") and isinstance(self.kwargs, dict):
params.update(self.kwargs)
return params
def __eq__(self, other):
return (isinstance(self, other.__class__) or isinstance(
other, self.__class__)) and self.__dict__ is other.__dict__
def _joinXy(self, X, y, ofmt):
if type(X) == XyData:
# the input y is replaced by what is returned by the transformer
X, y = X
if ofmt == 'pandas':
if not hasattr(X, "iloc"):
feature_names_out = self._get_feature_names_out()
if feature_names_out is None:
Xt = X
if hasattr(X, "to_frame"):
Xt = X.to_frame()
elif sparse.issparse(X):
# Sparse already is 2-d
pass
else:
Xt = np.atleast_2d(X)
n_features = _num_features(Xt)
feature_names_out = np.asarray(
[f"col{i}" for i in range(n_features)],
dtype=object)
if sparse.issparse(X):
X = pd.DataFrame.sparse.from_spmatrix(
X, columns=feature_names_out)
X = X.infer_objects()
else:
X =
|
pd.DataFrame(X, columns=feature_names_out)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import IntervalRestrictor
from recipe_config_loading import get_interval_restriction_params
@pytest.fixture
def datetime_column():
return "Date"
@pytest.fixture
def df(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2]
country = ["first", "first", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="M")
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def long_df(datetime_column):
co2 = [315.58, 316.39, 100, 116.2, 345, 234, 201, 100]
country = ["first", "first", "first", "first", "second", "second", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def long_df_2(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10]
country = ["first", "first", "second", "second", "third", "third"]
country_2 = ["first", "first", "second", "second", "third", "third"]
time_index =
|
pd.date_range("1-1-1959", periods=2, freq="M")
|
pandas.date_range
|
import warnings
import pandas as pd
from geopandas import GeoDataFrame
from geopandas.array import _check_crs, _crs_mismatch_warn
def sjoin(
left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
"""Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersects'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
"""
if not isinstance(left_df, GeoDataFrame):
raise ValueError(
"'left_df' should be GeoDataFrame, got {}".format(type(left_df))
)
if not isinstance(right_df, GeoDataFrame):
raise ValueError(
"'right_df' should be GeoDataFrame, got {}".format(type(right_df))
)
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
if not _check_crs(left_df, right_df):
_crs_mismatch_warn(left_df, right_df, stacklevel=3)
index_left = "index_%s" % lsuffix
index_right = "index_%s" % rsuffix
# due to GH 352
if any(left_df.columns.isin([index_left, index_right])) or any(
right_df.columns.isin([index_left, index_right])
):
raise ValueError(
"'{0}' and '{1}' cannot be names in the frames being"
" joined".format(index_left, index_right)
)
# query index
with warnings.catch_warnings():
# We don't need to show our own warning here
# TODO remove this once the deprecation has been enforced
warnings.filterwarnings(
"ignore", "Generated spatial index is empty", FutureWarning
)
if op == "within":
# within is implemented as the inverse of contains
# contains is a faster predicate
# see discussion at https://github.com/geopandas/geopandas/pull/1421
predicate = "contains"
sindex = left_df.sindex
input_geoms = right_df.geometry
else:
# all other predicates are symmetric
# keep them the same
predicate = op
sindex = right_df.sindex
input_geoms = left_df.geometry
if sindex:
l_idx, r_idx = sindex.query_bulk(input_geoms, predicate=predicate, sort=False)
result =
|
pd.DataFrame({"_key_left": l_idx, "_key_right": r_idx})
|
pandas.DataFrame
|
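# Hedged usage sketch for the sjoin shown above. Because the snippet above is
# truncated, this calls the released geopandas.sjoin instead; the geometries are made up.
def _demo_sjoin():
    import geopandas as gpd
    from shapely.geometry import Point, Polygon
    points = gpd.GeoDataFrame({"name": ["a", "b"]},
                              geometry=[Point(0.5, 0.5), Point(2.0, 2.0)])
    squares = gpd.GeoDataFrame({"zone": ["unit square"]},
                               geometry=[Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])])
    # only point "a" falls within the unit square, so a single joined row comes back
    return gpd.sjoin(points, squares, how="inner", op="within")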
import pandas as pd
from Client import clientclass
from Server import serverclass
from Stats import *
from LTE import *
from NodeDiscovery import *
from policy import *
from dynamic_plot import *
from user import *
from multiprocessing import Process
import multiprocessing
import os.path
import os
import shutil as sh
import time
import datetime
import sys
def choose_vm(dfvm, user_def):
"""The choose_vm function filters the dataset which has multiple vm migrations
    by the user-defined properties. Depending on the mode in which the simulation
    is running, the resulting dataframe may be a set of VMs that follow the
specifications or a single VM from a given ID that the user introduced.
The first situation being vm_mode = 1 and the second situation being vm_mode = 0
:param dfvm: Dataframe with the VMs
:type dfvm: Pandas dataframe
:param user_def: User definitions
:type user_def: Object of the class UserDef
:return: Returns a filtered dataframe with the VMs that will be used
:rtype: Pandas dataframe
"""
if(user_def.vm_mode == 0):
dfvm = dfvm[dfvm['Migration ID'].values == user_def.vm_id]
dfvm = dfvm.reset_index(drop=True)
elif(user_def.vm_mode == 1):
dfvm = dfvm[dfvm['Migration Technique'].values == user_def.migtype]
dfvm = dfvm[dfvm['Workload'].values == user_def.benchmark]
dfvm = dfvm[dfvm['Page transfer rate (MB/s)'].values == user_def.PTR]
dfvm = dfvm.reset_index(drop=True)
return dfvm
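# Hedged usage sketch of choose_vm: with vm_mode == 0 only the row whose
# 'Migration ID' equals user_def.vm_id survives. The tiny frame and the stub
# user definitions below are made up for illustration.
def _demo_choose_vm():
    class _StubUserDef:
        vm_mode, vm_id = 0, 2
    dfvm = pd.DataFrame({"Migration ID": [1, 2, 3],
                         "Migration Technique": ["pre-copy", "post-copy", "pre-copy"]})
    return choose_vm(dfvm, _StubUserDef())  # one row, with Migration ID == 2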
def setup_folders(user_def):
"""The setup_folders function generates the correct folders for
outputting the results of the simulation.
:param user_def: User definitions
:type user_def: Object of the class UserDef
    :return: Returns 1 on success
:rtype: Integer
"""
if(not os.path.isdir('./Digest/LoopVM')):
os.makedirs('./Digest/LoopVM')
if(not os.path.isdir('./Digest/SingleVM')):
os.makedirs('./Digest/SingleVM')
if(not os.path.isdir(user_def.digest_path)):
os.makedirs(user_def.digest_path)
if(not os.path.isdir('./OUT')):
os.makedirs('./OUT')
for files in os.listdir('./OUT'):
os.remove(os.path.join('./OUT', files))
return 1
def data_to_digest(user_def):
"""The data_to_digest function copies the results of the simulation
to the correct digest folders.
:param user_def: User definitions
:type user_def: Object of the class UserDef
    :return: Returns 1 on success
:rtype: Integer
"""
sh.copy2('./OUT/Node_Determination.xlsx', user_def.digest_path)
sh.copy2('./OUT/Statistics.xlsx', user_def.digest_path)
sh.copy2('./OUT/Client_path.xlsx', user_def.digest_path)
return 1
def check_dataframes(dftrips, dfstations, df_LTE, dfvm, user_def):
"""The check_dataframes function verifies if all datasets provided exist
and have a correct structure based on the provided header of each .csv file.
    This function is used to prevent program errors further in the execution process
by checking if all the information that the user used to run the simulation can actually
be used.
:param dftrips: Dataframe with the trips
:type dftrips: Pandas dataframe
:param dfstations: Dataframe with the edge server stations
:type dfstations: Pandas dataframe
:param df_LTE: Dataframe with the LTE stations
:type df_LTE: Pandas dataframe
:param dfvm: Dataframe with the VMs
:type dfvm: Pandas dataframe
:param user_def: User definitions
:type user_def: Object of the class UserDef
    :return: Returns 1 on success
:rtype: Integer
"""
trips = ['TripID', 'TimeStamp', 'Speed', 'Acceleration', 'Heading', 'HeadingChange', 'Latitude', 'Longitude']
stations = ['ID_LTE', 'radio', 'lat', 'lon']
vm = ['Migration ID', 'Migration Technique', 'Workload', 'Page Dirty Rate (4KB pages per second)', 'VM_SIZE (MB)',
'Page transfer rate (MB/s)', 'Total Migration Time (ms)', 'Downtime (ms)', 'Total Transferred Data (KB)']
list_df = [trips, stations, stations, vm]
trips_hd = dftrips.columns.values.tolist()
stations_hd = dfstations.columns.values.tolist()
lte_hd = df_LTE.columns.values.tolist()
vm_hd = dfvm.columns.values.tolist()
list_hd = [trips_hd, stations_hd, lte_hd, vm_hd]
flag = 0
for df in list_df:
for column in df: #
for hd in list_hd:
for column_hd in hd: #
if(column_hd == column):
flag = 1
break
if (flag == 0):
print('Dataset has wrong header syntax\n\tCheck header: ', hd)
sys.exit(2)
if(flag == 1):
flag = 0
break
list_hd.pop(0)
list_vm = ['Migration ID', 'Migration Technique', 'Workload', 'Page Dirty Rate (4KB pages per second)']
list_user = [user_def.vm_id, user_def.migtype, user_def.benchmark, user_def.PTR]
for vm_def, usr_def in zip(list_vm, list_user):
if(dfvm.loc[dfvm[vm_def] == usr_def][vm_def].empty):
print('User definitions not found in the provided dataframes\n\t Definition error: ', vm_def)
sys.exit(2)
return 1
def simulation(user_def, dfstations, dfnode, dfpath, clientlist, stationlist):
"""The simulation function is responsible for checking the main sequence of events
that evaluates each step of the client by iterating through the multiple trips and
    virtual machines for each coordinate of the client. At each coordinate analyzed, an evaluation
is done to see if the migration is viable. If the suggested destination is approved by the
policy evaluator then the migration process occurs. Otherwise, the next coordinate will be
analyzed the same way, until the right opportunity appears. In the process of running the simulation
some data is saved and acquired, so that in the future some results about the migration mechanism can be studied.
:param user_def: User definitions
:type user_def: Object of the class UserDef
:param dfstations: Dataframe with the edge server stations
:type dfstations: Pandas dataframe
:param dfnode: Dataframe used for saving the data required by the dynamic plot
:type dfnode: Pandas dataframe
    :param dfpath: Dataframe used for saving the latency and distance data to the user throughout the path
    :type dfpath: Pandas dataframe
    :param clientlist: List with all the clients
    :type clientlist: List
    :param stationlist: List with all the edge server stations
    :type stationlist: List
    :return: Returns 1 on success
    :rtype: Integer
"""
#Plot Process
if(user_def.dynamic_plot == 1):
lock = multiprocessing.Lock()
process = Process(target=plot_process, args=(dftrips,dfstations,lock))
plt_dynamic = 0
for client in clientlist:
print(client.dftrip['TripID'].values[0])
client.calc_triptime()
client.calc_tripdistance()
#print(client.vm_df)
for i in range(0, client.vm_df['Migration ID'].count()):
#for i in range(0, 10):
client.vm = client.vm_df.iloc[[i]]
print(i)
client.reset_vars(user_def.cone)  # reset the client's variables
get_client_source(client, dfstations) #give dfmigrations the first source
server = stationlist[client.get_origin_server_id()] #find the first origin server for that client
lte_connection(client, 0) #find the first lte_st for that client
for coor_index in range( 0, client.count_coordinates()):
cone_determination(client, coor_index)
lte_connection(client, coor_index)
client.latencies[0] = get_latency(client, coor_index, client.get_server_origin_coor())
client.distancies[0] = server.calc_distance(client.get_coordinates(coor_index), client.get_server_origin_coor())
if(client.mig_under == 0):
ret_node = node_search(client, coor_index, dfstations, user_def) #-1 correspondes to not finding a posible destination
#ret_node = node_search_close(client, coor_index, dfstations)
if(ret_node != -1):
client.latencies[1] = get_latency(client, coor_index, client.get_server_target_coor())
client.distancies[1] = server.calc_distance(client.get_coordinates(coor_index), client.get_server_target_coor())
if(ret_node != -1 and client.mig_under == 0 and policy_evaluator(server, client, coor_index, user_def)):
client.mig_under = 1
Mt_est = math.ceil(server.migration_time_estimate(client, 2.5, 14.5)) # seconds
Mt_real = math.ceil(client.vm_time_mig(2.5, 14.5)) #seconds
if(user_def.mig_cost == 0):
Mt_est = 0
Mt_real = 0
elapsed = 0
if (Mt_est >= Mt_real):
elapsed = Mt_est
elif (Mt_real >= Mt_est):
elapsed = Mt_real
if(user_def.timeout == 1):
client.timeout = Mt_real + user_def.timeout_multiplier * Mt_real
client.mig_id_inc = client.mig_id_inc + 1
creatstats(client, server, coor_index)
if(user_def.dynamic_plot == 1):
dfnode = df_dynamic_plot(client, dfnode, coor_index, ret_node, lock)
if(plt_dynamic==0):
process.start()
plt_dynamic = 1
dfpath = path_stats(dfpath, client, coor_index)
if(client.mig_under == 1 and ret_node != -1):
#function stats to collect all data during migration
stats_collect(Mt_real, Mt_est, server, client, user_def)
if(elapsed == 0 ):
client.mig_under = 0
# Drop the first row of dfmigrations -> same as saying: the migration happened
client.dfmigrations = client.dfmigrations.drop([0], axis='index')
client.dfmigrations = client.dfmigrations.reset_index(drop=True)
server = stationlist[client.get_origin_server_id()]
elapsed = elapsed - 1
Mt_est = Mt_est - 1
Mt_real = Mt_real - 1
#print(client.triptime)
#print(client.tripdistance)
if(user_def.dynamic_plot == 1):
process.join()
#Save statistics
df_statistics = stats_df(clientlist)
df_statistics.to_excel('./OUT/Statistics.xlsx', index=False, engine='xlsxwriter')
df_stat_node = stats_df_node_dt(clientlist)
df_stat_node.to_excel('./OUT/Node_Determination.xlsx', index=False, engine='xlsxwriter')
dfpath.to_excel('./OUT/Client_path.xlsx', index=False, engine='xlsxwriter')
#Copy data to digest path
data_to_digest(user_def)
return 1
if __name__ == "__main__":
start_time = time.time()
#Setup user definitions
if(len(sys.argv)<=1):
print('Missing Parameters:\n\tTo run default mode enter: main.py -d\n\tTo get help enter: main.py -h')
sys.exit(1)
argv = sys.argv[1:]
user_def_dict = handle_user_def (argv)
user_def = UserDef(user_def_dict)
setup_folders(user_def)
#define and initialize the dataframes and lists
dftrips =
|
pd.read_csv("./Datasets/Mobility/" + user_def.dftrips_path)
|
pandas.read_csv
|
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
def countnum():
dates = pd.date_range(start="2019-01-01", end="2019-05-31", freq='M')
# print(dates)
# print(dates[0])
# print(type(dates[0]))
col1 = [i for i in range(1, len(dates) + 1)]
# print(col1)
col2 = [i + 1 for i in range(1, len(dates) + 1)]
df = pd.DataFrame({'col1': col1, 'col2': col2}, index=dates)
# print(df)
dict_ic = {}
dict_ic['date'] = df
df_ic = pd.concat(dict_ic.values(), keys=dict_ic.keys())
# print (df_ic)
# statistics computed from a list
mean = df_ic.groupby(level=0).apply(lambda frame: len(
[i for i in frame['col1'].values if i > 2]) / len(frame['col1']))
print(mean)
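# Hedged mini-example of the same pattern used above: group by the first index
# level and compute, per group, the fraction of 'col1' values greater than 2.
# The tiny frame is made up for illustration.
def _demo_groupby_fraction():
    idx = pd.MultiIndex.from_product([["g1", "g2"], [0, 1, 2]])
    frame = pd.DataFrame({"col1": [1, 3, 4, 2, 2, 5]}, index=idx)
    frac = frame.groupby(level=0).apply(
        lambda f: (f["col1"] > 2).sum() / len(f["col1"]))
    return frac  # g1 -> 2/3, g2 -> 1/3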
def statfunc():
perf_dict = {"code": ['000001', '000002', '000003'],
"close": [100, 91.1, 5.4],
"vol": [1000, 200, 3000]}
df =
|
pd.DataFrame(perf_dict)
|
pandas.DataFrame
|
# Modified by Seungjae @ 2021. 07. 31
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# Licensed under CC BY-NC-SA 4.0 (Attribution-NonCommercial-ShareAlike 4.0 International) (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#
# The code is released for academic research use only. For commercial use, please contact Huawei Technologies Co., Ltd.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains content licensed by https://github.com/xinntao/BasicSR/blob/master/LICENSE/LICENSE
import glob
import sys
from collections import OrderedDict
from natsort import natsort
import options.options as option
from Measure import Measure, psnr
from models.SRFlow_model import SRFlowModel
from imresize import imresize
from models import create_model
import torch
from torch import nn
from utils.util import opt_get
import numpy as np
import pandas as pd
import os
import cv2
import argparse
from utils import util
from model import Glow, Cond_Glow_Skip
import torchvision
from torchvision import transforms
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def fiFindByWildcard(wildcard):
return natsort.natsorted(glob.glob(wildcard, recursive=True))
def load_model(conf_path):
opt = option.parse(conf_path, is_train=False)
opt['gpu_ids'] = None
opt = option.dict_to_nonedict(opt)
model = create_model(opt)
model_path = opt_get(opt, ['model_path'], None)
model.load_network(load_path=model_path, network=model.netG)
return model, opt
def predict(model, lr):
model.feed_data({"LQ": t(lr)}, need_GT=False)
model.test()
visuals = model.get_current_visuals(need_GT=False)
return visuals.get('rlt', visuals.get("SR"))
def t(array): return torch.Tensor(np.expand_dims(array.transpose([2, 0, 1]), axis=0).astype(np.float32)) / 255
def rgb(t): return (
np.clip((t[0] if len(t.shape) == 4 else t).detach().cpu().numpy().transpose([1, 2, 0]), 0, 1) * 255).astype(
np.uint8)
def imread(path):
return cv2.imread(path)[:, :, [2, 1, 0]]
def imwrite(path, img):
os.makedirs(os.path.dirname(path), exist_ok=True)
cv2.imwrite(path, img[:, :, [2, 1, 0]])
def imCropCenter(img, size):
h, w, c = img.shape
h_start = max(h // 2 - size // 2, 0)
h_end = min(h_start + size, h)
w_start = max(w // 2 - size // 2, 0)
w_end = min(w_start + size, w)
return img[h_start:h_end, w_start:w_end]
def impad(img, top=0, bottom=0, left=0, right=0, color=255):
return np.pad(img, [(top, bottom), (left, right), (0, 0)], 'reflect')
def sample_data(path, batch_size, image_size):
transform = transforms.Compose(
[
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
]
)
dataset = torchvision.datasets.ImageFolder(path, transform=transform)
loader = torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=batch_size)
loader = iter(loader)
while True:
try:
yield next(loader)
except StopIteration:
loader = torch.utils.data.DataLoader(
dataset, shuffle=False, batch_size=batch_size, num_workers=4
)
loader = iter(loader)
yield next(loader)
def calc_z_shapes(n_channel, input_size, n_flow, n_block):
z_shapes = []
for i in range(n_block - 1):
input_size //= 2
n_channel *= 2
z_shapes.append((n_channel, input_size, input_size))
input_size //= 2
z_shapes.append((n_channel * 4, input_size, input_size))
return z_shapes
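# Hedged sanity check for calc_z_shapes, computed by hand for the settings used
# later in main() (3 channels, 128-px images, 32 flows, 4 blocks):
# the expected result is [(6, 64, 64), (12, 32, 32), (24, 16, 16), (96, 8, 8)].
def _demo_calc_z_shapes():
    return calc_z_shapes(3, 128, 32, 4)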
def calc_inp_shapes(n_channels, image_size, n_blocks):
# calculates z shapes (inputs) after SQUEEZE operation (before Block operations) - e.g. channels: 12, 24, 48, 96
def calc_z_shapes(n_channel, image_size, n_block):
# calculates shapes of z's after SPLIT operation (after Block operations) - e.g. channels: 6, 12, 24, 96
z_shapes = []
for i in range(n_block - 1):
image_size = (image_size[0] // 2, image_size[1] // 2)
n_channel = n_channel * 2
shape = (n_channel, *image_size)
z_shapes.append(shape)
# for the very last block where we have no split operation
image_size = (image_size[0] // 2, image_size[1] // 2)
shape = (n_channel * 4, *image_size)
z_shapes.append(shape)
return z_shapes
z_shapes = calc_z_shapes(n_channels, image_size, n_blocks)
input_shapes = []
for i in range(len(z_shapes)):
if i < len(z_shapes) - 1:
channels = z_shapes[i][0] * 2
input_shapes.append((channels, z_shapes[i][1], z_shapes[i][2]))
else:
input_shapes.append((z_shapes[i][0], z_shapes[i][1], z_shapes[i][2]))
return input_shapes
def calc_cond_shapes(n_channels, image_size, n_blocks):
# computes additional channels dimensions based on additional conditions: left input + condition
input_shapes = calc_inp_shapes(n_channels, image_size, n_blocks)
cond_shapes = []
for block_idx in range(len(input_shapes)):
shape = [input_shapes[block_idx][0], input_shapes[block_idx][1], input_shapes[block_idx][2]] # from left glow
cond_shapes.append(tuple(shape))
return cond_shapes
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--opt', type=str, default='./confs/SRFlow_CelebA_4X_seungjae_load_for_test.yml',
help='Path to option YMAL file.')
parser.add_argument('--p', type=str, default='./celeba_small_test',
help='Path to celeba_small_test')
parser.add_argument('--exp_name', type=str,
default='SRDualGlow',
help='exp name')
args = parser.parse_args()
### For SRFlow
opt = option.parse(args.opt, is_train=True)
opt = option.dict_to_nonedict(opt)
conf_path = 'SRFlow_seungjae/code/confs/SRFlow_CelebA_4X_seungjae_load_for_test.yml'
conf = conf_path.split('/')[-1].replace('.yml', '')
n_bits = 5
n_bins = 2.0 ** n_bits
temp = 0.8
img_size = 128
n_block, n_flow = 4, 32
no_lu = False
affine = True
tr_resize = transforms.Resize((img_size, img_size))
left_glow_params = './srdualglow_models/model_lr_040001.pt'
right_glow_params = './srdualglow_models/model_hr_190001.pt'
input_shapes = calc_inp_shapes(3, [img_size, img_size], n_block)
cond_shapes = calc_cond_shapes(3, [img_size, img_size], n_block)
model_single_left = Glow(
3, n_flow, n_block, affine=False, conv_lu=not no_lu
)
model_left = nn.DataParallel(model_single_left)
model_left = model_left.to(device)
model_left.load_state_dict(torch.load(left_glow_params))
model_left.eval()
model_single_right = Cond_Glow_Skip(
3, n_flow, n_block, input_shapes, cond_shapes, affine=affine, conv_lu=not no_lu
)
model_right = nn.DataParallel(model_single_right)
model_right = model_right.to(device)
model_right.load_state_dict(torch.load(right_glow_params))
model_right.eval()
### Load dataset
dataset_lr = iter(sample_data(args.p, 1, 128 // 4))
dataset_hr = iter(sample_data(args.p, 1, 128))
dataset = torchvision.datasets.ImageFolder(args.p)
leng = len(dataset)
test_dir = f'./{args.exp_name}_results'
os.makedirs(test_dir, exist_ok=True)
print(f"Out dir: {test_dir}")
measure = Measure(use_gpu=False)
fname = f'measure_full.csv'
fname_tmp = fname + "_"
path_out_measures = os.path.join(test_dir, fname_tmp)
path_out_measures_final = os.path.join(test_dir, fname)
print(path_out_measures)
if os.path.isfile(path_out_measures_final):
df = pd.read_csv(path_out_measures_final)
elif os.path.isfile(path_out_measures):
df = pd.read_csv(path_out_measures)
else:
df = None
for idx_test in range(leng):
image_lr, _ = next(dataset_lr)
image_lr = image_lr.to(device)
image_hr, _ = next(dataset_hr)
image_hr = image_hr.to(device)
z_sample_hr = []
z_shapes_hr = calc_z_shapes(3, img_size, n_flow, n_block)
for z in z_shapes_hr:
z_new = torch.randn(len(image_lr), *z) * temp
z_sample_hr.append(z_new.to(device))
image_lr = image_lr * 255
if n_bits < 8:
image_lr = torch.floor(image_lr / 2 ** (8 - n_bits))
image_lr = image_lr / n_bins - 0.5
image_hr = image_hr * 255
if n_bits < 8:
image_hr = torch.floor(image_hr / 2 ** (8 - n_bits))
image_hr = image_hr / n_bins - 0.5
image_lr_resize = tr_resize(image_lr)
with torch.no_grad():
left_glow_out = model_left.module(image_lr + torch.rand_like(image_lr) / n_bins)
image_sr = model_single_right.reverse(z_sample_hr, reconstruct=False,
left_glow_out=left_glow_out,
image_lr_resize=image_lr_resize)
### Inference part (Currently for SRDualGlow)
print(image_hr.size(), torch.max(image_hr), torch.min(image_hr))
hr = rgb(image_hr + 0.5) # To make numpy array
sr = rgb(image_sr + 0.5) # To make numpy array
# IMSAVE
path_out_sr = f'{test_dir}/{idx_test:06d}.png'
imwrite(path_out_sr, sr)
# MEASURE
meas = OrderedDict(conf=conf, heat=temp, name=idx_test)
meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr, hr)
str_out = format_measurements(meas)
print(str_out)
# SAVE CSV
df = pd.DataFrame([meas]) if df is None else pd.concat([
|
pd.DataFrame([meas])
|
pandas.DataFrame
|
#!/usr/bin/env python3
"""Generate non-canonical nucleotide probability predictions using signal align output
"""
import os
import itertools
import numpy as np
import pandas as pd
from py3helpers.utils import list_dir, merge_lists
from py3helpers.multiprocess import *
from signalalign.nanoporeRead import NanoporeRead
from signalalign.signalAlignment import SignalAlignment
from signalalign.train.trainModels import read_in_alignment_file
from signalalign.utils.sequenceTools import CustomAmbiguityPositions, AMBIG_BASES
class MarginalizeVariants(object):
def __init__(self, variant_data, variants, read_name):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variants: bases to track probabilities
:param variant_data: variant data
"""
self.read_name = read_name
self.variant_data = variant_data
self.variants = sorted(variants)
self.columns = merge_lists([['read_name', 'contig', 'position', 'strand', 'forward_mapped'],
list(self.variants)])
self.contig = NanoporeRead.bytes_to_string(self.variant_data["contig"][0])
self.position_probs = pd.DataFrame()
self.has_data = False
self.per_read_calls = pd.DataFrame()
self.per_read_columns = merge_lists([['read_name', 'contig', 'strand', "forward_mapped",
"n_sites"], list(self.variants)])
def get_data(self):
"""Calculate the normalized probability of variant for each nucleotide and across the read"""
# final location of per position data and per read data
data = []
per_read_data = []
for read_strand in (b"t", b"c"):
read_strand_specifc_data = self.variant_data[self.variant_data["strand"] == read_strand]
read_strand = read_strand.decode("utf-8")
if len(read_strand_specifc_data) == 0:
continue
for forward_mapped in set(self.variant_data["forward_mapped"]):
mapping_strand = "-"
if forward_mapped == b"forward":
mapping_strand = "+"
strand_specifc_data = read_strand_specifc_data[read_strand_specifc_data["forward_mapped"] ==
forward_mapped]
if len(strand_specifc_data) == 0:
continue
# get positions on strand
positions = set(strand_specifc_data["reference_position"])
n_positions = len(positions)
strand_read_nuc_data = [0] * len(self.variants)
# marginalize probabilities for each position
for pos in positions:
pos_data = strand_specifc_data[strand_specifc_data["reference_position"] == pos]
total_prob = 0
position_nuc_dict = {x: 0.0 for x in self.variants}
# Get total probability for each nucleotide
for nuc in set(pos_data["base"]):
nuc_data = pos_data[pos_data["base"] == nuc]
nuc_prob = sum(nuc_data["posterior_probability"])
total_prob += nuc_prob
position_nuc_dict[NanoporeRead.bytes_to_string(nuc)] = nuc_prob
# normalize probabilities over each position
nuc_data = [0] * len(self.variants)
for nuc in position_nuc_dict.keys():
index = self.variants.index(nuc)
nuc_data[index] = position_nuc_dict[nuc] / total_prob
strand_read_nuc_data[index] += nuc_data[index]
data.append(merge_lists([[self.read_name, self.contig, pos, read_strand, mapping_strand],
nuc_data]))
if n_positions > 0:
per_read_data.append(merge_lists([[self.read_name, self.contig, read_strand, mapping_strand,
n_positions],
[prob / n_positions for prob in strand_read_nuc_data]]))
self.position_probs = pd.DataFrame(data, columns=self.columns)
self.per_read_calls = pd.DataFrame(per_read_data, columns=self.per_read_columns)
self.has_data = True
return self.position_probs
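# Hedged numeric sketch of the per-position normalisation performed in get_data above:
# posterior mass is summed per base and then divided by the total mass at that site.
# The probabilities below are made up.
def _demo_normalise_position():
    position_nuc_dict = {"A": 0.2, "C": 0.6}   # summed posterior_probability per base
    total_prob = sum(position_nuc_dict.values())
    return {nuc: p / total_prob for nuc, p in position_nuc_dict.items()}  # {'A': 0.25, 'C': 0.75}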
class MarginalizeFullVariants(object):
def __init__(self, full_data, variants, read_name, forward_mapped):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variants: bases to track probabilities
:param full_data: path to full tsv file
['contig', 'reference_index',
'reference_kmer', 'read_file',
'strand', 'event_index',
'event_mean', 'event_noise',
'event_duration', 'aligned_kmer',
'scaled_mean_current', 'scaled_noise',
'posterior_probability', 'descaled_event_mean',
'ont_model_mean', 'path_kmer']
"""
self.read_name = read_name
self.full_data = full_data
self.variants = sorted(variants)
self.ambig_char = AMBIG_BASES["".join(self.variants)]
self.variant_data = self.full_data[[self.ambig_char in kmer or "X" in kmer for kmer in self.full_data["reference_kmer"]]]
self.forward_mapped = forward_mapped
self.columns = merge_lists([['read_name', 'contig', 'position', 'strand', 'forward_mapped'],
list(self.variants)])
self.contig = NanoporeRead.bytes_to_string(self.full_data["contig"][0])
self.position_probs = pd.DataFrame()
self.has_data = False
self.per_read_calls = pd.DataFrame()
self.per_read_columns = merge_lists([['read_name', 'contig', 'strand', "forward_mapped", "n_sites"],
list(self.variants)])
def get_data(self):
"""Calculate the normalized probability of variant for each nucleotide and across the read"""
# final location of per position data and per read data
data = []
per_read_data = []
if self.forward_mapped:
mapping_strands = ["+", "-"]
else:
mapping_strands = ["-", "+"]
if len(self.variant_data) > 0:
kmer_len_1 = len(self.variant_data["reference_kmer"].iloc[0]) - 1
mapping_index = 0
for read_strand in ("t", "c"):
read_strand_specifc_data = self.variant_data[self.variant_data["strand"] == read_strand]
# read_strand = read_strand.decode("utf-8")
if len(read_strand_specifc_data) == 0:
continue
# get positions on strand
positions = sorted(set(read_strand_specifc_data["reference_index"]))
if mapping_strands[mapping_index] == "-":
positions = positions[::-1]
strand_read_nuc_data = [0] * len(self.variants)
# marginalize probabilities for each position
n_positions = 0
for pos in positions:
pos_data = read_strand_specifc_data[read_strand_specifc_data["reference_index"] == pos]
base = pos_data["aligned_kmer"].iloc[0][kmer_len_1]
if base != self.ambig_char and base != "X":
continue
n_positions += 1
total_prob = 0
position_nuc_dict = {x: 0.0 for x in self.variants}
# Get total probability for each nucleotide
for nuc in self.variants:
# kmer_len_1 = pos_data["reference_kmer"].iloc[0].find("X")
# print(pos_data["reference_kmer"].iloc[0])
nuc_data = pos_data[[nuc == kmer[kmer_len_1] for kmer in pos_data["path_kmer"]]]
nuc_prob = sum(nuc_data["posterior_probability"])
total_prob += nuc_prob
position_nuc_dict[NanoporeRead.bytes_to_string(nuc)] = nuc_prob
# normalize probabilities over each position
nuc_data = [0] * len(self.variants)
for index, nuc in enumerate(self.variants):
assert total_prob > 0, "Check 'variants' parameter. There seems to be no kmers with those " \
"variant characters"
nuc_data[index] = position_nuc_dict[nuc] / total_prob
strand_read_nuc_data[index] += nuc_data[index]
data.append(merge_lists([[self.read_name, self.contig, pos, read_strand,
mapping_strands[mapping_index]], nuc_data]))
if n_positions > 0:
per_read_data.append(merge_lists([[self.read_name, self.contig, read_strand,
mapping_strands[mapping_index], n_positions],
[prob / n_positions for prob in strand_read_nuc_data]]))
mapping_index += 1
self.position_probs = pd.DataFrame(data, columns=self.columns)
self.per_read_calls = pd.DataFrame(per_read_data, columns=self.per_read_columns)
self.has_data = True
else:
self.has_data = False
return self.position_probs
class AggregateOverReads(object):
def __init__(self, variant_tsv_dir, variants="ATGC", verbose=False):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variant_tsv_dir: directory of variantCaller output from signalAlign
:param variants: bases to track probabilities
"""
self.variant_tsv_dir = variant_tsv_dir
self.variants = sorted(variants)
self.columns = merge_lists([['contig', 'position', 'strand', 'forward_mapped'], list(self.variants)])
self.variant_tsvs = list_dir(self.variant_tsv_dir, ext=".vc.tsv")
self.aggregate_position_probs = pd.DataFrame()
self.per_position_data = pd.DataFrame()
self.verbose = verbose
self.per_read_data = pd.DataFrame()
self.has_data = self._aggregate_all_variantcalls()
def _aggregate_all_variantcalls(self):
"""Aggregate all the variant calls"""
for v_tsv in self.variant_tsvs:
if os.stat(v_tsv).st_size == 0:
continue
read_name = os.path.basename(v_tsv)
variant_data = SignalAlignment.read_in_signal_align_tsv(v_tsv, "variantCaller")
mv_h = MarginalizeVariants(variant_data, variants=self.variants, read_name=read_name)
mv_h.get_data()
if self.verbose:
print(v_tsv)
self.per_position_data = self.per_position_data.append(mv_h.position_probs, ignore_index=True)
self.per_read_data = self.per_read_data.append(mv_h.per_read_calls, ignore_index=True)
return True
def marginalize_over_all_reads(self):
"""Calculate the per position posterior probability"""
assert self.has_data, "AggregateOverReads does not have data. Make sure you initialized correctly"
self.aggregate_position_probs = pd.concat([pd.DataFrame([i], columns=self.columns)
for i in self._normalize_all_data(self.per_position_data)],
ignore_index=True)
return self.aggregate_position_probs
def _normalize_all_data(self, all_data):
"""Helper function to normalize all probability data"""
for strand in set(all_data["strand"]):
strand_data = all_data[all_data["strand"] == strand]
for contig in set(strand_data["contig"]):
contig_data = strand_data[strand_data["contig"] == contig]
for mapped_strand in set(contig_data["forward_mapped"]):
strand_mapped_data = contig_data[contig_data["forward_mapped"] == mapped_strand]
for position in set(strand_mapped_data["position"]):
position_data = strand_mapped_data[strand_mapped_data["position"] == position]
sum_total = sum(sum(position_data.loc[:, base]) for base in self.variants)
normalized_probs = [np.round(sum(position_data.loc[:, base]) / sum_total, 6) for base
in self.variants]
yield merge_lists([[contig, position, strand, mapped_strand], normalized_probs])
def write_data(self, out_path):
"""Write out aggregate_position_probs to tsv file"""
self.aggregate_position_probs.to_csv(out_path, sep='\t', index=False)
def generate_labels(self, labelled_positions, predicted_data):
"""Generate labels for predictions given labelled positions.
Note: This will drop sites that do not have labels in 'labelled_positions'
"""
for char in self.variants:
predicted_data.loc[:, char+"_label"] = pd.Series(0, index=predicted_data.index)
for i in range(len(predicted_data)):
contig = predicted_data.loc[i]["contig"]
forward_mapped = predicted_data.loc[i]["forward_mapped"]
position = predicted_data.loc[i]["position"]
true_char = get_true_character(labelled_positions, contig, forward_mapped, position)
if true_char is None:
print("No variant found in labelled data at chr:{} pos:{} "
"forward_mapped:{}: Check positions file".format(contig, position, forward_mapped))
predicted_data = predicted_data.drop([i])
else:
predicted_data.loc[i, true_char+"_label"] = 1
return predicted_data
def generate_labels2(self, predicted_data, true_char):
"""Generate labels for predictions given labelled positions"""
for char in self.variants:
predicted_data.loc[:, char+"_label"] = pd.Series(0, index=predicted_data.index)
for i in range(len(predicted_data)):
predicted_data.loc[i, true_char+"_label"] = 1
return predicted_data
class AggregateOverReadsFull(object):
def __init__(self, sa_full_tsv_dirs, variants="ATGC", verbose=False, processes=2):
"""Marginalize over all posterior probabilities to give a per position read probability
:param sa_full_tsv_dirs: list of directories of full output from signalAlign
:param variants: bases to track probabilities
"""
self.sa_full_tsv_dirs = sa_full_tsv_dirs
self.variants = sorted(variants)
self.columns = merge_lists([['contig', 'position', 'strand', 'forward_mapped'], list(self.variants)])
self.forward_tsvs = list(itertools.chain.from_iterable([list_dir(x, ext=".forward.tsv") for x
in self.sa_full_tsv_dirs]))
self.backward_tsvs = list(itertools.chain.from_iterable([list_dir(x, ext=".backward.tsv") for x
in self.sa_full_tsv_dirs]))
self.verbose = verbose
self.worker_count = processes
self.aggregate_position_probs = pd.DataFrame()
self.per_position_data = pd.DataFrame()
self.per_read_data = pd.DataFrame()
self.has_data = self._multiprocess_aggregate_all_variantcalls()
def _aggregate_all_variantcalls(self):
"""Aggregate all the variant calls"""
for v_tsv in self.forward_tsvs:
if os.stat(v_tsv).st_size == 0:
continue
read_name = os.path.basename(v_tsv)
variant_data = read_in_alignment_file(v_tsv)
mv_h = MarginalizeFullVariants(variant_data, variants=self.variants, read_name=read_name,
forward_mapped=True)
mv_h.get_data()
if self.verbose:
print(v_tsv)
if mv_h.has_data:
self.per_position_data = self.per_position_data.append(mv_h.position_probs, ignore_index=True)
self.per_read_data = self.per_read_data.append(mv_h.per_read_calls, ignore_index=True)
for v_tsv in self.backward_tsvs:
if os.stat(v_tsv).st_size == 0:
continue
read_name = os.path.basename(v_tsv)
variant_data = read_in_alignment_file(v_tsv)
mv_h = MarginalizeFullVariants(variant_data, variants=self.variants, read_name=read_name,
forward_mapped=False)
mv_h.get_data()
if self.verbose:
print(v_tsv)
if mv_h.has_data:
self.per_position_data = self.per_position_data.append(mv_h.position_probs, ignore_index=True)
self.per_read_data = self.per_read_data.append(mv_h.per_read_calls, ignore_index=True)
return True
def _multiprocess_aggregate_all_variantcalls(self):
"""Aggregate all the variant calls"""
def marginalize_wrapper(v_tsv, variants, forward_mapped, verbose):
mv_h = None
if os.stat(v_tsv).st_size != 0:
if verbose:
print(v_tsv)
read_name = os.path.basename(v_tsv)
variant_data = read_in_alignment_file(v_tsv)
mv_h = MarginalizeFullVariants(variant_data, variants=variants, read_name=read_name,
forward_mapped=forward_mapped)
mv_h.get_data()
return mv_h
test_args = {"variants": self.variants,
"forward_mapped": True,
"verbose": self.verbose}
service = BasicService(marginalize_wrapper, service_name="forward_multiprocess_aggregate_all_variantcalls")
total, failure, messages, output = run_service(service.run, self.forward_tsvs,
test_args, ["v_tsv"], worker_count=self.worker_count)
for mv_h in output:
if mv_h is None:
continue
if mv_h.has_data:
self.per_position_data = self.per_position_data.append(mv_h.position_probs, ignore_index=True)
self.per_read_data = self.per_read_data.append(mv_h.per_read_calls, ignore_index=True)
test_args = {"variants": self.variants,
"forward_mapped": False,
"verbose": self.verbose}
service = BasicService(marginalize_wrapper, service_name="backward_multiprocess_aggregate_all_variantcalls")
total, failure, messages, output = run_service(service.run, self.backward_tsvs,
test_args, ["v_tsv"], worker_count=self.worker_count)
for mv_h in output:
if mv_h is None:
continue
if mv_h.has_data:
self.per_position_data = self.per_position_data.append(mv_h.position_probs, ignore_index=True)
self.per_read_data = self.per_read_data.append(mv_h.per_read_calls, ignore_index=True)
return True
def marginalize_over_all_reads(self):
"""Calculate the per position posterior probability"""
assert self.has_data, "AggregateOverReads does not have data. Make sure you initialized correctly"
self.aggregate_position_probs = pd.concat([pd.DataFrame([i], columns=self.columns)
for i in self._normalize_all_data(self.per_position_data)],
ignore_index=True)
return self.aggregate_position_probs
def _normalize_all_data(self, all_data):
"""Helper function to normalize all probability data"""
assert not all_data.empty, "Check your input. 'all_data' is empty"
for strand in set(all_data["strand"]):
strand_data = all_data[all_data["strand"] == strand]
for contig in set(strand_data["contig"]):
contig_data = strand_data[strand_data["contig"] == contig]
for mapped_strand in set(contig_data["forward_mapped"]):
strand_mapped_data = contig_data[contig_data["forward_mapped"] == mapped_strand]
for position in set(strand_mapped_data["position"]):
position_data = strand_mapped_data[strand_mapped_data["position"] == position]
sum_total = sum(sum(position_data.loc[:, base]) for base in self.variants)
normalized_probs = [np.round(sum(position_data.loc[:, base]) / sum_total, 6) for base in
self.variants]
yield merge_lists([[contig, position, strand, mapped_strand], normalized_probs])
def write_data(self, out_path):
"""Write out aggregate_position_probs to tsv file"""
self.aggregate_position_probs.to_csv(out_path, sep='\t', index=False)
def generate_labels(self, labelled_positions, predicted_data):
"""Generate labels for predictions given labelled positions.
Note: This will drop sites that do not have labels in 'labelled_positions'
"""
for char in self.variants:
predicted_data.loc[:, char+"_label"] =
|
pd.Series(0, index=predicted_data.index)
|
pandas.Series
|
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
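# older numpy does not forward the 'out' keyword to the index's argmin/argmax, so the pandas error is only raised from numpy 1.10 onwards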
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
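# (in this pandas version an integer added to a freq-bearing DatetimeIndex shifts every element by that many freq periods, i.e. one hour per step here)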
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
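# value_counts sorts by count in descending order, so the most repeated timestamp (18:00, repeated 10 times) comes first, giving the reversed hourly range below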
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
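# _nat_new (private helper) should return an index of the same length and name filled with NaT; with box=False it should return the underlying int64 iNaT values, as asserted below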
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
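# as with DatetimeIndex above, integer addition shifts each element by that many freq periods (hours here)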
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
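# '1 days' .. '10 days' divided by a 2 hour offset gives 12, 24, ..., 120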
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
        rng = pd.period_range('2014-01', '2016-12', freq='M')
import numpy as np
import pandas as pd
# Standalone script to calculate the probabilities of the reel sequences
# -----------------------------------------------------------
# Creating the Arrays
reel1 = np.array([
['7', 1],
['A', 3],
['Spades', 2],
['Clubs', 1],
['Hearts', 7],
['Diamonds', 5],
['Happy Face', 2]
])
reel2 = np.array([
['7', 1],
['A', 2],
['Spades', 2],
['Clubs', 5],
['Hearts', 3],
['Diamonds', 5],
['Happy Face', 6]
])
reel3 = np.array([
['7', 1],
['A', 1],
['Spades', 2],
['Clubs', 8],
['Hearts', 3],
['Diamonds', 4],
['Sad Face', 2]
])
# -----------------------------------------------------------
# Turning them into data frames
reel1 = pd.DataFrame(reel1, columns=['type','qt'])
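# --- Illustrative sketch (not part of the original script) ---
# Assuming the remaining reels are converted the same way, the per-reel
# symbol probabilities and the chance of one fixed three-symbol sequence
# could be computed as below; reel2_df, reel3_df and symbol_probs are
# hypothetical names introduced here.
reel2_df = pd.DataFrame(reel2, columns=['type', 'qt'])
reel3_df = pd.DataFrame(reel3, columns=['type', 'qt'])

def symbol_probs(df):
    # Counts were stored as strings in the arrays above, so cast before dividing.
    counts = df['qt'].astype(int)
    return pd.Series(counts.values / counts.sum(), index=df['type'].values)

p1 = symbol_probs(reel1)
p2 = symbol_probs(reel2_df)
p3 = symbol_probs(reel3_df)
# The reels spin independently, so a specific sequence such as '7'-'7'-'7'
# has probability equal to the product of the per-reel probabilities.
p_triple_seven = p1['7'] * p2['7'] * p3['7']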
import tensorflow as tf
import pandas as pd
import numpy as np
import tempfile
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder,MinMaxScaler
from sklearn.model_selection import KFold
from imblearn.combine import SMOTETomek
from imblearn.over_sampling import RandomOverSampler
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
import shutil
import argparse
'''Columns containing 0'''
lstZerodrp=['Timestamp','BwdPSHFlags','FwdURGFlags','BwdURGFlags','CWEFlagCount','FwdBytsbAvg','FwdPktsbAvg','FwdBlkRateAvg','BwdBytsbAvg',
'BwdBlkRateAvg','BwdPktsbAvg']
'''Columns containing 1'''
lstScaledrp=['FwdPSHFlags','FINFlagCnt','SYNFlagCnt','RSTFlagCnt','PSHFlagCnt','ACKFlagCnt','URGFlagCnt','ECEFlagCnt']
DATA_FILE = '/opt/Network_Traffic.csv'
'''Dataset preprocess'''
def read_dataFile():
chunksize = 10000
chunk_list = []
missing_values = ["n/a", "na", "--", "Infinity", "infinity", "Nan", "NaN"]
for chunk in pd.read_csv(DATA_FILE, chunksize=chunksize, na_values = missing_values):
chunk_list.append(chunk)
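        # Note: the break below limits the load to the first chunk (10,000 rows).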
break
dataFrme = pd.concat(chunk_list)
lstcols = []
for i in dataFrme.columns:
i = str(i).replace(' ','').replace('/','')
lstcols.append(i)
dataFrme.columns=lstcols
dfAllCpy = dataFrme.copy()
dataFrme = dataFrme.drop(lstZerodrp,axis=1)
return dataFrme
'''Remove NA'''
def preprocess_na(dataFrme):
na_lst = dataFrme.columns[dataFrme.isna().any()].tolist()
for j in na_lst:
dataFrme[j].fillna(0, inplace=True)
return dataFrme
def create_features_label(dataFrme):
#Create independent and Dependent Features
columns = dataFrme.columns.tolist()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Label"]]
# Store the variable we are predicting
target = "Label"
# Define a random state
state = np.random.RandomState(42)
X = dataFrme[columns]
Y = dataFrme[target]
return X,Y
'''Label substitution'''
def label_substitution(dataFrme):
dictLabel = {'Benign':0,'Bot':1}
dataFrme['Label']= dataFrme['Label'].map(dictLabel)
LABELS=['Benign','Bot']
count_classes = pd.value_counts(dataFrme['Label'], sort = True)
print(count_classes)
# Get the Benign and the Bot values
Benign = dataFrme[dataFrme['Label']==0]
Bot = dataFrme[dataFrme['Label']==1]
return dataFrme
'''Class Imabalancement'''
def handle_class_imbalance(X,Y):
# os_us = SMOTETomek(ratio=0.5)
# X_res, y_res = os_us.fit_sample(X, Y)
ros = RandomOverSampler(random_state=50)
X_res, y_res = ros.fit_sample(X, Y)
ibtrain_X = pd.DataFrame(X_res,columns=X.columns)
ibtrain_y = pd.DataFrame(y_res,columns=['Label'])
return ibtrain_X,ibtrain_y
'''Feature Selection'''
def correlation_features(ibtrain_X):
    # Correlation analysis: drop one of every pair of features with correlation >= 0.9
corr = ibtrain_X.corr()
cor_columns = np.full((corr.shape[0],), True, dtype=bool)
for i in range(corr.shape[0]):
for j in range(i+1, corr.shape[0]):
if corr.iloc[i,j] >= 0.9:
if cor_columns[j]:
cor_columns[j] = False
dfcorr_features = ibtrain_X[corr.columns[cor_columns]]
return dfcorr_features
''' Highly Coorelated features '''
def top_ten_features(dfcorr_features,ibtrain_X,ibtrain_y):
feat_X = dfcorr_features
feat_y = ibtrain_y['Label']
#apply SelectKBest class to extract top 10 best features
bestfeatures = SelectKBest(score_func=f_classif, k=10)
fit = bestfeatures.fit(feat_X,feat_y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(feat_X.columns)
#concat two dataframes for better visualization
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Features','Score'] #naming the dataframe columns
final_feature = featureScores.nlargest(10,'Score')['Features'].tolist()
print(type(final_feature))
final_feature.sort()
sort_fn = final_feature
print('*******************')
print(sort_fn)
    # Labels are already numeric after label_substitution; only remap if the
    # column still holds the original string labels.
    if ibtrain_y['Label'].dtype == object:
        ibtrain_y['Label'] = ibtrain_y['Label'].map({'Benign': 0, 'Bot': 1})
selected_X = ibtrain_X[sort_fn]
selected_Y = ibtrain_y['Label']
return selected_X,selected_Y,sort_fn
'''Scaling'''
def normalize_data(selected_X,selected_Y):
scaler = MinMaxScaler(feature_range=(0, 1))
selected_X = pd.DataFrame(scaler.fit_transform(selected_X),columns=selected_X.columns, index=selected_X.index)
trainX, testX, trainY, testY= train_test_split(selected_X,selected_Y, test_size=0.25)
return trainX, testX, trainY, testY
def prec_metric(labels, predictions):
predicted_classes = predictions["class_ids"]
prec_metric = tf.metrics.precision(labels, predicted_classes, name="prec_metric")
return {"prec_metric": prec_metric}
def recall_metric(labels, predictions):
predicted_classes = predictions["class_ids"]
recall_metric = tf.metrics.recall(labels, predicted_classes, name="recall_metric")
return {"recall_metric": recall_metric}
def f1_metric(labels, predictions):
predicted_classes = predictions["class_ids"]
f1_score = tf.contrib.metrics.f1_score(labels, predicted_classes, name="f1_score")
return {"f1_score": f1_score}
def predict_input_fn(testX):
return tf.estimator.inputs.pandas_input_fn(
x=testX,
num_epochs=1,
shuffle=False
)
def get_model(trainX,trainY,testX,testY,final_feature,args):
TF_MODEL_DIR = '/mnt/Model_Network/'
TF_EXPORT_DIR1='/mnt/Model_Network/'
print('****************Get the featture list*')
print(final_feature)
print('*****************************************')
input_columns = [tf.feature_column.numeric_column(k) for k in final_feature]
feature_spec = tf.feature_column.make_parse_example_spec(input_columns)
serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
seed = 1000
config = tf.estimator.RunConfig(model_dir=TF_MODEL_DIR, save_summary_steps=100, save_checkpoints_steps=1000, tf_random_seed=seed)
train_input_fn = tf.estimator.inputs.pandas_input_fn(
x = trainX,
y = trainY,
batch_size = 64,
num_epochs = 1000,
shuffle = False,
queue_capacity = 100,
num_threads = 1
)
test_input_fn = tf.estimator.inputs.pandas_input_fn(
x = testX,
y = testY,
batch_size = 64,
num_epochs = 10,
shuffle = True,
queue_capacity = 10,
num_threads = 1
)
model =tf.estimator.DNNClassifier(feature_columns=input_columns,
hidden_units=[32, 64, 128],
n_classes=2,activation_fn=tf.nn.relu,dropout=0.2,batch_norm=False)
# Adding your custom metrics:
model = tf.contrib.estimator.add_metrics(model, prec_metric)
model = tf.contrib.estimator.add_metrics(model, recall_metric)
model = tf.contrib.estimator.add_metrics(model, f1_metric)
export_final = tf.estimator.FinalExporter(TF_EXPORT_DIR1, serving_input_receiver_fn=serving_input_receiver_fn)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=1200)
eval_spec = tf.estimator.EvalSpec(input_fn=test_input_fn,
steps=1200,
exporters=export_final,
throttle_secs=2,
start_delay_secs=2)
result = tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
print(result)
predictions = model.predict(input_fn=predict_input_fn(testX))
pred_list = list(predictions)
predicted_output = [int(predictions['classes']) for predictions in pred_list]
from sklearn.metrics import classification_report,confusion_matrix
print('Confusion Matrx')
cm = confusion_matrix(testY, predicted_output)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
print('Classification Report')
print(classification_report(testY, predicted_output))
import pandas as pd
df1 = pd.DataFrame({'actual': testY,'pred': predicted_output})
# Create a Pandas Excel writer using XlsxWriter as the engine.
    writer = pd.ExcelWriter('/mnt/network_source.xlsx', engine='xlsxwriter')
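    # Hypothetical continuation (not in the original source): write the
    # actual-vs-predicted frame into the workbook and finalize the file.
    # The sheet name 'predictions' is an assumption.
    df1.to_excel(writer, sheet_name='predictions', index=False)
    writer.save()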
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
        # test a different ordering but with more fields (like an invalid combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
            def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
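# Frames with a row MultiIndex round-trip through table format, and column
# subsetting works both via store.select(columns=...) and
# read_hdf(..., columns=...) (GH 3748).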
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
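# With data_columns=True, MultiIndex level names (here "date" and "id") become
# queryable columns in a where clause (GH 14435).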
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
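# Fixed-format storers (the default for put) do not support selection
# arguments; columns= and where= require format="table".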
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
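# chunksize= and expectedrows= are write-side tuning knobs only (row batching
# and a sizing hint for the underlying table); the stored data is unchanged.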
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
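# Column dtypes are preserved on round-trip, and appending a frame whose
# dtypes do not match the existing table raises ValueError.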
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
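# A timedelta64 data column can be filtered in a where clause either with an
# explicit pd.Timedelta(...) expression or with a shorthand string like '-3D'.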
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
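# Invalid where terms fail early (a bare Term(), bad syntax), and querying a
# column that was not written as a data column raises ValueError.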
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failure on some Windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
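# columns=... and a where term of the form "columns=['A', 'B']" are equivalent
# ways to subset columns; row-filtering terms such as "A > 0" additionally
# require that column to have been written as a data column.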
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
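# where clauses can compare against Timestamps, booleans (True/'true'/1),
# integer and float indexes, and numpy scalars referenced by name.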
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a float column whose NaN is not in the first position is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
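# where clauses support membership tests against inline lists and against
# Python variables referenced by name (e.g. users=selector), including long
# lists, ranges and Index objects.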
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
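# iterator=True and chunksize= yield the selection in pieces; both require
# table format (a fixed-format node raises TypeError) and also work through
# read_hdf and select_as_multiple.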
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be exactly one chunk: the inclusive range fits in the first chunk
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom this should return [],
# e.g. `for e in []: print(True)` never prints True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
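# Terms can be combined with & and |, parenthesised, and negated with !=;
# ~ is only supported for column filters (e.g. ~(columns=['A','B'])), not for
# value comparisons, which numexpr cannot invert.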
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
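# select_column returns a single column as a Series without reading the whole
# table; it only works for the index or for data columns, supports start/stop
# slicing, and raises KeyError/ValueError otherwise.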
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
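# select_as_coordinates returns an Index of row numbers matching a where
# clause; that Index (or a boolean mask / list of row positions) can then be
# passed back as where= to select, including against a second table.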
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
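# append_to_multiple splits one wide frame across several tables keyed by a
# dict of {table: columns}, with None taking the remaining columns; the
# selector table drives where-clause evaluation in select_as_multiple.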
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple with dropna=False is not raising as expected"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
                            # check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series =
|
Series([0], [dt])
|
pandas.Series
|
# Collection of functions to process and load tables for visualisation
# for a set of schools whose data has been updated through syncthing_data
from math import ceil
from scripts.clix_platform_data_processing.get_static_vis_data import get_log_level_data, get_engagement_metrics
from scripts.clix_platform_data_processing.get_static_vis_data import get_num_days_tools, get_num_stud_tools, get_avgtime_perday_tools, get_studperday_tools
from scripts.clix_platform_data_processing.get_static_vis_data import get_avg_percnt_visits_modules, get_num_stud_modules, clean_code
import config.clix_config as clix_config
import time
from datetime import datetime
from airflow.models import Variable
import pandas
import json
from functools import reduce
from airflow.models import TaskInstance
from airflow.models import DagBag
tools_modules_server_logs_datapath = clix_config.local_dst_state_data_logs
def load_to_db(metric_data):
pass
def partition(lst, n=clix_config.num_school_chunks):
if (len(lst) < n):
return [lst, [], [], []]
else:
division = len(lst) / n
return [lst[round(division * i):round(division * (i + 1))] for i in range(n)]
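# Quick illustration of partition (assuming clix_config.num_school_chunks is 4, which the
# fixed-length fallback above suggests): partition(list(range(10))) returns
# [[0, 1], [2, 3, 4], [5, 6, 7], [8, 9]], while any list shorter than 4 items comes back
# as [lst, [], [], []].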
def process_school_data(state, chunk, **context):
'''
Function to process tables for a set of schools whose
data has been updated through syncthing
'''
if state == 'tg':
state_new = 'ts'
elif state == 'ct':
state_new = 'cg'
else:
state_new = state
list_of_schools = context['ti'].xcom_pull(task_ids='sync_state_data_' + state_new, key = 'school_update_list')
schools_to_process = partition(list_of_schools)[chunk]
print(schools_to_process)
if schools_to_process:
#print('Got all schools')
#This date range is just to process latest data logs and then append them to already processed logs data
# for each state
date_range = ['2018-06-01', str(datetime.utcnow().date())]
#date_range = [Variable.get('prev_update_date_static_' + state), Variable.get('curr_update_date_static_' + state)]
schools_log_data = get_log_level_data(schools=schools_to_process, state=state, date_range=date_range)
# Save chunk of tools data of a state
tools_temp_path = tools_modules_server_logs_datapath + 'tools_temp' + '/' + state + '_' + str(chunk) + '.csv'
schools_log_data[0].to_csv(tools_temp_path, index=False)
# Save chunk of modules data of a state
modules_temp_path = tools_modules_server_logs_datapath + 'modules_temp' + '/' + state + '_' + str(chunk) + '.csv'
schools_log_data[1][0].to_csv(modules_temp_path, index=False)
# Save chunk of serverlogs data of a state
serverlogs_temp_path = tools_modules_server_logs_datapath + 'serverlogs_temp' + '/' + state + '_' + str(chunk) + '.json'
server_logs_data = {key: [each.strftime('%Y%m%d') for each in values] for key, values in schools_log_data[1][1].items()}
with open(serverlogs_temp_path, 'w', encoding='utf-8') as f:
json.dump(server_logs_data, f, ensure_ascii=True, indent=4)
f.close()
all_chunks = [*range(clix_config.num_school_chunks)]
all_chunks.remove(chunk)
try:
dag_bag = DagBag('/usr/local/airflow/dags/clix_static_visuals_dag.py')
target_dag = dag_bag.get_dag('clix_static_visuals_dag')
dr = target_dag.get_dagrun(target_dag.latest_execution_date)
ti_list = [dr.get_task_instance('process_raw_state_data_' + str(each) + '_' + state) for each in all_chunks]
except Exception as e:
import pdb
pdb.set_trace()
other_tasks_status = all([each.current_state() == 'success' for each in ti_list])
if other_tasks_status:
Variable.set('last_updated_date_static_' + state, datetime.utcnow().date())
else:
print('No schools to process for this task')
return None
def combine_chunks(state, **context):
list_of_data_chunks_tools = []
list_of_data_chunks_modules = []
list_of_data_chunks_serverlogs = []
for chunk in list(range(clix_config.num_school_chunks)):
tools_temp_path = tools_modules_server_logs_datapath + 'tools_temp/' + state + '_' + str(chunk) + '.csv'
list_of_data_chunks_tools.append(pandas.read_csv(tools_temp_path))
modules_temp_path = tools_modules_server_logs_datapath + 'modules_temp/' + state + '_' + str(chunk) + '.csv'
list_of_data_chunks_modules.append(pandas.read_csv(modules_temp_path))
serverlogs_temp_path = tools_modules_server_logs_datapath + 'serverlogs_temp/' + state + '_' + str(chunk) + '.json'
with open(serverlogs_temp_path, 'r', encoding='utf-8') as f:
list_of_data_chunks_serverlogs.append(json.load(f))
f.close()
#Combine and save tools data of a state
state_tools_logs_file = tools_modules_server_logs_datapath + 'tool_logs_' + state + '.csv'
pandas.concat(list_of_data_chunks_tools).to_csv(state_tools_logs_file)
#Combine and save modules data of a state
state_modules_logs_file = tools_modules_server_logs_datapath + 'module_logs_' + state + '.csv'
pandas.concat(list_of_data_chunks_modules).to_csv(state_modules_logs_file)
#Combine and save serverlog file of a state
state_server_logs_file = tools_modules_server_logs_datapath + 'server_logs_' + state + '.json'
with open(state_server_logs_file, 'w', encoding='utf-8') as fp:
server_logs_data = reduce(lambda x, y: x.update(y) or x, list_of_data_chunks_serverlogs)
json.dump(server_logs_data, fp, ensure_ascii=True, indent=4)
fp.close()
return None
def get_state_static_vis_data(state, all_states_flag, **context):
months_list = Variable.get('static_vis_range', deserialize_json=True)['months_list']
# Get all the data files required for vis data generation
if not all_states_flag:
state_tools_logs_file = tools_modules_server_logs_datapath + 'tool_logs_' + state + '.csv'
state_tools_data = pandas.read_csv(state_tools_logs_file)
state_modules_logs_file = tools_modules_server_logs_datapath + 'module_logs_' + state + '.csv'
state_modules_data = pandas.read_csv(state_modules_logs_file)
else:
list_of_all_state_df_tools = [pandas.read_csv(tools_modules_server_logs_datapath + 'tool_logs_' + each + '.csv') for each in ['mz', 'ct', 'rj', 'tg']]
state_tools_data = pandas.concat(list_of_all_state_df_tools, ignore_index=True)
list_of_all_state_df_modules = [pandas.read_csv(tools_modules_server_logs_datapath + 'module_logs_' + each + '.csv') for each in ['mz', 'ct', 'rj', 'tg']]
state_modules_data = pandas.concat(list_of_all_state_df_modules, ignore_index=True)
state_tools_data['date_created_new'] = pandas.to_datetime(state_tools_data['date_created'],
format="%Y-%m-%d").apply(lambda x: x.strftime("%b%Y"))
state_modules_data['date_created'] = pandas.to_datetime(state_modules_data['timestamp'],
format = "%Y-%m-%d %H:%M:%S").apply(lambda x: x.date())
state_modules_data['date_created_new'] = pandas.to_datetime(state_modules_data['timestamp'],
format = "%Y-%m-%d %H:%M:%S").apply(lambda x: x.strftime("%b%Y"))
for each_month in months_list:
#Filter out monthly data for tools and modules
if not each_month == 'all_months':
state_tools_data_new = state_tools_data[state_tools_data['date_created_new'].isin([each_month])]
state_modules_data_new = state_modules_data[state_modules_data['date_created_new'].isin([each_month])]
else:
state_tools_data_new = state_tools_data
state_modules_data_new = state_modules_data
state_tools_data_new = state_tools_data_new[~((state_tools_data_new['tool_name'] == 'policesquad') & (state_tools_data_new['time_spent'] >= 120))]
state_tools_data_new = state_tools_data_new[state_tools_data_new['time_spent'] <= 200]
if not all_states_flag:
state_vis_data_path = tools_modules_server_logs_datapath + 'vis_data/' + state + '_' + each_month
elif all_states_flag:
state_vis_data_path = tools_modules_server_logs_datapath + 'vis_data/' + 'all_states_' + each_month
else:
import pdb
pdb.set_trace()
if not state_tools_data_new.empty:
state_tools_data_new = state_tools_data_new.groupby(["school_server_code"]).apply(lambda x: get_engagement_metrics(x,
'tools')).reset_index(level=None, drop=True)
# To get data for vis - Number of Students accessing different tools
get_num_stud_tools(state_tools_data_new).to_csv( state_vis_data_path + '_toolwise_stud.csv', index=False)
#To get data for vis - Number of days of tools usage
get_num_days_tools(state_tools_data_new).to_csv(state_vis_data_path + '_toolwise_days.csv', index=False)
# To get data for vis - Number of students using tools per day
get_studperday_tools(state_tools_data_new).to_csv(state_vis_data_path + '_toolwise_studperday.csv', index=False)
# To get data for vis - Average time spent by students per day
get_avgtime_perday_tools(state_tools_data_new).to_csv(state_vis_data_path + '_toolwise_timespperday.csv', index=False)
# To get data for vis - Monthly variation of time spent
#TODO
#Modules engagement metrics
if not state_modules_data_new.empty:
school_server_code = [each[0] + each[1] for each in zip(state_modules_data_new['school_code'].apply(str).apply(lambda x: x.split('.')[0]).tolist(),
state_modules_data_new['server_id'].astype(str).apply(lambda x: '-' + x).tolist())]
state_modules_data_new['school_server_code'] =
|
pandas.Series(school_server_code, index=state_modules_data_new.index)
|
pandas.Series
|
import sqlite3
import pandas as pd
from zipfile import ZipFile, ZIP_DEFLATED
import csv
import glob
import io
import os
import untangle
def get_paths_from_zip(zip_filepath):
"""
Extracts TransXchange xml-paths from ZipFile (also nested).
"""
xml_contents = []
z = ZipFile(zip_filepath)
files_in_zip = z.namelist()
for name in files_in_zip:
if name.endswith('xml'):
# Create dictionary with name as key and zip filepath value
xml_contents.append({name: z.filename})
        # If the zip contains another zip, take its contents
elif name.endswith('.zip'):
# Read inner zip to memory
inner_zip = ZipFile(io.BytesIO(z.read(name)))
# Read files from inner zip
for inner_name in inner_zip.namelist():
if inner_name.endswith('xml'):
xml_contents.append({z.filename: {name: inner_name}})
return xml_contents
def get_xml_paths(filepath):
"""
Retrieves XML paths from:
- directory +
- ZipFiles within a directory +
- ZipFiles within a ZipFile
Finds xml files with all combinations of the above.
"""
# Input is directory
# ------------------
if os.path.isdir(filepath):
# Read all XML and zip files
xml_contents = glob.glob(os.path.join(filepath, '*.xml'))
zip_contents = glob.glob(os.path.join(filepath, '*.zip'))
# Parse xml references inside zip files
if len(zip_contents) > 0:
for zfp in zip_contents:
xml_contents += get_paths_from_zip(zfp)
# Input is a ZipFile
elif filepath.endswith('.zip'):
xml_contents = get_paths_from_zip(filepath)
return xml_contents
def read_unpacked_xml(xml_path):
"""
Reads an XML with untangle.
"""
file_size = os.path.getsize(xml_path)
parsed_xml = untangle.parse(xml_path)
return parsed_xml, file_size, os.path.basename(xml_path)
def read_xml_inside_zip(xml_path):
"""
Reads an XML with untangle which is inside a ZipFile.
"""
zip_filepath = list(xml_path.values())[0]
filename = list(xml_path.keys())[0]
z = ZipFile(zip_filepath)
file_size = z.getinfo(filename).file_size
parsed_xml = untangle.parse(
io.TextIOWrapper(
io.BytesIO(
z.read(filename)
)
)
)
return parsed_xml, file_size, filename
def read_xml_inside_nested_zip(xml_path):
"""
Reads an XML with untangle which is in a ZipFile inside another ZipFile.
"""
zip_filepath = list(xml_path.keys())[0]
inner_zip_info = list(xml_path.values())[0]
inner_zip_name = list(inner_zip_info.keys())[0]
xml_name = list(inner_zip_info.values())[0]
# Read outer zip
z = ZipFile(zip_filepath)
# Read inner zip to memory
inner_zip = ZipFile(io.BytesIO(z.read(inner_zip_name)))
file_size = inner_zip.getinfo(xml_name).file_size
parsed_xml = untangle.parse(
io.TextIOWrapper(
io.BytesIO(
inner_zip.read(xml_name)
)
)
)
return parsed_xml, file_size, xml_name
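# A small dispatch helper (a sketch, not part of the original module): get_xml_paths
# yields plain paths, {xml_name: zip_path} dicts, or {zip_path: {inner_zip: xml_name}}
# dicts, so each entry can be routed to the matching reader as follows.
def read_any_xml(entry):
    """Route one entry returned by get_xml_paths to the appropriate reader."""
    if isinstance(entry, str):
        return read_unpacked_xml(entry)
    if isinstance(list(entry.values())[0], dict):
        return read_xml_inside_nested_zip(entry)
    return read_xml_inside_zip(entry)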
def generate_gtfs_export(gtfs_db_fp):
"""Reads the gtfs database and generates an export dictionary for GTFS"""
# Initialize connection
conn = sqlite3.connect(gtfs_db_fp)
# Read database and produce the GTFS file
# =======================================
# Stops
# -----
stops = pd.read_sql_query("SELECT * FROM stops", conn)
if 'index' in stops.columns:
stops = stops.drop('index', axis=1)
# Drop duplicates based on stop_id
stops = stops.drop_duplicates(subset=['stop_id'])
# Agency
# ------
agency =
|
pd.read_sql_query("SELECT * FROM agency", conn)
|
pandas.read_sql_query
|
#Somu 30th March 2018
#Project 2018: Programming and Scripting
# This program is divided into 3 sections
# Section 1 : Using Iris Data, create the python-pandas data frame
# Section 2 : Describe the Iris flower data stored in the dataframes
# Section 3 : Plot the graphs using the matplotlib library
# This program makes use of the pandas, numpy and matplotlib libraries
#Technical Reference :
# https://stackoverflow.com/
# http://pandas.pydata.org/pandas-docs/version
# https://www.tutorialspoint.com/python_pandas/python_pandas_series.htm
# https://matplotlib.org/api/pyplot_api.html
# https://matplotlib.org/examples/index.html
# https://stackoverflow.com/questions/4270301/matplotlib-multiple-datasets-on-the-same-scatter-plot?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# https://www.datascience.com/learn-data-science/tutorials/creating-data-visualizations-matplotlib-data-science-python
#Import the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Define the variables
Gname,Gnamei,Gnameo = '','',''
Lsetosa,Lvirginica,Lversi =[],[],[]
#SECTION 1 -- Starts here
#Read the data from iris.csv file
#Function to create python list for each flower with attributes
#Create Dataframes for each flower using the python List
#Setosa flower: data frame - dfsetosa :: List name - Lsetosa
#virginica flower: data frame - dfvirginica :: List name - Lvirginica
#versicolor flower: data frame - dfversi :: List name - Lversi
# fstore function: Creates lists of petal length, petal width, sepal length and sepal width for each flower type
# 3 different lists are created
def fstore(line):
Gplen = line.split(',')[0]
Gpwid = line.split(',')[1]
Gslen = line.split(',')[2]
Gswid = line.split(',')[3]
Gnameo = str(line.split(',')[4]).rstrip()
if Gnameo == 'Iris-setosa':
Lsetosa.append ([float(Gplen),float(Gpwid),float(Gslen),float(Gswid)])
elif Gnameo == 'Iris-versicolor':
Lversi.append ([float(Gplen),float(Gpwid),float(Gslen),float(Gswid)])
elif Gnameo == 'Iris-virginica':
Lvirginica.append ([float(Gplen),float(Gpwid),float(Gslen),float(Gswid)])
# Read the iris Data file from location data/iris.csv
# calls function "fstore" to create the list
with open("data/iris.csv") as f:
for line in f:
fstore(line)
# create a dataframe for each flower from the list
dfsetosa = pd.DataFrame(Lsetosa, columns = ['Petal Length','Petal Width','Sepal Length', 'Sepal Width'])
dfvirginica = pd.DataFrame(Lvirginica, columns = ['Petal Length','Petal Width','Sepal Length', 'Sepal Width'])
dfversi = pd.DataFrame(Lversi, columns = ['Petal Length','Petal Width','Sepal Length', 'Sepal Width'])
#SECTION 1 -- Ends here
#SECTION 2 -- Starts here
#Using the dataframe describe function, display the below for each flower
#Count of the records/data for each flower
#Mean/Average of the flower attributes - Sepal width and length; Petal width and length
#Standard deviation of each attribute
#Minimum and maximum value of each attribute
print (("\n"),"Setosa Flower :: Data Summary")
print (("\n"),dfsetosa.describe(include='all'))
print (("\n"),"Virginica Flower :: Data Summary")
print (("\n"),dfvirginica.describe(include='all'))
print (("\n"),"Versicolor Flower :: Data Summary")
print (("\n"),dfversi.describe(include='all'))
#SECTION 2 -- Ends here
#SECTION 3 -- Starts here
#Create a dataframe called dflower, which holds the data of all the 3 flowers
#Create columns in the dataframe (dflower) for each flower attribute
#PL-Setosa -> represents the petal length of Setosa
#....
#PW-Veri -> represents the petal width of versicolor
#Use numpy to provide linspace for the X and Y axes
#Use matplotlib to plot the graphs (Scatter and Box)
#PLOT and SHOW Graphs using matplot library
#Blue colour in the Graph represents Setosa flower
#Red colour in the Graph represents Virginica flower
#Green colour in the Graph represents Versicolor flower
#Data frame to hold the values of Petal lengths and widths of Iris Setosa, Virginica and Versicolor
dflower = pd.DataFrame()
#Function to create Petal dataframes
def creatpetaldataframe():
dflower['PL-Setosa'] = pd.DataFrame(dfsetosa["Petal Length"])
#dflower = dflower.rename(columns={'Petal Length': 'PL-Setosa'})
dflower['PW-Setosa'] = pd.DataFrame(dfsetosa["Petal Width"])
dflower['PL-Viginica'] = pd.DataFrame(dfvirginica["Petal Length"])
dflower['PW-Viginica'] = pd.DataFrame(dfvirginica["Petal Width"])
dflower['PL-Versi'] =
|
pd.DataFrame(dfversi["Petal Length"])
|
pandas.DataFrame
|
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
def rmovie_basicvar(cdf,
var = 'tg1',
Mm = False,
km = False,
savefig = False,
figname = 'radynvar.html',
color = 'steelblue'):
'''
A function to produce an animated figure of RADYN variables.
This version is pre-constructed and lets you just input the
variable you want to plot. Other variables (such as populations)
will require more input, and are separate functions.
Turns the output into a pandas dataframe, which is then passed to
plotly express to create the animated figure
Parameters
__________
cdf : The radyn cdf object
var : str
The variable to plot (default = 'tg1')
Mm : Boolean
Plot height in Mm (default = False)
km : Boolean
Plot height in km (default = False)
savefig : Boolean
Save the figure (html file)
figname : str
Filename, if saving the output
NOTES :
So far, allowed variables are
tg1 - temperature
ne1 - electron density
bheat1 - beam heating rate
d1 - mass density
vz1 - velocity
np - proton density
<NAME>, March 2021
'''
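    # Example call (a sketch; `cdf` is assumed to be a RADYN cdf object loaded elsewhere,
    # e.g. via radynpy's cdf reader, which is not shown here):
    #   rmovie_basicvar(cdf, var='ne1', Mm=True, savefig=True, figname='ne1.html')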
########################################################################
# Some preliminary set up
########################################################################
if Mm == True:
xtitle = 'Height [Mm]'
height = cdf.z1/1e8
elif km == True:
xtitle = 'Height [km]'
height = cdf.z1/1e5
else:
xtitle = 'Height [cm]'
height = cdf.z1
if var == 'tg1':
rvar = cdf.tg1
ytitle = 'Temperature [K]'
ylog = True
xlog = False
elif var == 'ne1':
rvar = cdf.ne1
ytitle = 'Electron Density [cm<sup>-3</sup>]'
ylog = True
xlog = False
elif var == 'bheat1':
rvar = cdf.bheat1
ytitle = 'Q<sub>beam</sub> [erg cm<sup>-3</sup> s<sup>-1</sup>]'
ylog = False
xlog = False
elif var == 'd1':
rvar = cdf.d1
ytitle = 'Mass Density [g cm<sup>-3</sup>]'
ylog = True
xlog = False
elif var == 'vz1':
rvar = cdf.vz1/1e5
ytitle = 'Velocity [km s<sup>-1</sup>]'
ylog = False
xlog = False
elif var == 'np':
rvar = cdf.n1[:,:,5,0]
ytitle = 'Proton Density [cm<sup>-3</sup>]'
ylog = True
xlog = False
template = dict(
layout = go.Layout(font = dict(family = "Rockwell", size = 16),
title_font = dict(family = "Rockwell", size = 20),
plot_bgcolor = 'white',
paper_bgcolor = 'white',
xaxis = dict(
showexponent = 'all',
exponentformat = 'e',
tickangle = 0,
linewidth = 3,
showgrid = True,
),
yaxis = dict(
showexponent = 'all',
exponentformat = 'e',
linewidth = 3,
showgrid = True,
anchor = 'free',
position = 0,
domain = [0.0,1]
),
coloraxis_colorbar = dict(
thickness = 15,
tickformat = '0.2f',
ticks = 'outside',
titleside = 'right'
)
))
########################################################################
# Build the dataframe
########################################################################
col1 = ytitle
col2 = xtitle
time = 'Time [s]'
timeind = 'Time index'
df_list = []
for i in range(len(cdf.time)):
data = {col1:rvar[i,:],
col2:height[i,:],
time: cdf.time[i],
timeind: i
}
df_list.append(
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import asyncio, logging, unittest
import sqlalchemy as sa
import pandas as pd
from aiomysql.sa import create_engine
from query_maker import MySql, Table
class Alchemy (unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
self.db = MySql(
host = 'db',
user = 'root',
pwd = '',
dbName='saTest',
loop=loop
)
await self.db.createDbIfNotExists()
async def test_deleteSelect(self):
await self.db.SaQuery('''CREATE TABLE IF NOT EXISTS `saTable4` (
`id` serial PRIMARY KEY,
`val` varchar(255)
)''')
metadata = sa.MetaData()
tbl = sa.Table('saTable4', metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('val', sa.String(255))
)
res = await self.db.SaQuery([
tbl.insert().values([{'val':'testToDel'}, {'val':'testToDel2'}]),
sa.select([tbl])
], echo=True)
df =
|
pd.DataFrame(res[0])
|
pandas.DataFrame
|
# %% [markdown]
# # Error Estimation
# %%
from datetime import datetime
import glob
import logging
import os
import cartopy.crs as ccrs
import luigi
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from netCDF4 import Dataset
thresholds = [1e-5, 1e-4, 1e-3, 1e-2, 0]
# %% [markdown]
# ## Data import
# Import functions to read error and size from csv files
def altitude_by(level_of_interest: int):
map = {
-6: '22.1 km',
-19: '4.2 km',
-16: '6.3 km',
-10: '11.9 km',
-35: '22.1 km',
-39: '11.9 km',
-48: '4.2 km',
0: '0 km'
}
return map.get(level_of_interest)
def import_data(path_pattern: str,
gas: str = None,
var: str = None,
threshold: float = None,
loi: int = None,
type: int = None,
inlcude_coordinates=False,
rank: str = None) -> pd.DataFrame:
frames = []
for file in glob.glob(path_pattern):
frame = pd.read_csv(file, index_col=None, header=0)
# filter to gas and variables
if loi:
frame = frame[frame['level_of_interest'] == loi]
if gas:
gas = gas.split('_')[0]
frame = frame[frame['gas'] == gas]
if var:
frame = frame[frame['var'] == var]
        if type:
            frame = frame[frame['type'] == type]
        # set threshold == 0 to 1 for consistency
        frame['threshold'].replace(0, 1, inplace=True)
if threshold:
frame = frame[frame['threshold'] == threshold]
if 'level_of_interest' in frame:
# set altitudes
frame['altitude'] = frame['level_of_interest'].apply(altitude_by)
if 'size' in frame:
frame['size'] = frame['size'] / 1000
else:
            # distinguish between N2O (level_of_interest >= -29) and CH4 (level_of_interest < -29)
frame.loc[(frame['gas'] == 'GHG') & (
frame['level_of_interest'] >= -29), 'gas'] = 'GHG_N2O'
frame.loc[(frame['gas'] == 'GHG') & (
frame['level_of_interest'] < -29), 'gas'] = 'GHG_CH4'
if inlcude_coordinates:
_, filename = os.path.split(file)
orbit, _ = os.path.splitext(filename)
nc = Dataset(f'data/eigenvalues/{orbit}.nc')
lat = nc['lat'][...]
lon = nc['lon'][...]
if rank:
target_rank = nc[rank][...]
coordinates = pd.DataFrame(
{'lat': lat, 'lon': lon, 'rank': target_rank})
else:
coordinates = pd.DataFrame(
{'lat': lat, 'lon': lon, 'rank': rank})
frame = frame.merge(coordinates, left_on='event', right_index=True)
frames.append(frame)
return pd.concat(frames, axis=0, ignore_index=True)
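# Example call for import_data (a sketch; the path pattern and filter values below are
# assumptions for illustration, not taken from the original pipeline):
# errors = import_data('data/error/*.csv', gas='WV', var='avk', threshold=1e-3, loi=-19)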
def import_size(path_pattern: str):
frames = []
for file in glob.glob(path_pattern):
# frame = pd.read_csv(file, index_col=None, header=0)
frame =
|
pd.read_csv(file)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from rdt.transformers import BooleanTransformer
class TestBooleanTransformer:
def test_boolean_some_nans(self):
"""Test BooleanTransformer on input with some nan values.
Ensure that the BooleanTransformer can fit, transform, and reverse
transform on boolean data with Nones. Expect that the reverse
transformed data is the same as the input.
Input:
- boolean data with None values
Output:
- The reversed transformed data
"""
# Setup
data = pd.DataFrame([True, False, None, False], columns=['bool'])
transformer = BooleanTransformer()
# Run
transformer.fit(data, data.columns.to_list())
transformed = transformer.transform(data)
reverse = transformer.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(reverse, data)
def test_boolean_all_nans(self):
"""Test BooleanTransformer on input with all nan values.
Ensure that the BooleanTransformer can fit, transform, and reverse
transform on boolean data with all Nones. Expect that the reverse
transformed data is the same as the input.
Input:
- 4 rows of all None values
Output:
- The reversed transformed data
"""
# Setup
data = pd.DataFrame([None, None, None, None], columns=['bool'])
transformer = BooleanTransformer()
# Run
transformer.fit(data, data.columns.to_list())
transformed = transformer.transform(data)
reverse = transformer.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(reverse, data)
def test_boolean_input_unchanged(self):
"""Test BooleanTransformer on input with some nan values.
Ensure that the BooleanTransformer can fit, transform, and reverse
        transform on boolean data with some Nones. Expect that the intermediate
transformed data is unchanged.
Input:
        - boolean data with None values
Output:
- The reversed transformed data
Side effects:
- The intermediate transformed data is unchanged.
"""
# Setup
data =
|
pd.DataFrame([True, False, None, False], columns=['bool'])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from numpy import zeros
from pandas import DataFrame, get_dummies
import keras
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Dropout, Bidirectional, Embedding, LSTM, Conv1D, MaxPooling1D, Flatten
from keras.models import Sequential, load_model
from keras.utils import to_categorical, plot_model
from keras.callbacks import EarlyStopping
from keras.constraints import max_norm
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Import data from CSV
f = pd.read_csv('presidents-data-words-january-3-2018.csv')
df =
|
DataFrame(f)
|
pandas.DataFrame
|
# coding: utf-8
# In[1]:
import warnings
warnings.filterwarnings("ignore")
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
from numpy import array
from random import random
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
import csv
import math
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import time
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
# In[3]:
def folder(f_name): # this function creates a folder with the given name (e.g. "attacks") in the program directory.
try:
if not os.path.exists(f_name):
os.makedirs(f_name)
except OSError:
print ("The folder could not be created!")
# In[4]:
def find_the_way(path,file_format):
files_add = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if file_format in file:
files_add.append(os.path.join(r, file))
return files_add
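# Example (a sketch; the folder name and extension are assumptions): recursively collect
# every CSV path below ./attacks
# csv_files = find_the_way('./attacks', '.csv')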
# In[5]:
ml_list={"DT":DecisionTreeClassifier()}
# In[6]:
def target_name(name):
df = pd.read_csv(name,usecols=["Label"])
target_names=sorted(list(df["Label"].unique()))
return target_names
# ## Calculation of evaluations
# In[7]:
def score(altime,train_time,test_time,predict,y_test,class_based_results,i,cv,dname,ii):
precision=[]
recall=[]
f1=[]
accuracy=[]
total_time=[]
kappa=[]
accuracy_b=[]
rc=sklearn.metrics.recall_score(y_test, predict,average= "macro")
pr=sklearn.metrics.precision_score(y_test, predict,average= "macro")
f_1=sklearn.metrics.f1_score(y_test, predict,average= "macro")
report = classification_report(y_test, predict, target_names=target_names,output_dict=True)
cr = pd.DataFrame(report).transpose()
if class_based_results.empty:
class_based_results =cr
else:
class_based_results = class_based_results.add(cr, fill_value=0)
precision.append(float(pr))
recall.append(float(rc))
f1.append(float(f_1))
accuracy_b.append(balanced_accuracy_score( y_test,predict))
accuracy.append(accuracy_score(y_test, predict))
#clf.score(X_test, y_test))
#print(balanced_accuracy_score( y_test,predict))
#t_time.append(float((time.time()-second)) )
kappa.append(round(float(sklearn.metrics.cohen_kappa_score(y_test, predict,
labels=None, weights=None, sample_weight=None)),15))
print ('%-15s %-3s %-3s %-6s %-5s %-5s %-5s %-5s %-8s %-5s %-8s %-8s%-8s%-8s' % (dname,i,cv,ii[0:6],str(round(np.mean(accuracy),2)),str(round(np.mean(accuracy_b),2)),
str(round(np.mean(precision),2)), str(round(np.mean(recall),2)),str(round(np.mean(f1),4)),
str(round(np.mean(kappa),2)),str(round(np.mean(train_time),2)),str(round(np.mean(test_time),2)),str(round(np.mean(test_time)+np.mean(train_time),2)),str(round(np.mean(altime),2))))
lines=(str(dname)+","+str(i)+","+str(cv)+","+str(ii)+","+str(round(np.mean(accuracy),15))+","+str(round(np.mean(accuracy_b),15))+","+str(round(np.mean(precision),15))+","+ str(round(np.mean(recall),15))+","+str(round(np.mean(f1),15))+","+str(round(np.mean(kappa),15))+","+str(round(np.mean(train_time),15))+","+str(round(np.mean(test_time),15))+","+str(altime)+"\n")
return lines,class_based_results
# # isolated training and test data
# In[8]:
def ML_isolated(loop1,loop2,output_csv,cols,step,x,dname):
graph_on_off=False
#graph_on_off=False
print ('%-15s %-3s %-3s %-6s %-5s %-5s %-5s %-5s %-8s %-5s %-8s %-8s%-8s%-8s'%
("Dataset","T","CV","ML alg","Acc","b_Acc","Prec", "Rec" , "F1", "kap" ,"tra-T","test-T","total","alg-time"))
ths = open(output_csv, "w")
ths.write("Dataset,T,CV,ML algorithm,Acc,b_Acc,Precision, Recall , F1-score, kappa ,tra-Time,test-Time,Alg-Time\n")
repetition=10
fold=1
from sklearn.metrics import balanced_accuracy_score
from sklearn.preprocessing import Normalizer
for ii in ml_list:
class_based_results=pd.DataFrame()#"" #pd.DataFrame(0, index=np.arange((len(target_names)+3)), columns=["f1-score","precision","recall","support"])
cm=pd.DataFrame()
cv=0
for i in range(repetition):
#TRAIN
df =
|
pd.read_csv(loop1,usecols=cols)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from tqdm import tqdm
import cv2
import pyfeats
from utils import Plaque
#%% Path & Name of Plaque
path = './data/'
labels = pd.read_excel(path+'labels.xlsx')
path_features = './results/features/'
IMG_NO = len(labels)
#%% Parameters
perc = 1 # Percentage of the plaque to take into consideration when calculating features in (0,1]
Dx = [0,1,1,1] # A. Early Textural - GLDS
Dy = [1,1,0,-1] # A. Early Textural - GLDS
d = 1 # A. Early Textural - NGTDM
Lr, Lc = 4, 4 # A. Early Textural - SFM
l = 7 # A. Early Textural - LTE
s = 4 # A. Early Textural - FDTA
th = [135,140] # A. Late Textural - HOS
P = [8,16,24] # A. Late Textural - LBP
R = [1,2,3] # A. Late Textural - LBP
N = 30 # B Morphology
bins_hist = 32 # C. Histogram - All
num_eros = 3 # C. Histogram - Multi-region Histogram
square_size = 3 # C. Histogram - Multi-region Histogram
wavelet_dwt = 'bior3.3' # D. Multi-Scale - DWT
wavelet_swt = 'bior3.3' # D. Multi-Scale - SWT
wavelet_wp = 'coif1' # D. Multi-Scale - WP
levels_dwt = 3 # D. Multi-Scale - DWT
levels_swt = 3 # D. Multi-Scale - SWT
levels_wp = 3 # D. Multi-Scale - WP
bins_digitize = 32 # C. Histogram - Correlogram
bins_hist_corr = 32 # C. Histogram - Correlogram
zernikes_radii = 9 # E. Other - Zernikes Moments
#%% Init arrays
names = []
# A. Textural
np_fos = np.zeros((IMG_NO,16), np.double)
np_glcm_mean = np.zeros((IMG_NO,14), np.double)
np_glcm_range = np.zeros((IMG_NO,14), np.double)
np_glds = np.zeros((IMG_NO,5), np.double)
np_ngtdm = np.zeros((IMG_NO,5), np.double)
np_sfm = np.zeros((IMG_NO,4), np.double)
np_lte = np.zeros((IMG_NO,6), np.double)
np_fdta = np.zeros((IMG_NO,s+1), np.double)
np_glrlm = np.zeros((IMG_NO,11), np.double)
np_fps = np.zeros((IMG_NO,2), np.double)
np_shape_parameters = np.zeros((IMG_NO,5), np.double)
np_hos = np.zeros((IMG_NO,len(th)), np.double)
np_lbp = np.zeros((IMG_NO,len(P)*2), np.double)
np_glszm = np.zeros((IMG_NO,14), np.double)
# B. Morphological
pdf_L = np.zeros((IMG_NO,N), np.double)
pdf_M = np.zeros((IMG_NO,N), np.double)
pdf_H = np.zeros((IMG_NO,N), np.double)
cdf_L = np.zeros((IMG_NO,N), np.double)
cdf_M = np.zeros((IMG_NO,N), np.double)
cdf_H = np.zeros((IMG_NO,N), np.double)
pdf_gray = np.zeros((IMG_NO,N), np.double)
cdf_gray = np.zeros((IMG_NO,N), np.double)
# C. Histogram
np_histogram = np.zeros((IMG_NO,bins_hist), np.double)
np_multiregion_histogram = np.zeros((IMG_NO,bins_hist*num_eros), np.double)
np_correlogram_d = np.zeros((IMG_NO,bins_digitize*bins_hist), np.double)
np_correlogram_th = np.zeros((IMG_NO,bins_digitize*bins_hist), np.double)
# D. Multi-Scale
np_dwt = np.zeros((IMG_NO,6*levels_dwt), np.double)
np_swt = np.zeros((IMG_NO,6*levels_swt), np.double)
np_wp = np.zeros((IMG_NO,(4**levels_wp-1)*2), np.double)
np_gt = np.zeros((IMG_NO,16), np.double)
np_amfm = np.zeros((IMG_NO,32*4), np.double)
# E. Other
np_hu = np.zeros((IMG_NO,7), np.double)
np_zernikes = np.zeros((IMG_NO,25), np.double)
#%% Calculate Features
progress = tqdm(range(0,IMG_NO), desc="Calculating Textural Features...")
for i in progress:
name = labels.iloc[i,0]
names.append(name)
# Load ultrasound
path_ultrasound = path + 'ultrasounds\\' + name + '.bmp'
ultrasound = cv2.imread(path_ultrasound, cv2.IMREAD_GRAYSCALE)
# Load points
path_points = path + 'points\\' + name + '_points.out'
points = np.loadtxt(path_points, delimiter=',')
points = np.array(points, np.int32)
# Load points near lumen
path_points_lumen = path + 'points_lumen\\' + name + '_points_lumen.out'
points_lumen = np.loadtxt(path_points_lumen, delimiter=',')
points_lumen = np.array(points_lumen, np.int32)
plaque = Plaque(ultrasound, points, points_lumen, name, pad=2)
plaque.mask = Plaque.get_perc_ROI(plaque.mask, plaque.perimeter_lumen, perc)
# A. Textural
progress.set_description('Calculating Early Textural Features' + ' for ' + name)
np_fos[i,:], labels_fos = pyfeats.fos(plaque.plaque, plaque.mask)
np_glcm_mean[i,:], np_glcm_range[i,:], labels_glcm_mean, labels_glcm_range = pyfeats.glcm_features(plaque.plaque, ignore_zeros=True)
np_glds[i,:], labels_glds = pyfeats.glds_features(plaque.plaque, plaque.mask, Dx=Dx, Dy=Dy)
np_ngtdm[i,:], labels_ngtdm = pyfeats.ngtdm_features(plaque.plaque, plaque.mask, d=d)
np_sfm[i,:], labels_sfm = pyfeats.sfm_features(plaque.plaque, plaque.mask, Lr=Lr, Lc=Lc)
np_lte[i,:], labels_lte = pyfeats.lte_measures(plaque.plaque, plaque.mask, l=l)
np_fdta[i,:], labels_fdta = pyfeats.fdta(plaque.plaque, plaque.mask, s=s)
np_glrlm[i,:], labels_glrlm = pyfeats.glrlm_features(plaque.plaque, plaque.mask, Ng=256)
np_fps[i,:], labels_fps = pyfeats.fps(plaque.plaque, plaque.mask)
np_shape_parameters[i,:], labels_shape_parameters = pyfeats.shape_parameters(plaque.plaque, plaque.mask, plaque.perimeter, pixels_per_mm2=1)
progress.set_description('Calculating Late Textural Features')
np_hos[i,:], labels_hos = pyfeats.hos_features(plaque.plaque, th=th)
np_lbp[i,:], labels_lbp = pyfeats.lbp_features(plaque.plaque, plaque.mask, P=P, R=R)
np_glszm[i,:], labels_glszm = pyfeats.glszm_features(plaque.plaque, plaque.mask)
# B. Morphological
progress.set_description('Calculating Morphological Features' + ' for ' + name)
pdf_gray[i,:], cdf_gray[i,:] = pyfeats.grayscale_morphology_features(plaque.plaque, N=N)
pdf_L[i,:], pdf_M[i,:], pdf_H[i,:], cdf_L[i,:], cdf_M[i,:], cdf_H[i,:] = \
pyfeats.multilevel_binary_morphology_features(plaque.plaque, plaque.mask, N=N)
# C. Histogram
progress.set_description('Calculating Histogram Features' + ' for ' + name)
np_histogram[i,:], labels_histogram = pyfeats.histogram(plaque.plaque, plaque.mask, bins_hist)
np_multiregion_histogram[i,:], labels_multiregion_histogram = pyfeats.multiregion_histogram(plaque.plaque, plaque.mask, bins=bins_hist, num_eros=num_eros,square_size=square_size)
np_correlogram_d[i,:], np_correlogram_th[i,:], labels_correlogram = pyfeats.correlogram(plaque.plaque, plaque.mask, bins_digitize=bins_digitize, bins_hist=bins_hist, flatten=True)
# D. Multi-Scale
progress.set_description('Calculating Multi-Scale Features' + ' for ' + name)
np_dwt[i,:], labels_dwt = pyfeats.dwt_features(plaque.plaque, plaque.mask, wavelet=wavelet_dwt, levels=levels_dwt)
np_swt[i,:], labels_swt = pyfeats.swt_features(plaque.plaque, plaque.mask, wavelet=wavelet_swt, levels=levels_swt)
np_wp[i,:], labels_wp = pyfeats.wp_features(plaque.plaque, plaque.mask, wavelet=wavelet_wp, maxlevel=levels_wp)
np_gt[i,:], labels_gt = pyfeats.gt_features(plaque.plaque, plaque.mask)
np_amfm[i,:], labels_amfm = pyfeats.amfm_features(plaque.plaque)
# E. Other
progress.set_description('Calculating Other Features' + ' for ' + name)
np_hu[i,:], labels_hu = pyfeats.hu_moments(plaque.plaque)
np_zernikes[i,:], labels_zernikes = pyfeats.zernikes_moments(plaque.plaque, zernikes_radii)
#%% Convert to pandas
# A. Early Textural
df_fos =
|
pd.DataFrame(data=np_fos, index=names, columns=labels_fos)
|
pandas.DataFrame
|
#Instructions
#------------
#In this challenge, you are tasked with creating a Python script for analyzing the financial records of your company.
#You will be given two sets of revenue data (budget_data_1.csv and budget_data_2.csv).
#Each dataset is composed of two columns: Date and Revenue. (Thankfully, your company has rather lax standards
#for accounting so the records are simple.)
#Your task is to create a Python script that analyzes the records to calculate each of the following:
##The total number of months included in the dataset
##The total amount of revenue gained over the entire period
#The average change in revenue between months over the entire period
#The greatest increase in revenue (date and amount) over the entire period
#The greatest decrease in revenue (date and amount) over the entire period
#As an example, your analysis should look similar to the one below:
#Financial Analysis
#----------------------------
#Total Months: 25
#Total Revenue: $1241412
#Average Revenue Change: $216825
#Greatest Increase in Revenue: Sep-16 ($815531)
#Greatest Decrease in Revenue: Aug-12 ($-652794)
#Your final script should both print the analysis to the terminal
#and export a text file with the results.
#-----------------------------------------------------------------------------
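# Worked illustration of the metrics (numbers chosen for clarity, not from the datasets):
# with revenues [100, 150, 120] the month-to-month changes are [+50, -30], so the average
# revenue change is (50 - 30) / 2 = 10, the greatest increase is +50 and the greatest
# decrease is -30, each reported together with the month in which it occurred.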
import numpy as np
import pandas as pd
#import os
#import csv
file = "C:/Users/nab226/Desktop/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyBank/raw_data/budget_data_1.csv"
df = pd.read_csv(file)
#df.head()
#The total number of months included in the dataset
n_months = df.Date.count()
#n_months
print("Financial Analysis - Dataset #1")
print("----------------------------")
print("Total Months: "+str(n_months))
#The total amount of revenue gained over the entire period
total_rev = df["Revenue"].sum()
#total_rev
print("Total Revenue: $"+str(total_rev))
#The average change in revenue between months over the entire period
rev = np.array(df["Revenue"])
#rev
n_rev = rev.size
#n_rev
delta_rev = np.diff(rev)
#delta_rev
n_delta_rev = delta_rev.size
#n_delta_rev
avg_delta_rev = (delta_rev.sum())/(n_delta_rev)
avg_delta_rev = np.round(avg_delta_rev,2)
print("Average Revenue Change: $"+str(avg_delta_rev))
#The greatest increase in revenue (date and amount) over the entire period
max_rev = df["Revenue"].max()
#max_rev
min_rev = df["Revenue"].min()
#min_rev
dict_df =
|
pd.Series(df.Revenue.values,index=df.Date)
|
pandas.Series
|
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains algorithms based on linear programming techniques, including mixed-integer linear programming
"""
from __future__ import print_function
import logging
import warnings
import numpy
from IProgress.progressbar import ProgressBar
from IProgress.widgets import Bar, Percentage
from pandas import DataFrame
from sympy import Add
import cobra
from cobra.util import fix_objective_as_constraint
from cobra.exceptions import OptimizationError
from cobra.flux_analysis import find_essential_reactions
from cameo import config
from cameo import ui
from cameo.core.model_dual import convert_to_dual
from cameo.core.strain_design import StrainDesignMethodResult, StrainDesignMethod, StrainDesign
from cameo.core.target import ReactionKnockoutTarget
from cameo.core.utils import get_reaction_for
from cameo.flux_analysis.analysis import phenotypic_phase_plane, flux_variability_analysis
from cameo.flux_analysis.simulation import fba
from cameo.flux_analysis.structural import find_coupled_reactions_nullspace
from cameo.util import reduce_reaction_set, decompose_reaction_groups
from cameo.visualization.plotting import plotter
logger = logging.getLogger(__name__)
__all__ = ["OptKnock"]
class OptKnock(StrainDesignMethod):
"""
OptKnock.
OptKnock solves a bi-level optimization problem, finding the set of knockouts that allows maximal
target production under optimal growth.
Parameters
----------
model : cobra.Model
A model to be used for finding optimal knockouts. Always set a non-zero lower bound on
biomass reaction before using OptKnock.
exclude_reactions : iterable of str or Reaction objects
Reactions that will not be knocked out. Excluding reactions can give more realistic results
and decrease running time. Essential reactions and exchanges are always excluded.
remove_blocked : boolean (default True)
If True, reactions that cannot carry flux (determined by FVA) will be removed from the model.
This reduces running time significantly.
fraction_of_optimum : If not None, this value will be used to constrain the inner objective (e.g. growth) to
a fraction of the optimal inner objective value. If inner objective is not constrained manually
this argument should be used. (Default: None)
exclude_non_gene_reactions : If True (default), reactions that are not associated with genes will not be
knocked out. This results in more practically relevant solutions as well as shorter running times.
use_nullspace_simplification: Boolean (default True)
Use a basis for the nullspace to find groups of reactions whose fluxes are multiples of each other. From
each of these groups only 1 reaction will be included as a possible knockout
Examples
--------
>>> from cameo import models
>>> from cameo.strain_design.deterministic import OptKnock
>>> model = models.bigg.e_coli_core
>>> model.reactions.Biomass_Ecoli_core_w_GAM.lower_bound = 0.1
>>> model.solver = "gurobi" # Using gurobi or cplex is recommended
>>> optknock = OptKnock(model)
>>> result = optknock.run(k=2, target="EX_ac_e", max_results=3)
"""
def __init__(self, model, exclude_reactions=None, remove_blocked=True, fraction_of_optimum=0.1,
exclude_non_gene_reactions=True, use_nullspace_simplification=True, *args, **kwargs):
super(OptKnock, self).__init__(*args, **kwargs)
self._model = model.copy()
self._original_model = model
if "gurobi" in config.solvers:
logger.info("Changing solver to Gurobi and tweaking some parameters.")
if "gurobi_interface" not in model.solver.interface.__name__:
model.solver = "gurobi"
# The tolerances are set to the minimum value. This gives maximum precision.
problem = model.solver.problem
problem.params.NodeMethod = 1 # primal simplex node relaxation
problem.params.FeasibilityTol = 1e-9
problem.params.OptimalityTol = 1e-3
problem.params.IntFeasTol = 1e-9
problem.params.MIPgapAbs = 1e-9
problem.params.MIPgap = 1e-9
elif "cplex" in config.solvers:
logger.debug("Changing solver to cplex and tweaking some parameters.")
if "cplex_interface" not in self._model.solver.interface.__name__:
self._model.solver = "cplex"
problem = self._model.solver.problem
problem.parameters.mip.strategy.startalgorithm.set(1)
problem.parameters.simplex.tolerances.feasibility.set(1e-8)
problem.parameters.simplex.tolerances.optimality.set(1e-8)
problem.parameters.mip.tolerances.integrality.set(1e-8)
problem.parameters.mip.tolerances.absmipgap.set(1e-8)
problem.parameters.mip.tolerances.mipgap.set(1e-8)
else:
warnings.warn("You are trying to run OptKnock with %s. This might not end well." %
self._model.solver.interface.__name__.split(".")[-1])
if fraction_of_optimum is not None:
fix_objective_as_constraint(self._model, fraction=fraction_of_optimum)
if remove_blocked:
self._remove_blocked_reactions()
if exclude_reactions:
# Convert exclude_reactions to reaction ID's
exclude_reactions = [
r.id if isinstance(r, cobra.core.Reaction) else r for r in exclude_reactions
]
for r_id in exclude_reactions:
if r_id not in self._model.reactions:
raise ValueError("Excluded reaction {} is not in the model".format(r_id))
else:
exclude_reactions = []
if exclude_non_gene_reactions:
exclude_reactions += [r.id for r in self._model.reactions if not r.genes]
self._build_problem(exclude_reactions, use_nullspace_simplification)
def _remove_blocked_reactions(self):
fva_res = flux_variability_analysis(self._model, fraction_of_optimum=0)
# FIXME: Iterate over the index only (reaction identifiers).
blocked = [
self._model.reactions.get_by_id(reaction) for reaction, row in fva_res.data_frame.iterrows()
if (round(row["lower_bound"], config.ndecimals) == round(
row["upper_bound"], config.ndecimals) == 0)
]
self._model.remove_reactions(blocked)
def _reduce_to_nullspace(self, reactions):
self.reaction_groups = find_coupled_reactions_nullspace(self._model)
reaction_groups_keys = [set(group) for group in self.reaction_groups]
reduced_reactions = reduce_reaction_set(reactions, reaction_groups_keys)
return reduced_reactions
def _build_problem(self, exclude_reactions, use_nullspace_simplification):
logger.debug("Starting to formulate OptKnock problem")
self.essential_reactions = find_essential_reactions(self._model, processes=1).union(self._model.boundary)
        if exclude_reactions:
            self.exclude_reactions = set.union(
                self.essential_reactions,
                set(self._model.reactions.get_by_id(r) for r in exclude_reactions)
            )
        else:
            self.exclude_reactions = self.essential_reactions
reactions = set(self._model.reactions) - self.exclude_reactions
if use_nullspace_simplification:
reactions = self._reduce_to_nullspace(reactions)
else:
self.reaction_groups = None
self._make_dual()
self._combine_primal_and_dual()
logger.debug("Primal and dual successfully combined")
y_vars = {}
constrained_dual_vars = set()
for reaction in reactions:
if reaction not in self.exclude_reactions and reaction.lower_bound <= 0 <= reaction.upper_bound:
y_var, constrained_vars = self._add_knockout_constraints(reaction)
y_vars[y_var] = reaction
constrained_dual_vars.update(constrained_vars)
self._y_vars = y_vars
primal_objective = self._model.solver.objective
dual_objective = self._model.solver.interface.Objective.clone(
self._dual_problem.objective, model=self._model.solver)
reduced_expression = Add(*((c * v) for v, c in dual_objective.expression.as_coefficients_dict().items()
if v not in constrained_dual_vars))
dual_objective = self._model.solver.interface.Objective(reduced_expression, direction=dual_objective.direction)
optimality_constraint = self._model.solver.interface.Constraint(
primal_objective.expression - dual_objective.expression,
lb=0, ub=0, name="inner_optimality")
self._model.solver.add(optimality_constraint)
logger.debug("Inner optimality constrained")
logger.debug("Adding constraint for number of knockouts")
knockout_number_constraint = self._model.solver.interface.Constraint(
Add(*y_vars), lb=len(y_vars), ub=len(y_vars)
)
self._model.solver.add(knockout_number_constraint)
self._number_of_knockouts_constraint = knockout_number_constraint
def _make_dual(self):
dual_problem = convert_to_dual(self._model.solver)
self._dual_problem = dual_problem
logger.debug("Dual problem successfully created")
def _combine_primal_and_dual(self):
primal_problem = self._model.solver
dual_problem = self._dual_problem
for var in dual_problem.variables:
var = primal_problem.interface.Variable.clone(var)
primal_problem.add(var)
for const in dual_problem.constraints:
const = primal_problem.interface.Constraint.clone(const, model=primal_problem)
primal_problem.add(const)
def _add_knockout_constraints(self, reaction):
interface = self._model.solver.interface
y_var = interface.Variable("y_" + reaction.id, type="binary")
self._model.solver.add(interface.Constraint(reaction.flux_expression - 1000 * y_var, ub=0))
self._model.solver.add(interface.Constraint(reaction.flux_expression + 1000 * y_var, lb=0))
constrained_vars = []
if reaction.upper_bound != 0:
dual_forward_ub = self._model.solver.variables["dual_" + reaction.forward_variable.name + "_ub"]
self._model.solver.add(interface.Constraint(dual_forward_ub - 1000 * (1 - y_var), ub=0))
constrained_vars.append(dual_forward_ub)
if reaction.lower_bound != 0:
dual_reverse_ub = self._model.solver.variables["dual_" + reaction.reverse_variable.name + "_ub"]
self._model.solver.add(interface.Constraint(dual_reverse_ub - 1000 * (1 - y_var), ub=0))
constrained_vars.append(dual_reverse_ub)
return y_var, constrained_vars
def run(self, max_knockouts=5, biomass=None, target=None, max_results=1, *args, **kwargs):
"""
Perform the OptKnock simulation
Parameters
----------
target: str, Metabolite or Reaction
The design target
biomass: str, Metabolite or Reaction
The biomass definition in the model
max_knockouts: int
Max number of knockouts allowed
max_results: int
Max number of different designs to return if found
Returns
-------
OptKnockResult
"""
# TODO: why not required arguments?
if biomass is None or target is None:
raise ValueError('missing biomass and/or target reaction')
target = get_reaction_for(self._model, target, add=False)
biomass = get_reaction_for(self._model, biomass, add=False)
knockout_list = []
fluxes_list = []
production_list = []
biomass_list = []
loader_id = ui.loading()
with self._model:
self._model.objective = target.id
self._number_of_knockouts_constraint.lb = self._number_of_knockouts_constraint.ub - max_knockouts
count = 0
while count < max_results:
try:
solution = self._model.optimize(raise_error=True)
except OptimizationError as e:
logger.debug("Problem could not be solved. Terminating and returning " + str(count) + " solutions")
logger.debug(str(e))
break
knockouts = tuple(reaction for y, reaction in self._y_vars.items() if round(y.primal, 3) == 0)
assert len(knockouts) <= max_knockouts
if self.reaction_groups:
combinations = decompose_reaction_groups(self.reaction_groups, knockouts)
for kos in combinations:
knockout_list.append({r.id for r in kos})
fluxes_list.append(solution.fluxes)
production_list.append(solution.objective_value)
biomass_list.append(solution.fluxes[biomass.id])
else:
knockout_list.append({r.id for r in knockouts})
fluxes_list.append(solution.fluxes)
production_list.append(solution.objective_value)
biomass_list.append(solution.fluxes[biomass.id])
# Add an integer cut
y_vars_to_cut = [y for y in self._y_vars if round(y.primal, 3) == 0]
integer_cut = self._model.solver.interface.Constraint(Add(*y_vars_to_cut),
lb=1,
name="integer_cut_" + str(count))
if len(knockouts) < max_knockouts:
self._number_of_knockouts_constraint.lb = self._number_of_knockouts_constraint.ub - len(knockouts)
self._model.add_cons_vars(integer_cut)
count += 1
ui.stop_loader(loader_id)
return OptKnockResult(self._original_model, knockout_list, fluxes_list,
production_list, biomass_list, target.id, biomass)
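# --- Illustrative sketch (not part of cameo): the big-M logic used in
# _add_knockout_constraints above. With M = 1000, the constraint pair
# v - M*y <= 0 and v + M*y >= 0 collapses to -M*y <= v <= M*y, so a binary
# y = 0 forces the reaction flux v to zero. Values below are hypothetical.
def _big_m_feasible(v, y, big_m=1000):
    return (v - big_m * y <= 0) and (v + big_m * y >= 0)

assert _big_m_feasible(0.0, 0)       # knocked-out reaction carries no flux
assert not _big_m_feasible(5.0, 0)   # nonzero flux is infeasible when y == 0
assert _big_m_feasible(-3.5, 1)      # active reaction may carry flux either way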
class RobustKnock(StrainDesignMethod):
pass
class OptKnockResult(StrainDesignMethodResult):
__method_name__ = "OptKnock"
def __init__(self, model, knockouts, fluxes, production_fluxes, biomass_fluxes, target, biomass, *args, **kwargs):
super(OptKnockResult, self).__init__(self._generate_designs(knockouts), *args, **kwargs)
self._model = model
self._knockouts = knockouts
self._fluxes = fluxes
self._production_fluxes = production_fluxes
self._biomass_fluxes = biomass_fluxes
self._target = target
self._biomass = biomass
self._processed_knockouts = None
@staticmethod
def _generate_designs(knockouts):
designs = []
for knockout_design in knockouts:
            designs.append(StrainDesign([ReactionKnockoutTarget(ko) for ko in knockout_design]))
return designs
def _process_knockouts(self):
progress = ProgressBar(maxval=len(self._knockouts), widgets=["Processing solutions: ", Bar(), Percentage()])
self._processed_knockouts = DataFrame(columns=["reactions", "size", self._target,
"biomass", "fva_min", "fva_max"])
for i, knockouts in progress(enumerate(self._knockouts)):
try:
with self._model:
[self._model.reactions.get_by_id(ko).knock_out() for ko in knockouts]
fva = flux_variability_analysis(self._model, fraction_of_optimum=0.99, reactions=[self.target])
self._processed_knockouts.loc[i] = [knockouts, len(knockouts), self.production[i], self.biomass[i],
fva.lower_bound(self.target), fva.upper_bound(self.target)]
except OptimizationError:
self._processed_knockouts.loc[i] = [numpy.nan for _ in self._processed_knockouts.columns]
@property
def knockouts(self):
return self._knockouts
@property
def fluxes(self):
return self._fluxes
@property
def production(self):
return self._production_fluxes
@property
def biomass(self):
return self._biomass_fluxes
@property
def target(self):
return self._target
def display_on_map(self, index=0, map_name=None, palette="YlGnBu"):
with self._model:
for ko in self.data_frame.loc[index, "reactions"]:
self._model.reactions.get_by_id(ko).knock_out()
fluxes = fba(self._model)
fluxes.display_on_map(map_name=map_name, palette=palette)
def plot(self, index=0, grid=None, width=None, height=None, title=None, palette=None, **kwargs):
wt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass.id])
with self._model:
for ko in self.data_frame.loc[index, "reactions"]:
self._model.reactions.get_by_id(ko).knock_out()
mt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass.id])
if title is None:
title = "Production Envelope"
dataframe = DataFrame(columns=["ub", "lb", "value", "strain"])
for _, row in wt_production.iterrows():
_df = DataFrame([[row['objective_upper_bound'], row['objective_lower_bound'], row[self._biomass.id], "WT"]],
columns=dataframe.columns)
dataframe = dataframe.append(_df)
for _, row in mt_production.iterrows():
_df = DataFrame([[row['objective_upper_bound'], row['objective_lower_bound'], row[self._biomass.id], "MT"]],
columns=dataframe.columns)
dataframe = dataframe.append(_df)
plot = plotter.production_envelope(dataframe, grid=grid, width=width, height=height, title=title,
x_axis_label=self._biomass.id, y_axis_label=self._target, palette=palette)
plotter.display(plot)
@property
def data_frame(self):
if self._processed_knockouts is None:
self._process_knockouts()
data_frame =
|
DataFrame(self._processed_knockouts)
|
pandas.DataFrame
|
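# --- Illustrative sketch (not part of cameo): filling a summary table row by row
# with DataFrame.loc, as OptKnockResult._process_knockouts does above. The column
# names and values here are hypothetical.
from pandas import DataFrame

summary = DataFrame(columns=["reactions", "size", "production", "biomass"])
summary.loc[0] = [{"PGI", "PFK"}, 2, 1.8, 0.12]
summary.loc[1] = [{"TPI"}, 1, 0.9, 0.21]
print(DataFrame(summary))  # wrapping in DataFrame() mirrors the data_frame property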
import sys
import os
import pandas as pd
class UserError(Exception):
"""Errors regarding operations on user field of credentials."""
pass
class IAM:
"""Identity and Access Manager"""
# credentials storage path
_path: str = "credentials.csv"
# local timezone
_tz_local: str = 'utc'
# needed columns in credentials dataframe
_columns: list = ["user", "role", "password", "creation"]
def __init__(self, path: str = "credentials.csv", tz_local = "utc"):
"""Parameters:
- path: path of a csv file containing credentials with columns user, role, password, creation;
- tz_local: local timezone used for timestamps."""
self._path = path
self._tz_local = tz_local
def _df_is_valid(self, /, df: pd.DataFrame) -> bool:
for col in self._columns:
if col not in df.columns.tolist():
# missing column
return False
# df is valid
return True
def _read_df(self, check: bool = True) -> pd.DataFrame:
# read df
if os.path.exists(self._path):
df =
|
pd.read_csv(self._path, usecols=self._columns)
|
pandas.read_csv
|
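# --- Illustrative sketch (separate from the IAM class above): reading a credentials
# CSV restricted to the expected columns, with an empty fallback frame when the file
# is missing. Column names mirror IAM._columns; the path is hypothetical.
import os
import pandas as pd

_cred_columns = ["user", "role", "password", "creation"]
_cred_path = "credentials.csv"  # hypothetical location
if os.path.exists(_cred_path):
    credentials = pd.read_csv(_cred_path, usecols=_cred_columns)
else:
    credentials = pd.DataFrame(columns=_cred_columns)
print(credentials.columns.tolist())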
from grdb.database.v1_1_0.models import (
Sample,
PreparationStep,
Recipe,
Properties,
RamanSet,
RamanFile,
RamanSpectrum,
SemFile,
Author,
Software,
)
# from gresq.recipe import Recipe
from sqlalchemy import String, Integer, Float
from gresq.util.box_adaptor import BoxAdaptor
import uuid
import pandas as pd
import os
import sys
from datetime import date
par = os.path.abspath(os.path.pardir)
sys.path.append(os.path.join(par, "src", "gresq", "dashboard", "gsaraman", "src"))
from gsaraman.gsaraman import auto_fitting
from gresq.dashboard.submit.util import get_or_add_software_row
from gresq import __version__ as GRESQ_VERSION
from gsaimage import __version__ as GSAIMAGE_VERSION
from gsaraman import __version__ as GSARAMAN_VERSION
sample_key = {"experiment_date": "DATE"}
properties_key = {
"average_thickness_of_growth": "PROPERTY: Average Thickness of Growth (nm)",
"standard_deviation_of_growth": "PROPERTY: Standard Deviation of Growth (nm)",
"number_of_layers": "PROPERTY: Number of Layers",
"growth_coverage": "PROPERTY: Growth Coverage (%)",
}
recipe_key = {
"sample_surface_area": r"PROPERTY: Sample Surface Area (mm$\^2$)",
"thickness": r"PROPERTY: Thickness ($\mu$m)",
"tube_diameter": "ALL CONDITION: Tube Diameter (mm)",
"tube_length": "ALL CONDITION: Tube Length (mm)",
"catalyst": "ALL CONDITION: Catalyst",
"cross_sectional_area": "ALL CONDITION: Cross Sectional Area (mm^2)",
"base_pressure": "ALL CONDITION: Base Pressure (mTorr)",
}
preparation_step_key = {
"duration": "PREPARATION STEP DETAIL: Timestamp",
"furnace_temperature": "PREPARATION STEP DETAIL: Furnace Temperature",
"furnace_pressure": "PREPARATION STEP DETAIL: Furnace Pressure",
"sample_location": "PREPARATION STEP DETAIL: Sample Location",
"helium_flow_rate": "PREPARATION STEP DETAIL: Helium Flow Rate", ## l/s vs sccm
"hydrogen_flow_rate": "PREPARATION STEP DETAIL: Hydrogen Flow Rate",
"carbon_source": "PREPARATION STEP DETAIL: Carbon Source",
"carbon_source_flow_rate": "PREPARATION STEP DETAIL: Carbon Source Flow Rate",
"argon_flow_rate": "PREPARATION STEP DETAIL: Argon Flow Rate",
}
sql_validator = {
"int": lambda x: isinstance(x.property.columns[0].type, Integer),
"float": lambda x: isinstance(x.property.columns[0].type, Float),
"str": lambda x: isinstance(x.property.columns[0].type, String),
}
def convert(value, field, header=None):
if sql_validator["int"](field):
return int(value)
elif sql_validator["float"](field):
value = float(value)
if "mTorr" in header:
value /= 1000
return value
else:
return str(value)
def upload_file(
box_adaptor,
file_path,
folder_name=None,
box_config_path="/Users/Joshua_Schiller/Dropbox/GSAMain/src/box_config.json",
):
upload_folder = box_adaptor.create_upload_folder(folder_name=folder_name)
box_file = box_adaptor.upload_file(upload_folder, file_path, str(uuid.uuid4()))
return box_file.get_shared_link_download_url(access="open")
def get_filepaths(reference_id, folder_path="./"):
contents = os.listdir(os.path.join(folder_path, reference_id))
raman = []
sem = []
for f in contents:
if f.split(".")[-1] == "txt":
raman.append(f)
elif f.split(".")[-1] == "tif":
sem.append(f)
return raman, sem
def convert_date(d):
words = d.split("/")
month = int(words[0])
day = int(words[1])
year = int("20" + words[2])
return date(year, month, day)
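# --- Usage check (hypothetical input): convert_date() above turns the sheet's
# M/D/YY strings into datetime.date objects.
assert convert_date("8/27/19") == date(2019, 8, 27)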
def convert_db(data):
columns = data.columns
    for i in range(data.shape[0]):
        for col in columns:
            if "Torr l/s" in col:
                value = data[col][i]
                if not pd.isnull(value):
                    value = float(value)
                    new_col = col.replace("Torr l/s", "sccm")
                    if pd.isnull(data[new_col][i]):
                        data.loc[i, new_col] = value / 0.01270903
new_cols = [col for col in columns if "Torr l/s" not in col]
data = data[new_cols].copy()
return data
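# --- Illustrative sketch (separate from convert_db above): the flow-rate unit
# conversion it performs, filling an empty "sccm" column from the matching
# "Torr l/s" column using the same 0.01270903 divisor. Column names and the
# sample value are hypothetical.
import pandas as pd

flows = pd.DataFrame({"He Flow (Torr l/s)": [0.0254], "He Flow (sccm)": [float("nan")]})
flows["He Flow (sccm)"] = flows["He Flow (sccm)"].fillna(
    flows["He Flow (Torr l/s)"] / 0.01270903
)
print(flows)  # the converted value is roughly 2.0 sccm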
def build_db(session, filepath, sem_raman_path=None, nrun=None, box_config_path=None):
data = pd.read_csv(os.path.join(filepath, "recipe_2019-08-27.csv"))
data = convert_db(data)
box_adaptor = BoxAdaptor(box_config_path)
name_idxs = []
cooling_idx = None
for c, col in enumerate(data.columns):
if "PREPARATION STEP NAME" in col:
name_idxs.append(c)
elif "Cooling Rate" in col:
cooling_idx = c
annealing_df = data.iloc[:, name_idxs[0] + 1 : name_idxs[1]].copy()
growing_df = data.iloc[:, name_idxs[1] + 1 : name_idxs[2]].copy()
cooling_df = data.iloc[:, cooling_idx + 1 :].copy()
cooling_rate = data.iloc[:, cooling_idx].copy()
box_folder = data["BOX FOLDER"].copy()
author_column = data["CONTRIBUTOR"].copy()
# Check software versions
gresq_soft = get_or_add_software_row(session, "gresq", GRESQ_VERSION)
gsaimage_soft = get_or_add_software_row(session, "gsaimage", GSAIMAGE_VERSION)
gsaraman_soft = get_or_add_software_row(session, "gsaraman", GSARAMAN_VERSION)
    if nrun is None:
nrun = data.shape[0]
for i in range(nrun):
if "Kaihao" in author_column[i]:
s = Sample(
software_name=gresq_soft.name, software_version=gresq_soft.version
)
s.material_name = "Graphene"
s.validated = True
date_string = data[sample_key["experiment_date"]][i]
            if not pd.isnull(date_string):
s.experiment_date = convert_date(date_string)
session.add(s)
session.flush()
pr = Properties()
pr.sample_id = s.id
for key, header in properties_key.items():
value = data[header][i]
if
|
pd.isnull(value)
|
pandas.isnull
|
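# --- Illustrative sketch (separate from build_db above): guarding attribute
# assignment with pandas.isnull so NaN spreadsheet cells are skipped. The row
# class and header name are hypothetical stand-ins for the ORM models used above.
import pandas as pd

class _Row:
    pass

sheet = pd.DataFrame({"PROPERTY: Number of Layers": [1.0, None]})
row = _Row()
cell = sheet["PROPERTY: Number of Layers"][0]
if not pd.isnull(cell):
    row.number_of_layers = int(cell)
print(vars(row))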
import ipywidgets
import numpy as np
import pandas as pd
import pathlib
import datetime as dt
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure, gridplot
from bokeh.models import ColumnDataSource, RangeTool, Circle, Slope, Label
from bokeh.layouts import gridplot, column, row
class view_wind:
def __init__(self):
self.ep_columns_filtered = ['date','time', 'H', 'qc_H', 'LE', 'qc_LE','sonic_temperature', 'air_temperature', 'air_pressure', 'air_density',
'ET', 'e', 'es', 'RH', 'VPD','Tdew', 'u_unrot', 'v_unrot', 'w_unrot', 'u_rot', 'v_rot', 'w_rot', 'wind_speed', 'max_wind_speed', 'wind_dir', 'u*', '(z-d)/L',
'un_H', 'H_scf', 'un_LE', 'LE_scf','u_var', 'v_var', 'w_var', 'ts_var']
self.lf_columns_filtered = ['TIMESTAMP','Hs','u_star','Ts_stdev','Ux_stdev','Uy_stdev','Uz_stdev','Ux_Avg', 'Uy_Avg', 'Uz_Avg',
'Ts_Avg', 'LE_wpl', 'Hc','H2O_mean', 'amb_tmpr_Avg', 'amb_press_mean', 'Tc_mean', 'rho_a_mean','CO2_sig_strgth_mean',
'H2O_sig_strgth_mean','T_tmpr_rh_mean', 'e_tmpr_rh_mean', 'e_sat_tmpr_rh_mean', 'H2O_tmpr_rh_mean', 'RH_tmpr_rh_mean',
'Rn_Avg', 'albedo_Avg', 'Rs_incoming_Avg', 'Rs_outgoing_Avg', 'Rl_incoming_Avg', 'Rl_outgoing_Avg', 'Rl_incoming_meas_Avg',
'Rl_outgoing_meas_Avg', 'shf_Avg(1)', 'shf_Avg(2)', 'precip_Tot', 'panel_tmpr_Avg']
self.TOOLS="pan,wheel_zoom,box_select,lasso_select,save,reset"
output_notebook()
self.tabs = ipywidgets.Tab([self.tab00(), self.tab01(), self.tab02()])
self.tabs.set_title(0, 'EP - Master Folder')
self.tabs.set_title(1, 'LowFreq - Master Folder')
self.tabs.set_title(2, 'Plot')
self.source_ep = ColumnDataSource(data=dict(x=[], u=[], u_c=[], v=[],v_c=[],w=[],w_c=[],teste_x=[],teste_y=[]))
self.fig_01 = figure(title='Uncorrected', plot_height=250, plot_width=700, x_axis_type='datetime', tools=self.TOOLS)
circle_u_uncorrected = self.fig_01.circle(x='x',y='u', source=self.source_ep,color='blue', legend_label='u')
circle_v_uncorrected = self.fig_01.circle(x='x',y='v', source=self.source_ep,color='red', legend_label='v')
circle_w_uncorrected = self.fig_01.circle(x='x',y='w', source=self.source_ep,color='green', legend_label='w')
self.fig_01.legend.location = 'top_left'
self.fig_01.legend.click_policy='hide'
self.fig_02 = figure(title='Corrected', plot_height=250, plot_width=700, x_axis_type='datetime', x_range=self.fig_01.x_range)
circle_u_corrected = self.fig_02.circle(x='x',y='u_c', source=self.source_ep,color='blue', legend_label='u_c')
circle_v_corrected = self.fig_02.circle(x='x',y='v_c', source=self.source_ep,color='red', legend_label='v_c')
circle_w_corrected = self.fig_02.circle(x='x',y='w_c', source=self.source_ep,color='green', legend_label='w_c')
self.fig_02.legend.location = 'top_left'
self.fig_02.legend.click_policy='hide'
# wind_data = dict(inner=)
self.source_ep2 = ColumnDataSource(data=dict(inner=[0],outer=[1],start=[0],end=[2]))
self.fig_03 = figure(title='tes',plot_height=500, plot_width=500)
self.fig_03.xgrid.grid_line_color = None
self.fig_03.ygrid.grid_line_color = None
# self.fig_03.wedge(x=[0,0,0], y=[0,0,0], radius=[1,2,3], start_angle=[0,0.5,1], end_angle=[0.5,1,1.5])
# bars = self.fig_03.vbar(x='teste_x', width=0.5, bottom=0, top='teste_y', source=self.source_ep)
wedge = self.fig_03.annular_wedge(x=0, y=0, inner_radius='inner', outer_radius='outer', start_angle='start', end_angle='end',color='#FF00FF',source=self.source_ep2)
circle = self.fig_03.circle(x=0, y=0, radius=[0.25,0.5,0.75,1], fill_color=None,line_color='white')
self.fig_03.annular_wedge(x=0, y=0, inner_radius='inner', outer_radius='outer', start_angle='start', end_angle='end', line_color='white',fill_color=None, line_width=1,source=self.source_ep2)
c = column([self.fig_01, self.fig_02])
# c = gridplot([[self.fig_01],[self.fig_02]])
display(self.tabs)
show(row(c,self.fig_03), notebook_handle=True)
def tab00(self):
self.out_00 = ipywidgets.Output()
with self.out_00:
self.path_EP = ipywidgets.Text(placeholder='Path EP output',
layout=ipywidgets.Layout(width='90%'))
self.button_path_ep = ipywidgets.Button(description='Show EP')
self.button_path_ep.on_click(self._button_Path)
self.select_meta = ipywidgets.Select(description='Configs:',
layout=ipywidgets.Layout(width='90%'),
style={'description_width':'initial'})
self.select_meta.observe(self._select_config, 'value')
return ipywidgets.VBox([ipywidgets.HBox([self.path_EP, self.button_path_ep]),
self.select_meta,
self.out_00])
def tab01(self):
self.out_01 = ipywidgets.Output()
with self.out_01:
self.path_LF = ipywidgets.Text(placeholder='Path LF output',
layout=ipywidgets.Layout(width='90%'))
self.button_path_lf = ipywidgets.Button(description='Show LF')
self.button_path_lf.on_click(self._button_Path)
self.html_lf = ipywidgets.HTML()
return ipywidgets.VBox([self.out_01,
ipywidgets.HBox([self.path_LF, self.button_path_lf]),
self.html_lf])
def tab02(self):
self.out_02 = ipywidgets.Output()
with self.out_02:
self.button_plot = ipywidgets.Button(description='Plot')
# self.button_plot.on_click(self.update_ep)
self.button_plot.on_click(self._button_plot)
self.date_range = ipywidgets.SelectionRangeSlider(description='Date Range:', options=[1,2], layout={'width': '1000px'})
self.date_range.observe(self.update_byDate, 'value')
self.hour_range = ipywidgets.SelectionRangeSlider(description='Hour Range:', options=[1,2], layout=ipywidgets.Layout(width='1000px'))
self.hour_range.observe(self.update_byDate, 'value')
return ipywidgets.VBox([self.out_02,
self.button_plot,
self.date_range,
self.hour_range])
def _button_Path(self, *args):
if self.tabs.selected_index == 0:
with self.out_00:
try:
self.folder_path_ep = pathlib.Path(self.path_EP.value)
readme = self.folder_path_ep.rglob('Readme.txt')
readme_df = pd.read_csv(list(readme)[0], delimiter=',')
temp_list = [row.to_list() for i,row in readme_df[['rotation', 'lowfrequency','highfrequency','wpl','flagging','name']].iterrows()]
a = []
self.config_name = []
for i in temp_list:
self.config_name.append(i[5])
a.append('Rotation:{} |LF:{} |HF:{} |WPL:{} |Flag:{}'.format(i[0],i[1],i[2],i[3],i[4]))
self.select_meta.options = a
                except Exception:
                    print('Error')
if self.tabs.selected_index == 1:
with self.out_01:
try:
self.folder_path_lf = pathlib.Path(self.path_LF.value)
lf_files = self.folder_path_lf.rglob('TOA5*.flux.dat')
self.dfs_02_01 = []
for file in lf_files:
# print(file)
self.dfs_02_01.append(pd.read_csv(file, skiprows=[0,2,3], parse_dates=['TIMESTAMP'],na_values='NAN', usecols=self.lf_columns_filtered))
self.dfs_concat_02_01 = pd.concat(self.dfs_02_01)
# self.dropdown_yAxis_lf.options = self.lf_columns_filtered
                    self.html_lf.value = "<table> <tr><td><span style='font-weight:bold'>Number of Files:</span></td> <td>{}</td></tr><tr><td><span style='font-weight:bold'>Begin:</span></td> <td>{}</td></tr> <tr> <td><span style='font-weight:bold'>End:</span></td><td>{}</td> </tr>".format(len(self.dfs_02_01), self.dfs_concat_02_01['TIMESTAMP'].min(),self.dfs_concat_02_01['TIMESTAMP'].max())
                except Exception:
                    print('error')
def _select_config(self, *args):
with self.out_00:
# self.dfs_01_01 = []
# for i in self.select_meta.index:
full_output_files = self.folder_path_ep.rglob('*{}*_full_output*.csv'.format(self.config_name[self.select_meta.index]))
dfs_single_config = []
for file in full_output_files:
dfs_single_config.append(pd.read_csv(file, skiprows=[0,2], na_values=-9999, parse_dates={'TIMESTAMP':['date', 'time']}, usecols=self.ep_columns_filtered))
# self.df_ep = pd.read_csv(file, skiprows=[0,2], na_values=-9999, parse_dates={'TIMESTAMP':['date', 'time']}, usecols=self.ep_columns_filtered)
self.df_ep = pd.concat(dfs_single_config)
def _button_plot(self, *args):
with self.out_02:
self.dfs_compare = pd.merge(left=self.dfs_concat_02_01, right=self.df_ep, how='outer', on='TIMESTAMP', suffixes=("_lf","_ep"))
# self.date_range.options = self.dfs_compare['TIMESTAMP'].to_list()
self.date_range.options = self.dfs_compare['TIMESTAMP'].dt.date.unique()
self.hour_range.options = sorted(list(self.dfs_compare['TIMESTAMP'].dt.time.unique()))
self.theta = np.linspace(0,360,36)
theta1 = np.linspace(0,360,37)
self.dfs_compare['wind_bin'] =
|
pd.cut(x=self.dfs_compare['wind_dir'], bins=theta1)
|
pandas.cut
|
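# --- Illustrative sketch (independent of the dashboard class above): binning wind
# directions into 10-degree sectors with pandas.cut, as done for the wind-rose data.
# The three readings are hypothetical.
import numpy as np
import pandas as pd

edges = np.linspace(0, 360, 37)                    # 36 bins of 10 degrees each
wind_dir = pd.Series([5.0, 123.4, 359.9])
print(pd.cut(wind_dir, bins=edges))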
import glob
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def plot_roc_prc(path: str) -> None:
"""
Aggregates CV results for different choices of k and creates a plot with ROC-AUC
and PRC curve. Also saves df with mean/stds
Args:
path (str): Path to a folder with files called `knn_{k}_cv_results.csv` with
            different choices of k (e.g. generated with knn_baseline.py)
"""
dfs = []
for filepath in glob.iglob(path + '/*.csv'):
k = int(filepath.split('/')[-1].split('_')[1])
df =
|
pd.read_csv(filepath)
|
pandas.read_csv
|
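# --- Illustrative check (assumption: CV result files follow the
# `knn_{k}_cv_results.csv` naming that plot_roc_prc above relies on; the path is
# hypothetical).
filepath = "results/knn_15_cv_results.csv"
k = int(filepath.split('/')[-1].split('_')[1])
assert k == 15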
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
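# --- Illustrative usage (not part of the test suite): a frame wider than the
# configured display width gets a wrapped repr, which has_expanded_repr() detects
# via the trailing backslash. Guarded so it does not run during test collection.
if __name__ == "__main__":
    wide_df = DataFrame("x", index=range(3), columns=range(30))
    with option_context(
        "display.expand_frame_repr", True, "display.width", 40, "display.max_columns", 40
    ):
        print(has_expanded_repr(wide_df))  # expected to print True under these options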
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
|
set_option("display.expand_frame_repr", False)
|
pandas.set_option
|
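# --- Illustrative sketch (separate from the test suite above): how
# display.expand_frame_repr changes a wide frame's repr, mirroring what the
# wide-repr tests exercise. The frame and option values are hypothetical.
import numpy as np
import pandas as pd

wide = pd.DataFrame(np.arange(40).reshape(2, 20))
with pd.option_context("display.expand_frame_repr", False,
                       "display.width", 40, "display.max_columns", 25):
    print(repr(wide))   # one wide block; lines simply overflow the 40-char width
with pd.option_context("display.expand_frame_repr", True,
                       "display.width", 40, "display.max_columns", 25):
    print(repr(wide))   # wrapped into column blocks; continuation lines end in "\"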
import os
import sys
import numpy as np
import pandas as pd
import xlwings as xw
from logzero import logger
from openpyxl import load_workbook
from openpyxl.styles import Alignment
from spareparts.lib.colors import Colors
from spareparts.lib.filters import (
trash_assemblies,
trash_description,
trash_fastener,
trash_file_name,
trash_item_number,
trash_parts_ending_P1_or_A1,
trash_prp,
trash_prp1,
trash_robot,
)
from spareparts.lib.settings import (
JDEPATH,
blue,
dict_header,
excel_headers,
headers_bg_hue,
mauve,
orange,
splname,
temp_jde,
template1,
template2,
tempo_local,
)
from yaspin import Spinner, yaspin
sp = Spinner([
"[ ]",
"[= ]",
"[== ]",
"[=== ]",
"[ ===]",
"[ ==]",
"[ =]",
"[ ]",
"[ =]",
"[ ==]",
"[ ===]",
"[====]",
"[=== ]",
"[== ]",
"[= ]"
], 80)
class Spareparts:
"""Generate spareparts list."""
JDE_TEMP = os.path.join(tempo_local, temp_jde)
def __init__(self):
self.jde = self.load_jde_data()
self.db = pd.DataFrame()
self.spl = pd.DataFrame()
self.asm = pd.DataFrame()
self.elec = pd.DataFrame()
self.garbage = pd.DataFrame()
self.nuts = pd.DataFrame()
self.plates = pd.DataFrame()
self.gearbox = pd.DataFrame()
self.drawings = {}
def generate_spl(self):
if os.path.exists("SPL.xlsx"):
raise FileExistsError(
"Remove or rename the SPL.xlsx in the current folder to able the process to run."
)
        has_text_reports = any(f.endswith(".txt") for f in os.listdir("."))
if not has_text_reports:
raise FileNotFoundError(
"No text file report has been found in the current folder."
)
files = (file for file in Spareparts.listing_txt_files())
parts = pd.concat(
[Spareparts.parse_se_report(file) for file in files], ignore_index=True
)
self.spl = Spareparts.joining_spl_jde(self.jde, parts)
self.spl.part_number = (
self.spl.part_number.str.strip()
) # strip part_number column
def load_db(self):
"""Load the item-level database"""
db_model = os.path.join(tempo_local, "levels.csv")
if not os.path.exists(db_model):
raise FileNotFoundError("No file levels.csv found in user tempo.\n")
df = pd.read_csv(db_model, dtype={"possibility": str})
df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df.item_number = df.item_number.astype(str)
df.item_number = df.item_number.str.strip()
df.possibility = df.possibility.astype(str)
df.possibility = df.possibility.str.strip()
self.db = df[["item_number", "possibility"]]
self.spl = self.spl.join(self.db.set_index("item_number"), on="jdelitm")
@staticmethod
def loading_spl(path):
"""load the data from spl list"""
if not os.path.exists(path):
raise FileNotFoundError("Check if spl path is correct.")
spl = pd.read_excel(path, sheet_name="Sheet1")
spl.columns = spl.columns.str.strip().str.lower().str.replace(" ", "_")
spl.item_number = spl.item_number.astype("str")
spl = spl[["item_number"]]
return spl
@staticmethod
def load_jde_data():
JDE_TEMP = Spareparts.JDE_TEMP
if os.path.exists(JDE_TEMP):
answer = input(
f"Do you want to load the temporary jde? (fast) \n Path:{JDE_TEMP}\n Proceed ([y]/n) ?:"
)
if answer.lower() in ["yes", "y"]:
jde_temp = pd.read_csv(JDE_TEMP)
return jde_temp
else:
print("Process interrupted.")
sys.exit()
else:
with yaspin(sp, side="right", text="Loading the JDE Inventory..."):
jde_data = Spareparts.extract_jde()
jde_data.to_csv(JDE_TEMP, index=False)
return jde_data
@staticmethod
def extract_jde():
""""""
# add a try - except (in case the file is not found)
# logger.info()
df = pd.read_excel(
JDEPATH,
sheet_name=0,
skiprows=[0, 1, 2, 3],
usecols="A,C,P,E,H,I,K,O,U,X,AA,AR,AT,CB",
dtype={"Business Unit": int, "Unit Cost": float},
)
df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df = df[df.business_unit == 101]
return df
@staticmethod
def parse_se_report(fichier):
""""""
try:
# add try and except
df = pd.read_csv(
fichier,
delimiter="\t",
skiprows=[0, 2],
header=1,
names=[
"Part Number",
"Revision",
"DSC_A",
"JDELITM",
"DIM",
"Quantity",
"File Name",
],
index_col=False,
encoding="latin3",
error_bad_lines=False,
na_values="-",
)
except pd.errors.ParserError as parse_error:
# Wrong format of text extracted from solidedge.
logger.error(f" [-][{parse_error}]")
sys.exit()
else:
# clean the columns
df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df["jdelitm"] = df["jdelitm"].str.strip()
df = Spareparts.replacing_C01(df)
df["quantity"] = pd.to_numeric(df["quantity"], errors="coerce")
df = df.groupby(
["part_number", "revision", "dsc_a", "dim", "jdelitm", "file_name"],
as_index=False,
)["quantity"].sum()
df = df.replace(r"^-?\s+$", np.nan, regex=True)
df = df.dropna(subset=["part_number", "jdelitm"]) #TODO: Keep the JDELITM columns in spl.
# give the module number
module_number = os.path.splitext(os.path.basename(fichier))[0]
df["module"] = module_number
logger.info(" [+][\t %s }\t]" % module_number)
return df
finally:
df = None
@staticmethod
def listing_txt_files():
""""""
return (file for file in os.listdir(".") if file.endswith(".txt"))
@staticmethod
def replacing_C01(df):
"""Replacing 123456_C01 to 123456."""
pat = r"(?P<number>\d{6})(?P<suffixe>_C\d{2})"
repl = lambda m: m.group("number")
df["part_number"] = df["part_number"].str.replace(pat, repl)
return df
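# Illustrative sketch (not part of the original module): applied to a tiny
# throwaway frame, the pattern above strips the revision suffix, e.g.
#   Spareparts.replacing_C01(pd.DataFrame({"part_number": ["123456_C01", "654321"]}))
#   # -> part_number becomes ["123456", "654321"]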
@staticmethod
def joining_spl_jde(jde, parts):
"""transform the jde column to string format
join the parts documents with the jde on jdelitm column
and sort it on column:module
"""
jde.item_number = jde.item_number.astype(str)
spl = parts.join(jde.set_index("item_number"), on="jdelitm").sort_values(
"module"
)
return spl
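# Usage sketch (illustrative, not original code):
#   spl = Spareparts.joining_spl_jde(jde_df, parts_df)
# where jde_df carries an "item_number" column and parts_df a "jdelitm" column;
# the result has one row per report line, enriched with the matching JDE fields.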
def part_type(self):
"""create a column type --> .par .psm .asm"""
self.spl["type"] = self.spl.file_name.str.split(".").str[-1].str.strip()
self.spl.type = self.spl.type.str.lower()
def lines_numbers(self):
logger.info(
"\n\n"
"Qty/Groups :\n"
"-------------------------\n"
f"spl :\t{self.spl.shape[0]}\n"
f"garbage :\t{self.garbage.shape[0]}\n"
f"plates :\t{self.plates.shape[0]}\n"
f"elec :\t{self.elec.shape[0]}\n"
f"asm :\t{self.asm.shape[0]}\n"
f"nuts :\t{self.nuts.shape[0]}\n"
"-------------------------\n\n"
)
@yaspin(sp, side="right", text="Creating excel file, do not close the window ")
def create_excel(self, given_name_xlsx):
"""fill the tabs in excel file with the dataframes"""
tabs = {
"nuts": self.nuts,
"asm": self.asm,
"plates": self.plates,
"elec": self.elec,
"gearbox": self.gearbox,
"garbage": self.garbage,
"spl": self.spl,
}
wb = xw.Book() # this will create a new workbook
for tab in tabs.keys():
sht = wb.sheets.add(tab)
for tab, df in tabs.items():
sht = wb.sheets[
tab
] # skip the Sheet1 and create spl within a loop for all tab
sht.range("A1").value = excel_headers # insert headers (horizontal)
sht.range("A1:R1").api.Font.Bold = True # bold headers (horizontal)
for rang, color in headers_bg_hue.items():
xw.Range(rang).color = color
for colum, data in dict_header.items():
sht.range(colum).options(index=False, header=False).value = df[data]
sht.autofit()
wb.sheets[-1].delete()
wb.save(given_name_xlsx)
wb.close()
logger.info(f"{template1}: created")
@staticmethod
@yaspin(sp, side="right", text="Editing excel file, do not close the window ")
def edit_excel(file_name, new_name):
wb = load_workbook(file_name)
for s in wb.sheetnames:
ws = wb[s]
MAX_ = ws.max_row
field = f"A1:X{MAX_}"
ws.auto_filter.ref = field
for sheet in wb.sheetnames:
ws = wb[sheet]
significance_column = ws["F"]
for cell in significance_column:
cell.alignment = Alignment(horizontal="center")
wb.save(new_name)
wb.close()
logger.info(f"{template2}: created")
def refine(self):
ambiguous = self.spl[
~(
(self.spl.possibility == "1")
| (self.spl.possibility == "2")
| (self.spl.possibility == "3")
)
]
ambiguous_items = (
ambiguous.part_number.str.strip().tolist()
) # Whitespaces stripped here
for itm in ambiguous_items:
mdl = self.spl.loc[itm, "module"] # module => mdl
self.spl.loc[
self.spl.part_number == itm, "possibility"
] = self.db.loc[itm, mdl]
@Colors.obsolete(mauve)
@Colors.meter_foot(blue)
@Colors.electric(["Electric Component"], orange)
def extraction(splname, workbook, sht_name):
df =
|
pd.read_excel(splname, sheet_name=sht_name)
|
pandas.read_excel
|
import abc
import math
import numpy as np
import pandas as pd
import tensorflow as tf
from dataclasses import dataclass
from pathlib import Path
try:
from emnist import extract_samples
except ModuleNotFoundError:
pass
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.preprocessing import MinMaxScaler
from scipy.io import arff
from typing import List, Callable, Union, Tuple
from libs.DataTypes import AutoencoderLayers
from utils import BASE_PATH
@dataclass
class DataLabels:
"""
Class storing test/train data
"""
# We'll put everything in the train data if no test data was given and split later
x_train: np.ndarray # Train data
y_train: np.ndarray
x_test: np.ndarray = None # Test data
y_test: np.ndarray = None
x_val: np.ndarray = None # Validation data
y_val: np.ndarray = None
# If needed: a scaler
scaler: TransformerMixin = None
# Configuration
test_split: float = .2 # Test data percentage
val_split: float = .05 # Validation data percentage
random_state: int = None # Random seed
# Metadata
shape: tuple = None # Shape of the data
available_classes: Union[List[int], List[str]] = None # all available classes
## Class methods
def __repr__(self):
return self.__class__.__name__
## Retrievers
def get_target_autoencoder_data(
self, data_split: str,
drop_classes: Union[List[int], List[str]] = None, include_classes: Union[List[int], List[str]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Get data useful for autoencoders
:param data_split: get data of either "train", "val" or "test"
:param drop_classes: which classes to drop, drop none if None
:param include_classes: which classes to include (has priority over drop_classes)
:return: features and labels
"""
# Get data
this_data = self._get_data_set(data_split=data_split)
# Drop the classes
if include_classes:
drop_classes = self.include_to_drop(include_classes)
this_x = np.delete(this_data[0], np.where(np.isin(this_data[1], drop_classes)), axis=0)
# For the autoencoder, we don't need much else than x
return this_x, this_x
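# Usage sketch (assumption, not from the original code base): for an
# autoencoder the same array serves as both input and target, e.g.
#   x, y = labels.get_target_autoencoder_data("train", include_classes=[0, 1])
#   autoencoder.fit(x, y)  # hypothetical Keras-style model, illustration only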
def get_target_classifier_data(
self, data_split: str,
drop_classes: Union[List[int], List[str]] = None, include_classes: Union[List[int], List[str]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Get data for useful for classifiers
:param data_split: get data of either "train", "val" or "test"
:param drop_classes: which classes to drop, drop none if None
:param include_classes: which classes to include (has priority over drop_classes)
:return: features and labels
"""
# Get data
this_data = self._get_data_set(data_split=data_split)
# Drop the classes
if include_classes:
drop_classes = self.include_to_drop(include_classes)
this_x = np.delete(this_data[0], np.where(np.isin(this_data[1], drop_classes)), axis=0)
this_y = np.delete(this_data[1], np.where(np.isin(this_data[1], drop_classes)), axis=0)
# Return the data
return this_x, this_y
def get_alarm_data(
self, data_split: str, anomaly_classes: Union[List[int], List[str]], drop_classes: List[int] = None,
include_classes: List[int] = None,
n_anomaly_samples: int = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the labels for the alarm network, i.e. with binary anomaly labels
:param data_split: get data of either "train", "val" or "test"
:param anomaly_classes: classes marked as anomaly
:param drop_classes: which classes to drop (none if None)
:param include_classes: which classes to include (has priority over drop_classes)
:param n_anomaly_samples: reduce the number of anomaly samples
:return: features and labels
"""
# Get data
this_data = self._get_data_set(data_split=data_split)
# Drop the classes
if include_classes:
drop_classes = self.include_to_drop(include_classes)
this_x = np.delete(this_data[0], np.where(np.isin(this_data[1], drop_classes)), axis=0)
this_y = np.delete(this_data[1], np.where(np.isin(this_data[1], drop_classes)), axis=0)
# Make labels binary
this_y[np.where(~np.isin(this_y, anomaly_classes))] = -1
this_y[np.where(np.isin(this_y, anomaly_classes))] = 0
this_y += 1
this_y = this_y.astype("uint8")
# If desired, reduce the number of anomalous samples
if n_anomaly_samples is not None:
# IDs of all anomaly samples
idx_anom = np.where(this_y == 1)[0]
# Select the indices to delete
n_delete = len(idx_anom) - n_anomaly_samples
idx_delete = np.random.choice(idx_anom, size=n_delete, replace=False)
# Delete indices
this_x = np.delete(this_x, idx_delete, axis=0)
this_y = np.delete(this_y, idx_delete, axis=0)
# Check if we really have the right amount of anomaly samples
assert np.sum(this_y) == n_anomaly_samples
return this_x, this_y
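# Worked example (illustrative, not original code): with anomaly_classes=[7]
# and labels [1, 7, 3, 7], the two np.where assignments produce [-1, 0, -1, 0]
# and the final "+= 1" yields the binary labels [0, 1, 0, 1], where 1 marks an
# anomaly.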
## Preprocessors
@abc.abstractmethod
def _preprocess(self):
# Preprocessing steps, e.g. data normalisation
raise NotImplementedError("Implement in subclass")
def __post_init__(self):
"""
Process the data
:return:
"""
# Fix randomness
np.random.seed(seed=self.random_state)
# Get all available classes
# TODO: we're only looking at the training data so far
self.available_classes = np.unique(self.y_train).tolist()
# Split in test and train
if self.x_test is None:
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
self.x_train, self.y_train, test_size=self.test_split, random_state=self.random_state
)
# Split in train and validation
if self.x_val is None:
self.x_train, self.x_val, self.y_train, self.y_val = train_test_split(
self.x_train, self.y_train, test_size=self.val_split, random_state=self.random_state
)
# Preprocess
self._preprocess()
# Note down the shape
self.shape = self.x_train.shape[1:]
## Helpers
def include_to_drop(self, include_data: Union[List[int], List[str]]) -> Union[List[int], List[str]]:
"""
Convert a list of classes to include to a list of classes to drop
:param include_data: classes to include (all remaining available classes are dropped)
:return: classes to drop
"""
drop_classes = set(self.available_classes) - set(include_data)
return list(drop_classes)
def _get_data_set(self, data_split: str) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the right data split
:param data_split: train, val or test data?
:return: the right data set
"""
if data_split == "train":
return self.x_train.copy(), self.y_train.copy()
elif data_split == "test":
return self.x_test.copy(), self.y_test.copy()
elif data_split == "val":
return self.x_val.copy(), self.y_val.copy()
else:
raise ValueError("The requested data must be of either train, val or test set.")
@staticmethod
def _ae_feature_selector(selected_layers: List[AutoencoderLayers], n_hidden: int) -> List[int]:
"""
Index of features based on their name representation for symmetric autoencoders
:param selected_layers: list of names for the desired layers
:param n_hidden: number of hidden states
:return: list of indices where to find the desired layers
"""
# If nothing was specified, we'll assume that all features are meant
if not selected_layers:
return list(range(n_hidden))
# If already numbers were given, use them
if isinstance(selected_layers[0], int):
return selected_layers
# 0-indexed lists are used
n_hidden -= 1
# We assume symmetric autoencoders, such that the code is in the middle
i_code = math.floor(n_hidden / 2)
# Life is easier with a translation dictionary
trans_dict = {
AutoencoderLayers.OUTPUT: [n_hidden],
AutoencoderLayers.CODE: [i_code],
AutoencoderLayers.ENCODER: list(range(i_code)),
AutoencoderLayers.DECODER: list(range(i_code + 1, n_hidden)),
}
# We'll replace the selected lists by their index values and concatenate them
index_list = [trans_dict[cur_el] for cur_el in selected_layers]
index_list = [cur_el for cur_list in index_list for cur_el in cur_list]
return sorted(index_list)
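# Worked example (illustrative): for a symmetric autoencoder with n_hidden=5,
# the 0-indexed layers are 0..4 with the code at index 2, so
#   _ae_feature_selector([AutoencoderLayers.ENCODER, AutoencoderLayers.CODE], 5)
# returns [0, 1, 2] (the encoder layers plus the code layer).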
def scikit_scale(self, scikit_scaler: Callable[[], TransformerMixin] = MinMaxScaler):
"""
Apply a scikit scaler to the data, e.g. MinMaxScaler transform data to [0,1]
:return:
"""
# Fit scaler to train set
self.scaler = scikit_scaler()
self.x_train = self.scaler.fit_transform(self.x_train)
# Scale the rest
self.x_val = self.scaler.transform(self.x_val)
self.x_test = self.scaler.transform(self.x_test)
pass
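# Hedged usage sketch (assumption, not part of the original file): how a
# DataLabels subclass such as MNIST below might typically be consumed. The
# helper name is hypothetical and exists only for illustration.
def _demo_datalabels_usage():
    data = MNIST(test_split=0.2, val_split=0.05, random_state=0)
    x_ae, _ = data.get_target_autoencoder_data("train", include_classes=[0, 1])
    x_alarm, y_alarm = data.get_alarm_data("test", anomaly_classes=[0], n_anomaly_samples=100)
    return x_ae.shape, x_alarm.shape, int(y_alarm.sum())  # the sum equals 100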
class MNIST(DataLabels):
def __init__(self, enrich_mnist_by=None, enrich_test_by=None, *args, **kwargs):
"""
Load the MNIST data set
"""
# Simply load the data with the kind help of Keras
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Add channel dimension to the data
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# If desired, add new samples from EMNIST to MNIST
if enrich_mnist_by:
# load both, train and test data set from EMNIST
emnist_x_train, emnist_y_train = extract_samples('letters', 'train')
emnist_x_test, emnist_y_test = extract_samples('letters', 'test')
# Add channel dimension to emnist data
emnist_x_train = np.expand_dims(emnist_x_train, -1)
emnist_x_test = np.expand_dims(emnist_x_test, -1)
# choose the desired letters from emnist and translate numerical labels to letters
idx_train = []
idx_test = []
enrich_mnist_by = [i-9 for i in enrich_mnist_by]
for i in range(len(enrich_mnist_by)):
# get locations/indices of desired letters
idx_train.append(np.where(emnist_y_train == list(enrich_mnist_by)[i]))
idx_test.append(np.where(emnist_y_test == list(enrich_mnist_by)[i]))
idx_train = np.asarray(idx_train).flatten()
emnist_x_train = emnist_x_train[idx_train]
emnist_y_train = emnist_y_train[idx_train]+9
idx_test = np.asarray(idx_test).flatten()
emnist_x_test = emnist_x_test[idx_test]
emnist_y_test = emnist_y_test[idx_test]+9
# concatenate mnist train set and emnist train dataset
y_train = np.append(y_train, emnist_y_train)
x_train = np.concatenate((x_train, emnist_x_train), axis=0)
# concatenate mnist test set and emnist test dataset
y_test = np.append(y_test, emnist_y_test)
x_test = np.concatenate((x_test, emnist_x_test), axis=0)
super(MNIST, self).__init__(
x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, *args, **kwargs
)
def _preprocess(self):
"""
For MNIST, we can scale everything by just dividing by 255
:return:
"""
self.x_train = self.x_train / 255.
self.x_test = self.x_test / 255.
self.x_val = self.x_val / 255.
class EMNIST(DataLabels):
def __init__(self, anom_list, *args, **kwargs):
"""
Load the EMNIST letters (treated as anomalies) and combine them with the MNIST digits
"""
# load MNIST letters using emnist package
data, labels = extract_samples('letters', 'train')
# Add channel dimension to the data
data = np.expand_dims(data, -1)
# take anom_list as anomalies and delete other values and map to one value
idx = np.where((labels >= anom_list[0]) & (labels <= anom_list[len(anom_list) - 1]))
data = data[idx]
labels = labels[idx]
labels.fill(10)
# load mnist digit dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Add channel dimension to the data
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# concatenate mnist and emnist dataset
dat = np.concatenate((data, x_train, x_test), axis=0)
label = np.concatenate((labels, y_train, y_test), axis=0)
super(EMNIST, self).__init__(x_train=dat, y_train=label, *args,
**kwargs)
def _preprocess(self):
"""
As with MNIST, we can scale everything by just dividing by 255
:return:
"""
self.x_train = self.x_train / 255.
self.x_test = self.x_test / 255.
self.x_val = self.x_val / 255.
class CreditCard(DataLabels):
def __init__(
self, data_path: Path = (BASE_PATH / "data" / "creditcard" / "creditcard").with_suffix(".csv"),
*args, **kwargs
):
"""
Load the CreditCard data set (https://www.kaggle.com/mlg-ulb/creditcardfraud)
:param data_path: absolute path to the CreditCard csv
"""
data = pd.read_csv(data_path)
# Time axis does not directly add information (although frequency might be a feature)
data = data.drop(['Time'], axis=1)
# Column class has the anomaly values, the rest is data
x_train = data.drop(['Class'], axis=1)
y_train = data.loc[:, ['Class']]
# We don't need the overhead of pandas here
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
# TODO: why is this even in here?
# for i in range(len(y_train)):
# y_train[i, 0] = y_train[i, 0].replace("\'", "")
super(CreditCard, self).__init__(
x_train=x_train, y_train=y_train, *args, **kwargs
)
def _preprocess(self):
"""
Standardscale the data
:return:
"""
self.y_test = self.y_test.astype(int)
self.y_train = self.y_train.astype(int)
self.y_val = self.y_val.astype(int)
self.scikit_scale()
def _drop_class(self):
"""
Drop frauds (Class==1)
"""
# Delete from training data if we train the autoencoder
if not self.is_alarm:
self.x_train = np.delete(self.x_train, np.where(self.y_train == self.drop_num), axis=0)
self.y_train = np.delete(self.y_train, np.where(self.y_train == self.drop_num), axis=0)
# We should also drop it from the validation data, such that we only optimise on reconstruction valid data
self.x_val = np.delete(self.x_val, np.where(self.y_val == self.drop_num), axis=0)
self.y_val = np.delete(self.y_val, np.where(self.y_val == self.drop_num), axis=0)
# Rewrite train labels -> not necessary for this data set
if not self.is_binary:
raise NotImplementedError("This data set only has binary labels")
class NSL_KDD(DataLabels):
def __init__(self, data_folder: str = "NSL-KDD", *args, **kwargs):
"""
NSL KDD data set: https://www.unb.ca/cic/datasets/nsl.html
:param data_folder: subfolder of "data" where raw data resides
"""
# Open raw data
common_path = BASE_PATH / "data" / data_folder
train_data = arff.loadarff((common_path / "KDDTrain+").with_suffix(".arff"))
test_data = arff.loadarff((common_path / "KDDTest+").with_suffix(".arff"))
# Extract column names
all_cols = [cur_key for cur_key in test_data[1]._attributes.keys()]
all_cat = {
cur_key: cur_val.range for cur_key, cur_val in test_data[1]._attributes.items()
if cur_val.range is not None
}
# Create pandas dataframe
train_data = pd.DataFrame(data=train_data[0], columns=all_cols)
test_data =
|
pd.DataFrame(data=test_data[0], columns=all_cols)
|
pandas.DataFrame
|
import datetime
import numpy as np
import pandas
import fitparse
import fitanalysis.util
# Set to True to add a column to the DataFrame indicating whether a row would
# have been removed if removal of stopped periods were enabled, but don't
# actually remove it.
DEBUG_EXCISE = False
class Activity(fitparse.FitFile):
"""Represents an activity recorded as a .fit file.
Construction of an Activity parses the .fit file and detects periods of
inactivity, as such periods must be removed from the data for heart rate-,
cadence-, and power-based calculations.
"""
EVENT_TYPE_START = 'start'
EVENT_TYPE_STOP = 'stop'
TIMER_TRIGGER_DETECTED = 'detected'
# Speeds less than or equal to this value (in m/s) are
# considered to be stopped
STOPPED_THRESHOLD = 0.3
def __init__(self, file_obj, remove_stopped_periods=True):
"""Creates an Activity from a .fit file.
Args:
file_obj: A file-like object representing a .fit file.
remove_stopped_periods: If True, regions of data with speed below a
threshold will be removed from the data. Default
is True.
"""
super(Activity, self).__init__(file_obj)
self._remove_stopped_periods = remove_stopped_periods or DEBUG_EXCISE
records = list(self.get_messages('record'))
# Get elapsed time before modifying the data
self.start_time = records[0].get('timestamp').value
self.end_time = records[-1].get('timestamp').value
self.elapsed_time = self.end_time - self.start_time
# Calculated when needed and memoized here
self._moving_time = None
self._norm_power = None
self.events = self._df_from_messages(
self.get_messages('event'),
['event', 'event_type', 'event_group', 'timer_trigger', 'data'],
timestamp_index=True)
# We will build a DataFrame with these fields as columns. Values for each
# of these fields will be extracted from each record from the .fit file.
fields = ['timestamp', 'speed', 'heart_rate', 'power', 'cadence']
# The primary index of the DataFrame is the "block". A block is defined as
# a period of movement. Blocks may be defined by start/stop event messages
# from the .fit file, or they may be detected based on speed in the case
# that the recording device did not automatically pause recording when
# stopped.
blocks = []
curr_block = -1
# The secondary index is the duration from the start of the activity
time_offsets = []
# Get start/stop events from .fit file and combine with the events detected
# from speed data, keeping the event from the .fit file if timestamps are
# identical
timer_events = self.events[self.events['event'] == 'timer']
if self._remove_stopped_periods:
# Detect start/stop events based on stopped threshold speed. If the
# recording device did not have autopause enabled then this is the only
# way periods of no movement can be detected and removed.
detected_events = self._detect_start_stop_events(records)
timer_events = timer_events.combine_first(detected_events)
# Build the rows and indices of the DataFrame
excise = False
event_index = 0
rows = []
for record in records:
curr_timestamp = record.get('timestamp').value
# Match data record timestamps with event timestamps in order to mark
# "blocks" as described above. Periods of no movement will be excised
# (if the recording device did not have autopause enabled there will be
# blocks of no movement that should be removed before data analysis).
if event_index < len(timer_events) and (
curr_timestamp >= timer_events.iloc[event_index].name):
# Events usually have timestamps that correspond to a data timestamp,
# but this isn't always the case. Process events until the events catch
# up with the data.
while True:
event_type = timer_events.iloc[event_index]['event_type']
trigger = timer_events.iloc[event_index]['timer_trigger']
if event_type == self.EVENT_TYPE_START:
curr_block += 1
# If we've seen a start event we should not be excising data
# TODO(mtraver) Do I care if the start event is detected or from
# the .fit file? I don't think so.
excise = False
elif event_type.startswith(self.EVENT_TYPE_STOP):
# If the stop event was detected based on speed, excise the region
# until the next start event, because we know that it's a region of
# data with speed under the stopped threshold.
if trigger == self.TIMER_TRIGGER_DETECTED:
excise = True
event_index += 1
# Once the event timestamp is ahead of the data timestamp we can
# continue processing data; the next event will be processed as the
# data timestamps catch up with it.
if event_index >= len(timer_events) or (
curr_timestamp < timer_events.iloc[event_index].name):
break
if not excise or DEBUG_EXCISE:
# Build indices
time_offsets.append(curr_timestamp - self.start_time)
blocks.append(curr_block)
row = []
for field_name in fields:
field = record.get(field_name)
row.append(field.value if field is not None else None)
if DEBUG_EXCISE:
row.append(excise)
rows.append(row)
assert len(blocks) == len(time_offsets)
if DEBUG_EXCISE:
fields += ['excise']
self.data = pandas.DataFrame(rows, columns=fields,
index=[blocks, time_offsets])
self.data.index.names = ['block', 'offset']
# These fields may not exist in all .fit files,
# so drop the columns if they're not present.
for field in ['power', 'cadence', 'heart_rate']:
if self.data[self.data[field].notnull()].empty:
self.data.drop(field, axis=1, inplace=True)
if self.has_power and self.has_cadence:
self._clean_up_power_and_cadence()
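# Illustrative note (assumption, not original code): self.data ends up with a
# two-level MultiIndex of (block, offset-from-start), e.g.
#   self.data.loc[0]          # all samples in the first moving block
#   self.data.loc[0].iloc[0]  # the first record of that block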
def _df_from_messages(self, messages, fields, timestamp_index=False):
"""Creates a DataFrame from an iterable of fitparse messages.
Args:
messages: Iterable of fitparse messages.
fields: List of message fields to include in the DataFrame. Each one will
be a separate column, and if a field isn't present in a particular
message, its value will be set to None.
timestamp_index: If True, message timestamps will be used as the index of
the DataFrame. Otherwise the default index is used.
Default is False.
Returns:
A DataFrame with one row per message and columns for each of
the given fields.
"""
rows = []
timestamps = []
for m in messages:
timestamps.append(m.get('timestamp').value)
row = []
for field_name in fields:
field = m.get(field_name)
row.append(field.value if field is not None else None)
rows.append(row)
if timestamp_index:
return pandas.DataFrame(rows, columns=fields, index=timestamps)
else:
return
|
pandas.DataFrame(rows, columns=fields)
|
pandas.DataFrame
|
import datetime
import pandas as pd
import numpy as np
import yfinance as yf
def options_chain_by_date(symbol, date):
tk = yf.Ticker(symbol)
# Get options for each expiration
options =
|
pd.DataFrame()
|
pandas.DataFrame
|
from english_words import english_words_set
import pandas as pd
import numpy as np
# make a list of 5-letter words without non-alphas, and no proper nouns
words5 = []
for word in english_words_set:
if len(word) == 5 and word[0].islower() and word.isalpha():
words5.append(word)
df_words = pd.DataFrame(words5)
df_words.columns=['words']
df_words['first'] = df_words['words'].str.slice(0,1)
df_words['second'] = df_words['words'].str.slice(1,2)
df_words['third'] = df_words['words'].str.slice(2,3)
df_words['fourth'] = df_words['words'].str.slice(3,4)
df_words['fifth'] = df_words['words'].str.slice(4,5)
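# Hedged sanity check (illustrative addition, not in the original script):
# every remaining word should be exactly five characters long.
assert df_words['words'].str.len().eq(5).all()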
aplhas = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
dfalpha =
|
pd.DataFrame(aplhas)
|
pandas.DataFrame
|
import pandas as pd
from sklearn.model_selection import train_test_split
class BuildFeatures():
def __init__(self, X, y, test):
self.X, self.y, self.test = (X, y, test)
def engineer(self):
"""Engineer features of the configured data
Returns:
Tuple: The engineered features
"""
# The feature engineering performed in this method is nicked from the incredibly
# useful (and lucid) post https://jaketae.github.io/study/sklearn-pipeline/
# 1. Columns being dropped because they're probably not predictive:
# boat - Lifeboat (if survived)
# body - Body number (if did not survive and body was recovered)
# home.dest - home/destination
# 2. Column dropped because contains a high percentage of null values: cabin
for dataset in [self.X, self.test]:
dataset.drop(['boat', 'body', 'cabin', 'home.dest'], axis=1, inplace=True, errors='ignore')
# Split data for train and test.
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, stratify=self.y, test_size=0.2)
pd.set_option('mode.chained_assignment', None)  # silence pandas' SettingWithCopyWarning for the column assignments below
for dataset in [X_train, X_test, self.test]:
dataset['family_size'] = dataset['parch'] + dataset['sibsp']
dataset.drop(['parch', 'sibsp'], axis=1, inplace=True, errors='ignore')
dataset['is_alone'] = 1
dataset.loc[dataset['family_size'] > 1, 'is_alone'] = 0
dataset['title'] = dataset['name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
dataset.drop(['name', 'ticket'], axis=1, inplace=True, errors='ignore')
# Combine some of the many titles in the data.
|
pd.crosstab(X_train['title'], X_train['sex'])
|
pandas.crosstab
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import plotly.express as px
import matplotlib.dates as mdates
import datetime as DT
import time
|
pd.set_option('max_columns', None)
|
pandas.set_option
|
##
import pickle,os,math
import matplotlib.pyplot as plt
import plotly.express as px
import pandas as pd
import numpy as np
from seisgo import utils
from tslearn.utils import to_time_series, to_time_series_dataset
from tslearn.clustering import TimeSeriesKMeans
from minisom import MiniSom
######
def vmodel_kmean_depth(lat, lon, depth,v,ncluster,spacing=1,njob=1,
verbose=False,plot=True,savefig=True,figbase='kmean',
metric='dtw',max_iter_barycenter=100, random_state=0,save=True,
source='vmodel',tag='v',figsize=None):
all_v = []
lat_subidx=[int(x) for x in np.arange(0,len(lat),spacing)]
lon_subidx=[int(x) for x in np.arange(0,len(lon),spacing)]
lat0=[]
lon0=[]
count=0
for i in lat_subidx:
for j in lon_subidx:
v0=np.ndarray((v.shape[0]))
for pp in range(v.shape[0]):
v0[pp]=v[pp,i,j]
if not np.isnan(v0).any() :
all_v.append(v0)
lat0.append(lat[i])
lon0.append(lon[j])
count += 1
ts = to_time_series_dataset(all_v)
km = TimeSeriesKMeans(n_clusters=ncluster, n_jobs=njob,metric=metric, verbose=verbose,
max_iter_barycenter=max_iter_barycenter, random_state=random_state)
y_pred = km.fit_predict(ts)
rows = []
for c in range(count):
cluster = km.labels_[c]
rows.append([lat0[c], lon0[c], cluster+1])
df =
|
pd.DataFrame(rows, columns=['lat', 'lon', 'cluster'])
|
pandas.DataFrame
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46:
|
pd.Timestamp("2012-06-17 00:00:00")
|
pandas.Timestamp
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
import pandas as pd
import pyspark
from pyspark.sql import Column
from databricks.koala.testing.utils import ReusedSQLTestCase, TestUtils
class DataFrameTest(ReusedSQLTestCase, TestUtils):
@property
def full(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def df(self):
return self.spark.from_pandas(self.full)
def test_Dataframe(self):
d = self.df
full = self.full
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='(a + 1)') # TODO: name='a'
self.assert_eq(d['a'] + 1, expected)
self.assert_eq(d.columns, pd.Index(['a', 'b']))
self.assert_eq(d[d['b'] > 2], full[full['b'] > 2])
# TODO: self.assert_eq(d[['a', 'b']], full[['a', 'b']])
self.assert_eq(d.a, full.a)
# TODO: assert d.b.mean().compute() == full.b.mean()
# TODO: assert np.allclose(d.b.var().compute(), full.b.var())
# TODO: assert np.allclose(d.b.std().compute(), full.b.std())
assert repr(d)
def test_head_tail(self):
d = self.df
full = self.full
self.assert_eq(d.head(2), full.head(2))
self.assert_eq(d.head(3), full.head(3))
self.assert_eq(d['a'].head(2), full['a'].head(2))
self.assert_eq(d['a'].head(3), full['a'].head(3))
# TODO: self.assert_eq(d.tail(2), full.tail(2))
# TODO: self.assert_eq(d.tail(3), full.tail(3))
# TODO: self.assert_eq(d['a'].tail(2), full['a'].tail(2))
# TODO: self.assert_eq(d['a'].tail(3), full['a'].tail(3))
@unittest.skip('TODO: support index')
def test_index_head(self):
d = self.df
full = self.full
self.assert_eq(d.index[:2], full.index[:2])
self.assert_eq(d.index[:3], full.index[:3])
def test_Series(self):
d = self.df
full = self.full
self.assertTrue(isinstance(d.a, Column))
self.assertTrue(isinstance(d.a + 1, Column))
# TODO: self.assert_eq(d + 1, full + 1)
@unittest.skip('TODO: support index')
def test_Index(self):
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D',
periods=10))]:
ddf = self.spark.from_pandas(case)
self.assert_eq(ddf.index, case.index)
def test_attributes(self):
d = self.df
self.assertIn('a', dir(d))
self.assertNotIn('foo', dir(d))
self.assertRaises(AttributeError, lambda: d.foo)
df = self.spark.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}))
self.assertNotIn('a b c', dir(df))
df = self.spark.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}))
self.assertIn('a', dir(df))
self.assertNotIn(5, dir(df))
def test_column_names(self):
d = self.df
self.assert_eq(d.columns, pd.Index(['a', 'b']))
# TODO: self.assert_eq(d[['b', 'a']].columns, pd.Index(['b', 'a']))
self.assertEqual(d['a'].name, 'a')
self.assertEqual((d['a'] + 1).name, '(a + 1)') # TODO: 'a'
self.assertEqual((d['a'] + d['b']).name, '(a + b)') # TODO: None
@unittest.skip('TODO: support index')
def test_index_names(self):
d = self.df
self.assertIsNone(d.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = self.spark.from_pandas(df)
self.assertEqual(ddf.index.name, 'x')
def test_rename_columns(self):
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = self.spark.from_pandas(df)
ddf.columns = ['x', 'y']
df.columns = ['x', 'y']
self.assert_eq(ddf.columns, pd.Index(['x', 'y']))
self.assert_eq(ddf, df)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
ddf.columns = [1, 2, 3, 4]
# Multi-index columns
df = pd.DataFrame({('A', '0'): [1, 2, 2, 3], ('B', 1): [1, 2, 3, 4]})
ddf = self.spark.from_pandas(df)
df.columns = ['x', 'y']
ddf.columns = ['x', 'y']
self.assert_eq(ddf.columns, pd.Index(['x', 'y']))
self.assert_eq(ddf, df)
def test_rename_series(self):
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = self.spark.from_pandas(pd.DataFrame(s)).x
s.name = 'renamed'
ds.name = 'renamed'
self.assertEqual(ds.name, 'renamed')
self.assert_eq(ds, s)
# TODO: index
# ind = s.index
# dind = ds.index
# ind.name = 'renamed'
# dind.name = 'renamed'
# self.assertEqual(ind.name, 'renamed')
# self.assert_eq(dind, ind)
def test_rename_series_method(self):
# Series name
s =
|
pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
|
pandas.Series
|
import asyncio
import aiohttp
import airly
import sys
import time
import pandas as pd
class AirlyDownloader:
def __init__(self, key):
self.key = key
@staticmethod
def __extract_data_installation(installation):
"""
Function to collect info for an installation in a robust way
:param installation: coroutine representing an installation object returned by function load_installation_...
:return: dictionary with data for a given installation
"""
data = {
'installation_id': installation['id'],
'airly_sensor': installation['airly']
}
try:
data['sponsor'] = installation['sponsor']['name']
except:
data['sponsor'] = 'None'
try:
data['elevation'] = installation['elevation']
except:
data['elevation'] = 'None'
try:
data['geolocation_latitude'] = installation['location']['latitude']
data['geolocation_longitude'] = installation['location']['longitude']
except:
data['geolocation_longitude'] = 'None'
data['geolocation_latitude'] = 'None'
try:
data['address_country'] = installation['address']['country']
data['address_city'] = installation['address']['city']
data['address_street'] = installation['address']['street']
data['address_number'] = installation['address']['number']
except:
data['address_country'] = 'None'
data['address_city'] = 'None'
data['address_street'] = 'None'
data['address_number'] = 'None'
return data
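# Sketch of the returned record (illustrative values, not real data):
#   {'installation_id': 8077, 'airly_sensor': True, 'sponsor': 'None',
#    'elevation': 210.0, 'geolocation_latitude': 50.06, 'geolocation_longitude': 19.94,
#    'address_country': 'Poland', 'address_city': 'Krakow', ...}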
@staticmethod
def __extract_data_measurement(measurement):
"""
Function to collect measurements in a robust way
:param measurement: coroutine representing a measurement object returned by function create_measurements_session_...
:return: dictionary with a measurement data
"""
data = {
'start_date': measurement['fromDateTime'],
'end_date': measurement['tillDateTime'],
'air_quality_index_name': measurement['indexes'][0]['name'],
'air_quality_index_value': measurement['indexes'][0]['value'],
'air_quality_index_level': measurement['indexes'][0]['level'],
'air_quality_index_desc': measurement['indexes'][0]['description'],
'air_quality_index_advice': measurement['indexes'][0]['advice'],
'air_quality_index_colour': measurement['indexes'][0]['color']
}
names = [measurement['values'][i]['name'] for i in range(0, len(measurement['values']))]
try:
pos = names.index('PM1')
data['PM1'] = measurement['values'][pos]['value']
except:
data['PM1'] = 'None'
try:
pos = names.index('PM25')
data['PM25'] = measurement['values'][pos]['value']
except:
data['PM25'] = 'None'
try:
pos = names.index('PM10')
data['PM10'] = measurement['values'][pos]['value']
except:
data['PM10'] = 'None'
try:
pos = names.index('PRESSURE')
data['Pressure'] = measurement['values'][pos]['value']
except:
data['Pressure'] = 'None'
try:
pos = names.index('HUMIDITY')
data['Humidity'] = measurement['values'][pos]['value']
except:
data['Humidity'] = 'None'
try:
pos = names.index('TEMPERATURE')
data['Temperature'] = measurement['values'][pos]['value']
except:
data['Temperature'] = 'None'
try:
pos = names.index('NO2')
data['NO2'] = measurement['values'][pos]['value']
except:
data['NO2'] = 'None'
try:
pos = names.index('CO')
data['CO'] = measurement['values'][pos]['value']
except:
data['CO'] = 'None'
try:
pos = names.index('O3')
data['O3'] = measurement['values'][pos]['value']
except:
data['O3'] = 'None'
try:
pos = names.index('SO2')
data['SO2'] = measurement['values'][pos]['value']
except:
data['SO2'] = 'None'
return data
async def __installation_async(self, installation_id):
"""
Private function to retrieve information about a specific installation by given installation_id
:param installation_id: int representing the indicator of installation
:return: pandas DataFrame with installation infos (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
installation = await airly_api.load_installation_by_id(installation_id)
data = self.__extract_data_installation(installation)
data = pd.DataFrame(data, index=[0])
return data
def installation(self, installation_id, filename=None):
"""
Wrapper for installation_async function to perform coroutine
:param installation_id: int representing the indicator of installation
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with installation infos
"""
loop = asyncio.get_event_loop()
data = loop.run_until_complete(self.__installation_async(installation_id))
if filename is not None:
data.to_csv(filename)
return data
async def __installations_nearest_async(self, latitude, longitude, max_distance_km, max_results):
"""
Private function to retrieve the information about available installations around given location within given
distance and limited to a given number of results
:param latitude: float representing latitude of location
:param longitude: float representing a longitude of location
:param max_distance_km: float representing maximal distance within which to look for installations
:param max_results: int representing maximal number of results to be returned
:return: pandas DataFrame with the installations infos (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
sys.stdout.flush()
installation_list = await airly_api.load_installation_nearest(latitude=latitude, longitude=longitude,
max_distance_km=max_distance_km,
max_results=max_results)
sys.stdout.flush()
installation_ids = [loc['id'] for loc in installation_list]
data = {}
for installation_id in range(0, len(installation_ids)):
data[installation_id] = []
data[installation_id].append(self.__extract_data_installation(installation_list[installation_id]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
def installations_nearest(self, latitude, longitude, max_distance_km=1, max_results=-1, filename=None):
"""
Wrapper function for function installations_nearest_async to perform coroutine
:param latitude: float representing latitude of location
:param longitude: float representing a longitude of location
:param max_distance_km: float representing maximal distance within which to look for installations
:param max_results: int representing maximal number of results to be returned
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with the installations infos
"""
loop = asyncio.get_event_loop()
data = loop.run_until_complete(self.__installations_nearest_async(latitude, longitude, max_distance_km, max_results))
if filename is not None:
data.to_csv(filename)
return data
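# Hedged usage sketch (assumption; the API key and coordinates are placeholders):
#   downloader = AirlyDownloader(key="YOUR_API_KEY")
#   nearby = downloader.installations_nearest(50.06, 19.94, max_distance_km=5, max_results=3)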
async def __measurement_installation_id_async_current(self, installation_id):
"""
Private function to get current measurement for an installation with a given installation_id
:param installation_id: int representing an installation ID
:return: pandas DataFrame with measurement (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_installation(installation_id)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
current = measurement.current
data = self.__extract_data_measurement(current)
data = pd.DataFrame(data, index=[0])
return data
async def __measurement_installation_id_async_history(self, installation_id):
"""
Private function to get measurements for the last 24 hours (historical) for an installation with a given installation_id
:param installation_id: int representing an installation ID
:return: pandas DataFrame with measurement (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_installation(installation_id)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
history = measurement.history
data = {}
for i in range(0, 24):
data[i] = []
data[i].append(self.__extract_data_measurement(history[i]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
async def __measurement_installation_id_async_forecast(self, installation_id):
"""
Private function to get measurements for the next 24 hours (forecast) for an installation with a given installation_id
:param installation_id: int representing an installation ID
:return: pandas DataFrame with measurement (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_installation(installation_id)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
forecast = measurement.forecast
data = {}
for i in range(0, 24):
data[i] = []
data[i].append(self.__extract_data_measurement(forecast[i]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
def measurement_installation_id(self, installation_id, measurement_type, filename=None):
"""
Wrapper function for functions measurement_installation_id_async_current,
measurement_installation_id_async_history and measurement_installation_id_async_forecast to retrieve
(respectively) current, historical or forecasted measurements for an installation with a given ID
:param installation_id: int representing installation ID
:param measurement_type: string representing a requested measurement type;
possible values: current, history, forecast
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with the measurement info
"""
loop = asyncio.get_event_loop()
if measurement_type == 'current':
data = loop.run_until_complete(self.__measurement_installation_id_async_current(installation_id))
elif measurement_type == 'history':
data = loop.run_until_complete(self.__measurement_installation_id_async_history(installation_id))
elif measurement_type == 'forecast':
data = loop.run_until_complete(self.__measurement_installation_id_async_forecast(installation_id))
else:
raise Exception('Wrong type of measurement!')
data['installation_id'] = installation_id
if filename is not None:
data.to_csv(filename)
return data
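# Example usage (a sketch; `client` is an instance of this wrapper class, 204 an
# illustrative installation ID):
#     current = client.measurement_installation_id(204, 'current')
#     history = client.measurement_installation_id(204, 'history')     # 24 hourly rows
#     forecast = client.measurement_installation_id(204, 'forecast', filename="forecast_204.csv")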
async def __measurement_nearest_async_current(self, latitude, longitude, max_distance_km):
"""
Private function to retrieve current measurement for an installation closest to the given coordinates
:param latitude: float representing the latitude of a given location
:param longitude: float representing the longitude of a given location
:param max_distance_km: float representing the maximal range (in KM) to look for an installation
:return: pandas DataFrame with measurements (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_nearest(latitude, longitude, max_distance_km)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
current = measurement.current
data = self.__extract_data_measurement(current)
data = pd.DataFrame(data, index=[0])
return data
async def __measurement_nearest_async_history(self, latitude, longitude, max_distance_km):
"""
Private function to retrieve measurements for the last 24 hours (historical) for an installation closest
to the given coordinates
:param latitude: float representing the latitude of a given location
:param longitude: float representing the longitude of a given location
:param max_distance_km: float representing the maximal range (in KM) to look for an installation
:return: pandas DataFrame with measurements (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_nearest(latitude, longitude, max_distance_km)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
history = measurement.history
data = {}
for i in range(0, 24):
data[i] = []
data[i].append(self.__extract_data_measurement(history[i]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
async def __measurement_nearest_async_forecast(self, latitude, longitude, max_distance_km):
"""
Private function to retrieve measurements for the next 24 hours (forecast) for an installation closest
to the given coordinates
:param latitude: float representing the latitude of a given location
:param longitude: float representing the longitude of a given location
:param max_distance_km: float representing the maximal range (in KM) to look for an installation
:return: pandas DataFrame with measurements (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_nearest(latitude, longitude, max_distance_km)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
forecast = measurement.forecast
data = {}
for i in range(0, 24):
data[i] = []
data[i].append(self.__extract_data_measurement(forecast[i]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
def measurement_nearest(self, latitude, longitude, measurement_type, max_distance_km=3, filename=None):
"""
Wrapper function for functions measurement_nearest_async_current, measurement_nearest_async_history and
measurement_nearest_async_forecast to retrieve (respectively) current, historical and forecasted measurements for
an installation closest to the given coordinates (and within a given range)
:param latitude: float representing the latitude of a given location
:param longitude: float representing the longitude of a given location
:param measurement_type: string representing a requested measurement type;
possible values: current, history, forecast
:param max_distance_km: float representing the maximal range (in KM) to look for an installation
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with the measurements info
"""
loop = asyncio.get_event_loop()
if measurement_type == 'current':
data = loop.run_until_complete(self.__measurement_nearest_async_current(latitude, longitude, max_distance_km))
elif measurement_type == 'history':
data = loop.run_until_complete(self.__measurement_nearest_async_history(latitude, longitude, max_distance_km))
elif measurement_type == 'forecast':
data = loop.run_until_complete(self.__measurement_nearest_async_forecast(latitude, longitude, max_distance_km))
else:
raise Exception('Wrong type of measurement!')
if filename is not None:
data.to_csv(filename)
return data
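# Example usage (a sketch; `client` is an instance of this wrapper class, coordinates
# illustrative):
#     now_df = client.measurement_nearest(50.06, 19.94, 'current')
#     hist_df = client.measurement_nearest(50.06, 19.94, 'history', max_distance_km=5)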
async def __measurement_location_async_current(self, latitude, longitude):
"""
Private function to retrieve the current interpolated measurement for given coordinates
:param latitude: float representing the latitude of a given location
:param longitude: float representing a longitude of a given location
:return: pandas DataFrame with measurement info (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_point(latitude, longitude)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
current = measurement.current
data = self.__extract_data_measurement(current)
data = pd.DataFrame(data, index=[0])
return data
async def __measurement_location_async_history(self, latitude, longitude):
"""
Private function to retrieve interpolated measurements for the last 24 hours (historical) for given coordinates
:param latitude: float representing the latitude of a given location
:param longitude: float representing a longitude of a given location
:return: pandas DataFrame with measurement info (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_point(latitude, longitude)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
history = measurement.history
data = {}
for i in range(0, 24):
data[i] = []
data[i].append(self.__extract_data_measurement(history[i]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
async def __measurement_location_async_forecast(self, latitude, longitude):
"""
Private function to retrieve interpolated measurements for the next 24 hours (forecast) for given coordinates
:param latitude: float representing the latitude of a given location
:param longitude: float representing a longitude of a given location
:return: pandas DataFrame with measurement info (coroutine)
"""
async with aiohttp.ClientSession() as http_session:
airly_api = airly.Airly(self.key, http_session)
measurement = airly_api.create_measurements_session_point(latitude, longitude)
sys.stdout.flush()
await measurement.update()
sys.stdout.flush()
forecast = measurement.forecast
data = {}
for i in range(0, 24):
data[i] = []
data[i].append(self.__extract_data_measurement(forecast[i]))
data = pd.concat([pd.DataFrame(data[i]) for i in data]).reset_index(drop=True)
return data
def measurement_location(self, latitude, longitude, measurement_type, filename=None):
"""
Wrapper function for functions measurement_location_async_current, measurement_location_async_history and
measurement_location_async_forecast to retrieve (respectively) current, historical and forecasted interpolated
measurements for a given location
:param latitude: float representing the latitude of a given location
:param longitude: float representing a longitude of a given location
:param measurement_type: string representing a requested measurement type;
possible values: current, history, forecast
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with the measurement info
"""
loop = asyncio.get_event_loop()
if measurement_type == 'current':
data = loop.run_until_complete(self.__measurement_location_async_current(latitude, longitude))
elif measurement_type == 'history':
data = loop.run_until_complete(self.__measurement_location_async_history(latitude, longitude))
elif measurement_type == 'forecast':
data = loop.run_until_complete(self.__measurement_location_async_forecast(latitude, longitude))
else:
raise Exception('Wrong type of measurement!')
if filename is not None:
data.to_csv(filename)
return data
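# Example usage (a sketch; `client` is an instance of this wrapper class). Unlike
# measurement_nearest, these values are interpolated for the exact point rather than
# taken from the closest installation:
#     point_now = client.measurement_location(50.06, 19.94, 'current')
#     point_fc = client.measurement_location(50.06, 19.94, 'forecast', filename="point_forecast.csv")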
def get_installation_ids(self):
"""
Get a list of IDs of all available Airly and non-Airly installations
:return: list with IDs
"""
loop = asyncio.get_event_loop()
data = loop.run_until_complete(self.__installations_nearest_async(0, 0, 40000, -1))
return data['installation_id'].tolist()
def get_all_installations(self, filename=None):
"""
Get info about all available Airly and non-Airly installations
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with info about the installations
"""
loop = asyncio.get_event_loop()
data = loop.run_until_complete(self.__installations_nearest_async(0, 0, 40000, -1))
if filename is not None:
data.to_csv(filename)
return data
def get_installations_ids_location(self, latitude, longitude, max_distance_km):
"""
Get a list of IDs of all Airly and non-Airly installations around a given location
:param latitude: float representing a latitude of a given location
:param longitude: float representing longitude of a given location
:param max_distance_km: maximal range (in KM) within which to look for installations
:return: list with IDs
"""
loop = asyncio.get_event_loop()
data = loop.run_until_complete(self.__installations_nearest_async(latitude, longitude, max_distance_km, -1))
return data['installation_id'].tolist()
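# Example usage (a sketch; `client` is an instance of this wrapper class):
#     all_ids = client.get_installation_ids()                      # every available installation
#     all_info = client.get_all_installations("installations.csv")
#     local_ids = client.get_installations_ids_location(50.06, 19.94, max_distance_km=10)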
def collect_measurements(self, filename=None):
"""
Collect historical (last 24 hours) measurements for all available installations (the maximum number of API
requests per day is 1000)
:param filename: if not none, string representing the (path + ) filename to save the info
:return: pandas DataFrame with the measurements
"""
ids = self.get_installation_ids()
frames = []
for i in ids:
print(str(i))
start = time.time()
frames.append(self.measurement_installation_id(i, 'history'))
end = time.time()
elapsed = end - start
time.sleep(max(0, int(2 - elapsed) + 1))
data = pd.concat(frames)
if filename is not None:
    data.to_csv(filename)
return data
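# Example usage (a sketch; `client` is an instance of this wrapper class). Note that this
# issues one request per installation, with a short sleep between calls to throttle
# requests, so it can take a long time and may approach the daily API request limit
# mentioned in the docstring:
#     daily = client.collect_measurements(filename="all_history.csv")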
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
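# Fixture summary: self.frame is a 10x3 random DataFrame over the two-level
# ('first', 'second') MultiIndex built above; self.ymd is a makeTimeDataFrame aggregated
# (summed) to a three-level (year, month, day) MultiIndex whose levels are cast to int64;
# self.series is an 8-element Series on a two-level MultiIndex with one NaN injected.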
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)