# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:holoview]
# language: python
# name: conda-env-holoview-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
from dask.distributed import Client
# import geopandas
import numpy as np
import pandas as pd
# import os
import xarray as xr
from collections import Counter
# %%
# client = Client(n_workers=2, threads_per_worker=2, memory_limit='1GB')
# client = Client(processes=False)
client = Client()
client
# %%
client.close()
# %%
work_dir = '/Volumes/USGS_NHM2/datasets/wrf-conus404/originals'
# Variables that are integrated over 60 minutes per hourly timestep
vars_60min_accum = ['ACDEWC', 'ACDRIPR', 'ACDRIPS', 'ACECAN', 'ACEDIR', 'ACETLSM', 'ACETRAN',
'ACEVAC', 'ACEVB', 'ACEVC', 'ACEVG', 'ACFROC', 'ACFRZC', 'ACGHB', 'ACGHFLSM',
'ACGHV', 'ACINTR', 'ACINTS', 'ACIRB', 'ACIRC', 'ACIRG', 'ACLHFLSM', 'ACLWDNLSM',
'ACLWUPLSM', 'ACMELTC', 'ACPAHB', 'ACPAHG', 'ACPAHLSM', 'ACPAHV', 'ACPONDING',
'ACQLAT', 'ACQRF', 'ACRAINLSM', 'ACRAINSNOW', 'ACRUNSB', 'ACRUNSF', 'ACSAGB',
'ACSAGV', 'ACSAV', 'ACSHB', 'ACSHC', 'ACSHFLSM', 'ACSHG', 'ACSNBOT', 'ACSNFRO',
'ACSNOWLSM', 'ACSNSUB', 'ACSUBC', 'ACSWDNLSM', 'ACSWUPLSM', 'ACTHROR', 'ACTHROS',
'ACTR', 'GRAUPEL_ACC_NC', 'PREC_ACC_NC', 'SNOW_ACC_NC']
# Variables that are accumulated from model start
vars_model_accum = ['ACLWDNB', 'ACLWDNBC', 'ACLWDNT', 'ACLWDNTC', 'ACLWUPB', 'ACLWUPBC',
'ACLWUPT', 'ACLWUPTC', 'ACSNOM', 'ACSWDNB', 'ACSWDNBC', 'ACSWDNT',
'ACSWDNTC', 'ACSWUPB', 'ACSWUPBC', 'ACSWUPT', 'ACSWUPTC']
# %%
# Read word map file for processing the description strings
fhdl = open('wrfout_words.txt', 'r', encoding='ascii')
rawdata = fhdl.read().splitlines()
fhdl.close()
it = iter(rawdata)
next(it) # Skip header
word_map = {}
for row in it:
flds = row.split('\t')
if len(flds[2]) != 0:
word_map[flds[0].replace('"', '')] = flds[2].replace('"', '')
print(flds)
# %%
# %%
# %%
# %%
df = xr.open_dataset(f'{work_dir}/wrfout_d01_2020-09-30_00:00:00', decode_coords=False, chunks={})
# %%
df
# %%
# dim_names = [dd for dd in df.dims.keys()]
# dim_names
# dim_cnt = Counter(dim_names)
# dim_cnt = Counter()
attr_cnt = Counter()
word_cnt = Counter()
# dim_cnt['Time'] += 1
wrfout_vars = {}
for vv in list(df.keys()):
cvar = df[vv]
wrfout_vars[vv] = {}
for cattr, val in cvar.attrs.items():
if cattr in ['description', 'units', 'coordinates']:
attr_cnt[cattr] += 1
wrfout_vars[vv][cattr] = val
if cattr == 'description':
new_val = []
for ww in val.split(' '):
if ww in word_map:
new_val.append(word_map[ww])
else:
new_val.append(ww)
word_cnt[ww] += 1
# result = string[0].upper() + string[1:]
outstr = ' '.join(new_val)
if len(outstr) > 0:
outstr = outstr[0].upper() + outstr[1:]
wrfout_vars[vv]['description_new'] = outstr
wrfout_vars[vv]['datatype'] = cvar.encoding['dtype'].name
wrfout_vars[vv]['dimensions'] = ' '.join(cvar.dims)
if vv in vars_60min_accum:
# Add a cell_methods field
wrfout_vars[vv]['cell_methods'] = 'XTIME: sum (interval: 1 minute)'
# for kk in list(const_df.keys()):
# src_cvar = const_df[kk]
# # Create list of dimensions, modify dim names as needed
# cvar_dims = []
# cvar_cnk = []
# for xx in src_cvar.dims:
# %%
wrfout_vars
# %%
attr_cnt
# %%
out_df = pd.DataFrame(wrfout_vars)
import requests
from pandas import DataFrame, Series
import pandas as pd
from utilities import (LICENSE_KEY, generate_token, master_player_lookup)
import numpy as np
pd.options.mode.chained_assignment = None
######################
# top level functions:
######################
def get_league_rosters(lookup, league_id, week=None):
teams = get_teams_in_league(league_id)
league_rosters = pd.concat(
[_get_team_roster(x, league_id, lookup) for x in teams['team_id']],
ignore_index=True)
return league_rosters
def get_teams_in_league(league_id):
teams_url = ('https://www.fleaflicker.com/api/FetchLeagueStandings?' +
f'leagueId={league_id}')
teams_json = requests.get(teams_url).json()
teams_df = _divs_from_league(teams_json['divisions'])
teams_df['league_id'] = league_id
return teams_df
def get_league_schedule(league_id):
return pd.concat([_get_schedule_by_week(league_id, week) for week in
range(1, 15)], ignore_index=True)
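# Example usage (hypothetical league id; get_league_rosters additionally needs the
# player-lookup table returned by master_player_lookup, whose call signature is not
# shown in this file):
#   teams = get_teams_in_league(34958)
#   schedule = get_league_schedule(34958)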
##################
# helper functions
##################
# roster helper functions
def _process_player(slot):
dict_to_return = {}
if 'leaguePlayer' in slot.keys():
fleaflicker_player_dict = slot['leaguePlayer']['proPlayer']
dict_to_return['name'] = fleaflicker_player_dict['nameFull']
dict_to_return['player_position'] = fleaflicker_player_dict['position']
dict_to_return['fleaflicker_id'] = fleaflicker_player_dict['id']
if 'requestedGames' in slot['leaguePlayer']:
game = slot['leaguePlayer']['requestedGames'][0]
if 'pointsActual' in game:
if 'value' in game['pointsActual']:
dict_to_return['actual'] = game['pointsActual']['value']
if 'position' in slot.keys():
fleaflicker_position_dict = slot['position']
dict_to_return['team_position'] = fleaflicker_position_dict['label']
return dict_to_return
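# Illustrative slot structure that _process_player expects -- the key names come from
# the parsing above, the values are made up:
#   slot = {'leaguePlayer': {'proPlayer': {'nameFull': 'Some Player', 'position': 'RB',
#                                          'id': 12345},
#                            'requestedGames': [{'pointsActual': {'value': 11.7}}]},
#           'position': {'label': 'RB'}}
#   _process_player(slot)
#   # -> {'name': 'Some Player', 'player_position': 'RB', 'fleaflicker_id': 12345,
#   #     'actual': 11.7, 'team_position': 'RB'}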
# list of dicts: put in DataFrame
def _add_pos_suffix(df_subset):
if len(df_subset) > 1:
suffix = Series(range(1, len(df_subset) + 1), index=df_subset.index)
df_subset['team_position'] = df_subset['team_position'] + suffix.astype(str)
return df_subset
def _get_team_roster(team_id, league_id, lookup):
roster_url = ('https://www.fleaflicker.com/api/FetchRoster?' +
f'leagueId={league_id}&teamId={team_id}')
roster_json = requests.get(roster_url).json()
starter_slots = roster_json['groups'][0]['slots']
bench_slots = roster_json['groups'][1]['slots']
starter_df = DataFrame([_process_player(x) for x in starter_slots])
bench_df = DataFrame([_process_player(x) for x in bench_slots])
starter_df2 = pd.concat([
_add_pos_suffix(starter_df.query(f"team_position == '{x}'"))
for x in starter_df['team_position'].unique()])
starter_df2['start'] = True
bench_df['start'] = False
team_df = pd.concat([starter_df2, bench_df], ignore_index=True)
import pandas as pd
import numpy as np
import random
# report confusion matrix with labels
def confusion_matrix(predicted, true):
if len(predicted) != len(true):
print("Error: lengths of labels do not match")
else:
d = {'predicted': predicted, 'true': true}
df = pd.DataFrame(data=d)
tp = 0
tn = 0
fp = 0
fn = 0
for row in range(len(df)):
if df['predicted'].iloc[row] == df['true'].iloc[row]:
if df['predicted'].iloc[row] == 1:
tp += 1
else:
tn += 1
else:
if df['predicted'].iloc[row] == 1:
fp += 1
else:
fn += 1
d = {' ': ['Predicted Positive (1)', 'Predicted Negative (0)'],
'Actually Positive (1)': [tp, fn],
'Actually Negative (0)': [fp, tn]}
df = pd.DataFrame(data=d)
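# For comparison only (not part of the original snippet): pandas can build the same
# labelled table directly, e.g.
#   pd.crosstab(pd.Series(true, name='Actual'), pd.Series(predicted, name='Predicted'))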
#!/usr/bin/env python
import sys, time
import numpy as np
from io import StringIO
import pickle as pickle
from pandas import DataFrame
from pandas import concat
from pandas import read_pickle
from pandas import cut
from pandas import concat
from sklearn.externals import joblib
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from djeval import *
CROSS_VALIDATION_N = 150000
MIN_SAMPLES_LEAF = 300
MIN_SAMPLES_SPLIT = 1000
FITTING_N = 50000
n_estimators = 200
cv_groups = 3
n_jobs = -1
# For when we were messing around with blundergroups:
#
# so that we can test the idea that dataset size being equal,
# we make better predictions with data specifically in that blundergroup
#
# CROSS_VALIDATION_N = 7500
# debugging 'Cannot allocate memory'
n_jobs = 1
if False:
inflation = 4
CROSS_VALIDATION_N = inflation * CROSS_VALIDATION_N
MIN_SAMPLES_LEAF = inflation * MIN_SAMPLES_LEAF
MIN_SAMPLES_SPLIT = inflation * MIN_SAMPLES_SPLIT
FITTING_N = inflation * FITTING_N
just_testing = False
if just_testing:
CROSS_VALIDATION_N = 1500
n_estimators = 2
n_jobs = -1
blunder_cv_results = []
def sample_df(df, n_to_sample):
if n_to_sample >= len(df.index.values):
return df
row_indexes = np.random.choice(df.index.values, n_to_sample, replace=False)
return df.ix[row_indexes]
def group_scorer(estimator, X, y):
pred_y = estimator.predict(X)
msg("GROUPED SCORES FOR a CV GROUP:")
dfx = DataFrame(X, columns=features_to_use)
dfx['pred_abserror'] = abs(pred_y - y)
blunder_cvgroups, blunder_cvbins = cut(dfx['movergain'], blunder_cats, retbins=True)
blunder_cvgrouped = dfx.groupby(blunder_cvgroups)['pred_abserror'].agg({'lad': np.mean})
blunder_cv_results.append(blunder_cvgrouped)
msg("scores: %s" % str(blunder_cvgrouped))
return mean_absolute_error(y, pred_y)
def crossval_rfr(df):
sampled_df = sample_df(df, CROSS_VALIDATION_N)
sample_size = len(sampled_df)
mss = max([sample_size / 150, 100])
msl = max([sample_size / 450, 30])
# rfr_here = RandomForestRegressor(n_estimators=n_estimators, n_jobs=n_jobs, min_samples_leaf=msl, min_samples_split=mss, verbose=1)
rfr_here = RandomForestRegressor(n_estimators=n_estimators, n_jobs=n_jobs, min_samples_leaf=MIN_SAMPLES_LEAF, min_samples_split=MIN_SAMPLES_SPLIT, verbose=1)
crossval_X = sampled_df[features_to_use]
crossval_y = sampled_df['elo']
crossval_weights = sampled_df['weight']
msg("Starting cross validation. %i records" % sample_size)
begin_time = time.time()
cvs = cross_val_score(rfr_here, crossval_X, crossval_y, cv=cv_groups, n_jobs=n_jobs, scoring='mean_absolute_error', fit_params={'sample_weight': crossval_weights})
msg("Cross validation took %f seconds with %i threads, %i records, %i estimators and %i CV groups" % ((time.time() - begin_time), n_jobs, len(crossval_X), n_estimators, cv_groups))
msg("Results: %f, %s" % (np.mean(cvs), str(cvs)))
return np.mean(cvs)
msg("Hi, reading moves.")
moves_df = read_pickle(sys.argv[1])
moves_file = open(sys.argv[1] + '.info', 'rb')
moves_info = pickle.load(moves_file)
categorical_features = moves_info['categorical_features']
msg("Computing weights")
game_weights = (1. / (moves_df.groupby('gamenum')['halfply'].agg({'max':np.max}).clip(1,1000)))['max']
moves_df['weight'] = moves_df['gamenum'].map(game_weights)
msg("Done")
#moves_df['abs_moverscore'] = moves_df['moverscore'].abs()
features_to_exclude = [
'elo',
'weight',
'clippedgain',
'gamenum',
]
features_to_use = [col for col in moves_df.columns if (col not in features_to_exclude and col not in categorical_features)]
#features_to_use = ['moverscore', 'halfply', 'movergain', 'side']
insample_df = moves_df[moves_df['elo'].notnull()]
do_blunder_groups = False
if do_blunder_groups:
blunder_cats = [-1e9,-1024,-512,-256,-128,-64,-32, -16, -8, -1, 0]
blunder_groups, blunder_bins = cut(insample_df['movergain'], blunder_cats, retbins=True)
msg("Doing RFR CV per blunder group")
blunder_grouped = insample_df.groupby(blunder_groups)
cv_scores = blunder_grouped.apply(lambda x: crossval_rfr(x))
msg("SCORES:")
msg(cv_scores)
msg("blunder group errors vs mean-value")
lads = blunder_grouped.apply(lambda x: np.mean(abs(x['elo'] - np.mean(x['elo']))))
msg(lads)
rfr = RandomForestRegressor(n_estimators=n_estimators, n_jobs=n_jobs, min_samples_leaf=MIN_SAMPLES_LEAF, min_samples_split=MIN_SAMPLES_SPLIT, verbose=1)
# Results: -221.781702, [-221.24820221 -222.91541283 -221.18149079]
do_crossval = True
if do_crossval:
crossval_df = sample_df(insample_df, CROSS_VALIDATION_N)
crossval_X = crossval_df[features_to_use]
crossval_y = crossval_df['elo']
crossval_weights = crossval_df['weight']
movergain_index = features_to_use.index('movergain')
msg("Starting full DF cross validation")
begin_time = time.time()
# using n_jobs=1 here because the parallelization of cross_val_score interferes with
# our gross hack of stashing info about blundergroups into a global variable as a side effect
if do_blunder_groups:
cvs = cross_val_score(rfr, crossval_X, crossval_y, cv=cv_groups, n_jobs=1, scoring=group_scorer, fit_params={'sample_weight': crossval_weights})
else:
cvs = cross_val_score(rfr, crossval_X, crossval_y, cv=cv_groups, n_jobs=n_jobs, scoring='mean_absolute_error')
msg("Cross validation took %f seconds with %i threads, %i records, %i estimators and %i CV groups" % ((time.time() - begin_time), n_jobs, len(crossval_X), n_estimators, cv_groups))
msg("Results: %f, %s" % (np.mean(cvs), str(cvs)))
if do_blunder_groups:
msg("per-blundergroup results:")
#for bcv in blunder_cv_results:
# msg("here: %s" % bcv)
concat_df = concat(blunder_cv_results, axis=1)
"""Tools for creating and manipulating neighborhood datasets."""
import os
import pathlib
from warnings import warn
import geopandas as gpd
import pandas as pd
from appdirs import user_data_dir
appname = "geosnap"
appauthor = "geosnap"
data_dir = user_data_dir(appname, appauthor)
def _fetcher(local_path, remote_path, warning_msg):
try:
t = gpd.read_parquet(local_path)
except FileNotFoundError:
warn(warning_msg)
t = gpd.read_parquet(remote_path, storage_options={"anon": True})
return t
class _Map(dict):
"""tabbable dict."""
def __init__(self, *args, **kwargs):
super(_Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(_Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(_Map, self).__delitem__(key)
del self.__dict__[key]
class DataStore:
"""Storage for geosnap data. Currently supports US Census data.
Unless otherwise noted, data are collected from the U.S. Census Bureau's TIGER/LINE Files
https://www.census.gov/cgi-bin/geo/shapefiles/index.php?year=2018 and converted to
parquet files.
"""
def __init__(self):
self
def __dir__(self):
atts = [
"acs",
"blocks_2000",
"blocks_2010",
"codebook",
"counties",
"ltdb",
"msa_definitions",
"msas",
"ncdb",
"states",
"show_data_dir",
"tracts_1990",
"tracts_2000",
"tracts_2010",
]
return atts
def show_data_dir(self):
"""Print the location of the local geosnap data storage directory.
Returns
-------
string
location of local storage directory.
"""
print(data_dir)
return data_dir
def acs(self, year=2018, level="tract", states=None):
"""American Community Survey Data.
Parameters
----------
year : str
vintage of ACS release.
level : str
geographic level
states : list, optional
subset of states (as 2-digit fips) to return
Returns
-------
geopandas.GeoDataFrame
geodataframe of ACS data indexed by FIPS code
"""
local_path = pathlib.Path(data_dir, "acs", f"acs_{year}_{level}.parquet")
remote_path = f"s3://spatial-ucr/census/acs/acs_{year}_{level}.parquet"
msg = "Streaming data from S3. Use `geosnap.io.store_acs()` to store the data locally for better performance"
t = _fetcher(local_path, remote_path, msg)
t = t.reset_index().rename(columns={"GEOID": "geoid"})
if states:
t = t[t.geoid.str[:2].isin(states)]
t["year"] = year
return t
def blocks_2000(self, states=None, fips=None):
"""Census blocks for 2000.
Parameters
----------
states : list-like
list of state fips codes to return as a dataframe.
Returns
-------
type
pandas.DataFrame or geopandas.GeoDataFrame
2000 blocks as a geodataframe or as a dataframe with geometry
stored as well-known binary on the 'wkb' column.
"""
msg = (
"Unable to locate local census 2010 block data. Streaming instead.\n"
"If you plan to use census data repeatedly you can store it locally "
"with the io.store_blocks_2010 function for better performance"
)
if isinstance(states, (str, int)):
states = [states]
blks = {}
for state in states:
local = pathlib.Path(data_dir, "blocks_2000", f"{state}.parquet")
remote = f"s3://spatial-ucr/census/blocks_2000/{state}.parquet"
blks[state] = _fetcher(local, remote, msg)
if fips:
blks[state] = blks[state][blks[state]["geoid"].str.startswith(fips)]
blks[state]["year"] = 2000
blocks = list(blks.values())
blocks = gpd.GeoDataFrame(pd.concat(blocks, sort=True))
return blocks
def blocks_2010(self, states=None, fips=None):
"""Census blocks for 2010.
Parameters
----------
states : list-like
list of state fips codes to return as a dataframe.
Returns
-------
type
pandas.DataFrame or geopandas.GeoDataFrame
2010 blocks as a geodataframe or as a dataframe with geometry
stored as well-known binary on the 'wkb' column.
"""
msg = (
"Unable to locate local census 2010 block data. Streaming instead.\n"
"If you plan to use census data repeatedly you can store it locally "
"with the io.store_blocks_2010 function for better performance"
)
if isinstance(states, (str, int)):
states = [states]
blks = {}
for state in states:
local = pathlib.Path(data_dir, "blocks_2010", f"{state}.parquet")
remote = f"s3://spatial-ucr/census/blocks_2010/{state}.parquet"
blks[state] = _fetcher(local, remote, msg)
if fips:
blks[state] = blks[state][blks[state]["geoid"].str.startswith(fips)]
blks[state]["year"] = 2010
blocks = list(blks.values())
blocks = gpd.GeoDataFrame(pd.concat(blocks, sort=True))
import matplotlib.pyplot as plt
import cantools
import pandas as pd
import cv2
import numpy as np
import os
import glob
import re
import subprocess
import json
LOG_FOLDER = "/media/andrei/Samsung_T51/nemodrive/25_nov/session_2/1543155398_log"
CAN_FILE_PATH = os.path.join(LOG_FOLDER, "can_raw.log")
DBC_FILE = "logan.dbc"
SPEED_CAN_ID = "354"
OBD_SPEED_FILE = LOG_FOLDER + "obd_SPEED.log"
CAMERA_FILE_PREFIX = os.path.join(LOG_FOLDER, "camera_*")
camera_vids_path = glob.glob(CAMERA_FILE_PREFIX + ".mkv")
camera_vids_path.sort()
vid_names = [os.path.splitext(os.path.basename(vid_path))[0] for vid_path in camera_vids_path]
vid_dirs = [os.path.dirname(vid_path) for vid_path in camera_vids_path]
cameras = [os.path.join(x, y) for x, y in zip(vid_dirs, vid_names)]
camera_logs_path = [camera_name + ".log" for camera_name in cameras]
cameras_tp_path = [camera_name + "_timestamp" for camera_name in cameras]
phone_log_path = os.path.join(LOG_FOLDER, "phone.log")
def read_can_file(can_file_path):
df_can = pd.read_csv(can_file_path, header=None, delimiter=" ")
df_can["tp"] = df_can[0].apply(lambda x: float(x[1:-1]))
df_can["can_id"] = df_can[2].apply(lambda x: x[:x.find("#")])
df_can["data_str"] = df_can[2].apply(lambda x: x[x.find("#") + 1:])
return df_can
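# Note: read_can_file expects candump-style log lines such as
#   (1543155398.123456) can0 354#0A1B2C3D4E5F0011
# where column 0 is the bracketed timestamp and column 2 is "<CAN id>#<hex payload>"
# (the interface name and payload above are made-up examples).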
def get_can_data(db, cmd, data, msg):
decoded_info = db.decode_message(cmd, bytearray.fromhex(msg))
return decoded_info[data]
# =======================================================================
# extract pts data
PTS_CMD = "ffprobe -v error -show_entries frame=pkt_pts_" \
"time -of default=noprint_wrappers=1:nokey=1 {} > {}_pts.log"
for vid_path, vid_name in zip(camera_vids_path, cameras):
cmd = PTS_CMD.format(vid_path, vid_name)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
process.wait()
print(process.returncode)
# =======================================================================
# -- Load DBC file stuff
db = cantools.database.load_file(DBC_FILE, strict=False)
cmd_names = [
("SPEED_SENSOR", "SPEED_KPS"),
("STEERING_SENSORS", "STEER_ANGLE"),
("BRAKE_SENSOR", "PRESIUNE_C_P")
]
cmd_idx = 0
cmd_name = cmd_names[cmd_idx][0]
data_name = cmd_names[cmd_idx][1]
"""
Decode values using:
Define: cmd_name, data_name, raw_can_msg
decoded_info = db.decode_message(cmd_name, bytearray.fromhex(raw_can_msg))
print(decoded_info[data_name])
ffmpeg -f v4l2 -video_size {}x{} -i /dev/video{} -c copy {}.mkv
"""
# =======================================================================
# -- Load Raw can
df_can = read_can_file(CAN_FILE_PATH)
# =======================================================================
# -- Load speed command
df_can_speed = df_can[df_can["can_id"] == SPEED_CAN_ID]
df_can_speed["speed"] = df_can_speed["data_str"].apply(lambda x: get_can_data(db, cmd_name,
data_name, x))
# df_can_speed[df_can_speed.speed > 0]
# df_can_speed["pts"] = df_can_speed["tp"] - 1539434950.220346
plt.plot(df_can_speed["tp"].values, df_can_speed["speed"])
plt.show()
# Write to csv
speed_file = os.path.join(LOG_FOLDER, "speed.csv")
df_can_speed.to_csv(speed_file)
# =======================================================================
# -- Load steer command
STEER_CMD_NAME = "STEERING_SENSORS"
STEER_CMD_DATA_NAME = "STEER_ANGLE"
STEERING_CAN_ID = "0C6"
df_can_steer = df_can[df_can["can_id"] == STEERING_CAN_ID]
df_can_steer["steering"] = df_can_steer["data_str"].apply(lambda x: get_can_data(db,
STEER_CMD_NAME,
STEER_CMD_DATA_NAME, x))
# Write to csv
steer_file = os.path.join(LOG_FOLDER, "steer.csv")
df_can_steer.to_csv(steer_file)
# --Plot can data
plt.plot(df_can_steer["tp"].values, df_can_steer["steering"])
plt.show()
# steering_values = []
# rng = 100
# for index in range(rng, len(df_can_steer)):
# x = df_can_steer.iloc[index-rng: index+1]["steering"].values
# steering_values.append(np.abs(x[1:] - x[:-1]).sum())
#
# steering_values_df = pd.Series(steering_values[:36494], name="Steering angle per second")
# steering_values_df.describe()
# # steering_values_df.plot()
# steering_values_df.plot(kind="box")
# plt.show()
# =======================================================================
# -- OBD speed file
df_speed = pd.read_csv(OBD_SPEED_FILE, header=None)
from collections import OrderedDict
import timeit
import numpy as np
import pandas as pd
from randomstate.prng import (mt19937, sfmt, dsfmt, xoroshiro128plus,
xorshift1024, pcg64)
REPS = 3
SIZE = 100000
SETUP = """
import numpy
from numpy import array, random
from randomstate.prng import (mt19937, sfmt, dsfmt, xoroshiro128plus,
xorshift1024, pcg64)
import randomstate
import randomstate.prng
rs = {prng}.RandomState(123456)
f = rs.__getattribute__('{fn}')
args = {args}
"""
prngs = (np.random, mt19937, sfmt, dsfmt, xoroshiro128plus, xorshift1024, pcg64)
functions = {'randint': {'low': 2 ** 31, 'dtype': 'uint32'},
'random_sample': {},
'random_raw': {'output': False},
'standard_exponential': {'method': 'zig'},
'standard_gamma': {'shape': 2.4, 'method': 'zig'},
'standard_normal': {'method': 'zig'},
'multinomial': {'n': 20, 'pvals': [1.0 / 6.0] * np.ones(6)},
'negative_binomial': {'n': 5, 'p': 0.16},
'poisson': {'lam': 3.0},
'complex_normal': {'gamma': 2 + 0j, 'relation': 1 + 0.5j, 'method': 'zig'},
'laplace': {'loc': 1, 'scale': 3},
'binomial': {'n': 35, 'p': 0.25}}
def timer(prng: str, fn: str, args: dict):
if prng == 'random':
# Differences with NumPy
if fn in ('random_raw', 'complex_normal'):
return np.nan
if fn in ('standard_normal','standard_exponential', 'standard_gamma'):
args = {k: v for k, v in args.items() if k != 'method'}
elif prng == 'mt19937' and fn == 'random_raw': # To make comparable
args['size'] = 2 * args['size']
setup = SETUP.format(prng=prng, fn=fn, args=args)
return min(timeit.Timer('f(**args)', setup=setup).repeat(10, REPS)) / SIZE / REPS
results = OrderedDict()
for prng in prngs:
name = prng.__name__.split('.')[-1]
speeds = OrderedDict()
for fn, args in functions.items():
args['size'] = SIZE
speeds[fn] = np.round(timer(name, fn, args) * 10 ** 9, 2)
results[name] = pd.Series(speeds)
print(name)
print(results[name])
results = pd.DataFrame(results)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 23:34:57 2019
@author: reynaldo.espana.rey
Web scrapping algorithm to build data set for text generator
source: https://towardsdatascience.com/how-to-web-scrape-with-python-in-4-minutes-bc49186a8460
"""
# =============================================================================
# Libraries
# =============================================================================
import numpy as np
import pandas as pd
import requests
import re
import time
import os
from bs4 import BeautifulSoup
import string
# =============================================================================
# Functions
# =============================================================================
# request page and make it BeautifulSoup
def get_page(url, verbose=0):
# get page
response = requests.get(url)
if verbose:
print('Successful:', str(response) =='<Response [200]>')
if str(response) =='<Response [200]>':
# BeautifulSoup data structure
soup = BeautifulSoup(response.text, 'html.parser')
return soup
return str(response)
# function to retrieve links from inspector gadget pluggin
def get_href(url, attr):
# get page
soup = get_page(url)
# get data links
data = soup.select(attr)
links = np.unique([x['href'] for x in data])
return links
def get_text(url, attr):
# get page
soup = get_page(url)
# get data links
data = soup.select(attr)
return data
# valid file name
def valid_name(value):
value = re.sub('[^\w\s-]', '', value).strip().lower()
value = re.sub('[-\s]+', '-', value)
return value
# funtion to remove chars
def remove_chars(doc, chars_2remove=None):
if chars_2remove is None:
# list of character not UTF-8 to be remove from doc
chars_2remove = ['\x85', '\x91', '\x92', '\x93', '\x94', '\x96',
'\x97', '\xa0']
# as reggex expression
chars_2remove = '[' + ''.join(chars_2remove) + ']'
# erase
doc = re.sub(chars_2remove, ' ', doc)
doc = re.sub(' +', ' ', doc).strip()
return doc
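# Quick illustrative check (hypothetical input):
#   remove_chars('foo\x85bar')  # -> 'foo bar'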
# =============================================================================
# Spanish poems
# =============================================================================
#### Spanish poems web page source
# root source
url_root = 'https://www.poemas-del-alma.com/'
## Path to use
## Retrieve poems and save it in .txt
path_poems = '../data/DB/spanish poems/'
# save list of poems links
path_poems_links = '../data/DB/poems_list.csv'
# =============================================================================
# Poems
# =============================================================================
##### POETS #####
# poems by author in alphabetial order
alphabet = [x for x in string.ascii_uppercase]
# get list of poets
poets = pd.DataFrame()
for letter in alphabet:
print(letter)
links = get_href(url_root + letter + '.html', attr='#content li a')
authors = pd.DataFrame({'author': [x.split('/')[-1].split('.')[0] for x in links],
'link': links})
poets = poets.append(authors)
time.sleep(.5)
poets = poets.reset_index(drop=True)
print('Poests found:', len(poets))
##### POEMS #####
# go throgh all the poems in poets
# run only for poems not already in folder
poems = pd.read_csv(path_poems_links)
# filter poets to scrap
poets['in_disk'] = poets['author'].isin(poems['author'])
# filter songs df
print ('Files in disk already:', poets.groupby(['in_disk']).size())
# loop to remaining poets
poets_2scrap = poets[poets['in_disk']==False]
# shuffle, else all errors will be first
poets_2scrap = poets_2scrap.sample(frac=1).reset_index(drop=True)
# loop for each poet link
for index, row in poets_2scrap.iterrows():
if (index % 25 == 0):
print('\n\n- Progress %:', index/len(poets_2scrap), '- Total poems:', len(poems))
time.sleep(5)
try:
# get page with poems links
links = get_href(row['link'], attr='#block-poems a')
time.sleep(.5)
links = pd.DataFrame({'poem': links})
# save and append
links['author'] = row['author']
links['author_link'] = row['link']
poems = poems.append(links, sort=False)
except:
print("An exception occurred:", row['link'])
time.sleep(30)
print('Poems found:', len(poems))
poems.to_csv(path_poems_links, index=False)
# =============================================================================
# COURPUS
# =============================================================================
### Create poem corpus and save it as .txt
# list of poems to search
poems = pd.read_csv(path_poems_links)
print('Poems found:', len(poems))
# run only for poems not already in folder
# get file names
poems_files = os.listdir(path_poems)
# get ids of song in disk
poems_files_ids = [x.split('.')[0] for x in poems_files]
# filter poems df
poems['id'] = [x.split('.')[0] for x in poems['poem']]
poems['in_disk'] = poems['id'].isin(poems_files_ids)
print ('Files in disk already:', poems.groupby(['in_disk']).size())
# filter files to run webscrappin
poems_2scrap = poems[poems['in_disk']==False]
# shuffle, else all errors will be first
poems_2scrap = poems_2scrap.sample(frac=1).reset_index(drop=True)
# keep count of errors
errors = 0
# loop for each poet link
for index, row in poems_2scrap.iterrows():
if (index % 20 == 0):
counter = len(poems[poems['in_disk']==True])+index
print('\n\n- Progress %: {0:.4f}'.format(index/len(poems_2scrap)),
'- Poems in disk: {}'.format(counter),
'- Total %: {0:.4f}'.format(counter/len(poems)))
try:
# get page
link = row['poem']
soup = get_page(url_root + link)
# wait 1 second to not overheat the webscraping
time.sleep(.5)
# get poem
page = soup.select('#contentfont p')
if len(page):
doc = str()
for x in page:
doc = doc + x.getText()
# encoding
doc = remove_chars(doc)
# TODO: remove chars
# save file
filename = link.split('.')[0]
with open(path_poems + filename + '.txt', "w") as text_file:
text_file.write(doc)
else:
print(link, '- poem is a set of poems')
# get links
links = get_href(url_root + link, attr='.list-poems a')
#time.sleep(.5)
links = pd.DataFrame({'poem': links})
import copy
import itertools
import os
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.preprocessing import PowerTransformer
from scipy.stats import yeojohnson
from tqdm import tqdm
import tensorflow as tf
import warnings
warnings.simplefilter("ignore")
n_wavelengths = 55
n_timesteps = 300
class read_Ariel_dataset():
def __init__(self, noisy_path_train, noisy_path_test, params_path, start_read):
"""
For reading Ariel Dataset.
:param noisy_path_train: (str) The *relative path's parent directory* from the current
working directory to all noisy training files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param noisy_path_test: (str) The *relative path's parent directory* from the current
working directory to all noisy test files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param params_path: (str) The *relative path's parent directory* from the current
working directory to all params files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param start_read: (int) How many data points to replace at the beginning of the
file. Used for preprocessing of files by replacing values before start_read
with 1.0 to minimize impact of the drop valley.
"""
super().__init__()
self.noisy_path = noisy_path_train
self.noisy_path_test = noisy_path_test
self.params_path = params_path
self.start_read = start_read
# list all files in path(s).
self.noisy_list= os.listdir(self.noisy_path)
self.noisy_list_test = os.listdir(self.noisy_path_test)
self.params_list = os.listdir(self.params_path)
# Grouped by AAAA:
self.group_noisy_list = self._group_list(self.noisy_list)
self.group_noisy_list_test = self._group_list(self.noisy_list_test)
self.group_params_list = self._group_list(self.params_list)
def _group_list_return(self):
"""
Only used for unit test purposes.
Return self.group_noisy_list and assert it is true.
"""
return self.group_noisy_list
def _choose_train_or_test(self, folder="noisy_train", batch_size=1):
"""Private function to choose train or test.
:param batch_size (int): The batch size to take. NotImplemented yet.
"""
if folder == "noisy_train":
path = self.noisy_path
files = self.noisy_list
elif folder == "noisy_test":
path = self.noisy_path_test
files = self.noisy_list_test
else:
raise FileNotFoundError("Not in the list (noisy_train, noisy_test). "
"Please input the choices in the list stated and try again.")
return path, files
def _len_noisy_list(self):
return len(self.noisy_list)
def unoptimized_read_noisy(self, folder="noisy_train", **kwargs):
"""
Read noisy files greedily, stacking them on the first axis.
First axis is the time series axis. So a file with 300x55, read
3 files would be 900x55.
:param folder (str): Which folder to do baseline transition. Choices:
"noisy_train" (default), "noisy_test".
"""
path, files = self._choose_train_or_test(folder=folder, **kwargs)
predefined = pd.DataFrame()
for item in files:
# Concatenate filename and their parent folder.
relative_file_path = path + "/" + item
# Renaming the columns
names = [item[-14:-4] + f"_{i}" for i in range(n_timesteps)]
curr = pd.read_csv(relative_file_path, delimiter="\t", skiprows=6, header=None)
curr.rename(columns={x: y for x, y in zip(curr.columns, names)}, inplace=True)
# Concatenating the pandas.
predefined = pd.concat([predefined, curr], axis=1)
return predefined
def unoptimized_read_params(self):
"""
Read params files greedily, stacking them on the first axis.
"""
predefined = pd.DataFrame()
for item in self.params_list:
# Relative file path:
relative_file_path = self.params_path + "/" + item
names = [item[-14:-4]] # Have to be a list to take effect
curr = pd.read_csv(relative_file_path, delimiter="\t", skiprows=2, header=None).T
curr.rename(columns = {x: y for x, y in zip(curr.columns, names)}, inplace=True)
predefined = pd.concat([predefined, curr], axis=1)
return predefined
def _group_list(self, mylist):
"""
Group list together. Here the function is specific to group AAAA together into
a sublist to not cramp the memory and dataframe I/O.
"""
return [list(v) for i, v in itertools.groupby(mylist, lambda x: x[:4])]
def read_noisy_extra_param(self, folder="train", saveto="./feature_store/noisy_train"):
"""
Read the extra 6 stellar and planet parameters in noisy files.
:param folder (str): "train" or "test" choice. Default "train" for noisy train set.
:param saveto (str): The directory to save to. Will make the directory if not
already exists.
"""
header = ["star_temp", "star_logg", "star_rad", "star_mass", "star_k_mag", "period"]
predefined = pd.DataFrame()
if saveto[-1] != "/":
saveto += "/"
try:
os.makedirs(saveto)
except OSError as e:
pass
if folder == "train":
path = self.noisy_path
mylist = self.group_noisy_list
elif folder == "test":
path = self.noisy_path_test
mylist = self.group_noisy_list_test
else:
raise ValueError("Invalid 'folder' entry. Please choose between 'train' or 'test'.")
# To ensure small enough, read them into groups of csv first.
for grouped_item in tqdm(mylist):
for item in grouped_item:
temp_storage_float = []
relative_file_path = path + "/" + item
with open(relative_file_path, "r") as f:
temp_storage_str = list(itertools.islice(f, 6))
# Preprocess for numbers only
for string in temp_storage_str:
# Separate the digits and the non-digits.
new_str = ["".join(x) for _, x in itertools.groupby(string, key=str.isdigit)]
# Only new_str[0] is the one we want to omit.
# We want to join back into a single string because "." previously is classifed
# as non-digit.
new_str = "".join(new_str[1:])
# Convert to float.
temp_storage_float.append(float(new_str))
# Convert to pandas DataFrame.
temp_storage_float = pd.DataFrame(temp_storage_float)
# Define file name
names = [item[-14:-4]]
# Change the column name
temp_storage_float.rename(columns =
{x: y for x, y in zip(temp_storage_float.columns, names)},
inplace=True
)
# Change the row names for predefined (optional for readability)
temp_storage_float.rename(index = {x: y for x, y in zip(range(6), header)},
inplace=True)
predefined = pd.concat([predefined, temp_storage_float], axis=1)
predefined.to_csv(saveto + item[:4] + ".csv")
# Reset predefined
predefined = pd.DataFrame()
# Then concatenate the csv files.
saved_list = os.listdir(saveto)
predefined = pd.DataFrame()
for item in saved_list:
relative_file_path = saveto + item
name = [item[:-4]] # ignore the .csv at the end.
temp_df = pd.read_csv(relative_file_path, index_col=0)
predefined = pd.concat([predefined, temp_df], axis=1)
return predefined
def read_params_extra_param(self, saveto="./feature_store/params_train"):
"""
Read the extra 2 intermediate target params in the params files.
"""
header = ["sma", "incl"]
predefined = pd.DataFrame()
if saveto[-1] != "/":
saveto += "/"
try:
os.makedirs(saveto)
except OSError as e:
pass
mylist = self.group_params_list # Since we only have one folder, so hardcoded here.
for grouped_item in tqdm(mylist):
for item in grouped_item:
temp_storage_float = []
relative_file_path = self.params_path + "/" + item
with open(relative_file_path, "r") as f:
temp_storage_str = list(itertools.islice(f, 2))
# Preprocess for numbers only
for string in temp_storage_str:
# Separate the digits and the non-digits.
new_str = ["".join(x) for _, x in itertools.groupby(string, key=str.isdigit)]
# Only new_str[0] is the one we want to omit.
# We want to join back into a single string because "." previously is classifed
# as non-digit.
new_str = "".join(new_str[1:])
# Convert to float.
temp_storage_float.append(float(new_str))
# Convert to pandas DataFrame.
temp_storage_float = pd.DataFrame(temp_storage_float)
import random
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Explanation "Where": Plot for explanation in fundamentals chapter
# 1. Generate data with gaussian, uniform and mixed distribution
n = 3000
var = 0.12
# Dimension 1
dim1_sequence_100percent_gaussian = np.random.normal(0.5, var, n)
dim1_sequence_100percent_uniform = np.random.uniform(-0.5, 1.5, n)
dim1_sequence_mixed = np.append(np.random.normal(0.5, var, int(n/2)), np.random.uniform(-0.5, 1.5, n))
# Dimension 2
dim2_sequence_100percent_gaussian = np.random.normal(0.5, var, n)
dim2_sequence_100percent_uniform = np.random.uniform(-0.5, 1.5, n)
dim2_sequence_mixed = np.append(np.random.normal(0.5, var, int(n/2)), np.random.uniform(-0.5, 1.5, n))
# Shuffle data
random.shuffle(dim1_sequence_100percent_gaussian)
random.shuffle(dim1_sequence_100percent_uniform)
random.shuffle(dim1_sequence_mixed)
random.shuffle(dim2_sequence_100percent_gaussian)
random.shuffle(dim2_sequence_100percent_uniform)
random.shuffle(dim2_sequence_mixed)
# 2. Generate 2-dimensional dataset
# Gaussian-uniform
df_gaussian_uniform = pd.DataFrame()
df_gaussian_uniform['Dim 1']=pd.Series(dim1_sequence_100percent_gaussian)
df_gaussian_uniform['Dim 2']=pd.Series(dim2_sequence_100percent_uniform)
# Gaussian-mixed
df_gaussian_mixed = pd.DataFrame()
df_gaussian_mixed['Dim 1']=pd.Series(dim1_sequence_100percent_gaussian)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code that calls the wset API functions.
How the data are downloaded:
1. Bisecting over dates downloads the least data, but once part of it is already on disk, a one-position shift during backfill forces a full re-download.
2. For files: group them three at a time; if all three in a group are identical, delete the middle one, and repeat until nothing more can be deleted, then stop.
"""
import os
import pandas as pd
from .utils import asDateTime
def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
"""
Sector constituents.
CITIC Securities level-1 industry indices: the date seems unnecessary, since calendar days are also returned.
Risk-warning stocks: the date is just the query date and stock names are the latest ones, so this is of little use.
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
:param w:
:param sector:
:param date:
:return:
"""
param = 'date=%s' % date
if sector:
param += ';sector=%s' % sector
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("sectorconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
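# Example call (hypothetical; assumes `w` is an already-started Wind API handle):
#   csi300 = download_sectorconstituent(w, date='2017-03-03', sector=None,
#                                       windcode='000300.SH', field='wind_code')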
def download_indexconstituent(w, date, windcode, field='wind_code,i_weight'):
"""
Index constituent weights.
If the specified date is not a trading day, data for the previous trading day is returned.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'date=%s' % date
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("indexconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def download_optioncontractbasicinfo(w, exchange='sse', windcode='510050.SH', status='trading',
field='wind_code,trade_code,sec_name,contract_unit,listed_date,expire_date,reference_price'):
"""
Basic information for option contracts on the given exchange and underlying.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'exchange=%s' % exchange
param += ';windcode=%s' % windcode
param += ';status=%s' % status
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("optioncontractbasicinfo", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def download_optionchain(w, date='2017-11-28', us_code='510050.SH',
field='option_code,option_name,strike_price,multiplier'):
"""
Download option chain data for the specified date.
w_wset_data = vba_wset("optionchain","date=2017-11-28;us_code=510050.SH;option_var=全部;call_put=全部;field=option_code,option_name,strike_price,multiplier",)
:param w:
:param windcode:
:param date:
:return:
"""
param = 'date=%s' % date
param += ';us_code=%s' % us_code
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("optionchain", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def read_constituent(path):
"""
Read a constituent file from disk.
:param path:
:return:
"""
try:
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True)
except Exception as e:
return None
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
def read_sectorconstituent_from_dir(path, key_field='wind_code'):
"""
Read every constituent file in a directory and stack them together.
:param path:
:param key_field:
:return:
"""
last_set = None
df = None
for parent, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(parent, filename)
curr_df = read_constituent(filepath)
# Adjacent files may contain identical data, so keep only the first of a run; this speeds up processing
curr_set = set(curr_df[key_field])
if last_set == curr_set:
last_set = curr_set
continue
last_set = curr_set
data_date_str = filename[:-4]
curr_df['_datetime_'] = pd.to_datetime(data_date_str)
if df is None:
df = curr_df
else:
df = pd.concat([df, curr_df])
return df
def write_constituent(path, df):
df.to_csv(path, encoding='utf-8-sig', date_format='%Y-%m-%d', index=False)
def read_indexconstituent_from_dir(path):
"""
Because the weights differ every day, data can only be downloaded for the dates the user specifies.
:param path:
:return:
"""
last_set = None
df = None
for parent, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(parent, filename)
curr_df = read_constituent(filepath)
# 2016-12-12: some newly added constituents have NaN weights
curr_df.fillna(0, inplace=True)
data_date_str = filename[:-4]
curr_df['_datetime_'] = pd.to_datetime(data_date_str)
if df is None:
df = curr_df
else:
df = pd.concat([df, curr_df])
def report_classification(df_features,df_target,algorithms='default',test_size=0.3,scaling=None,
large_data=False,encode='dummy',average='binary',change_data_type = False,
threshold=8,random_state=None):
'''
df_features : Pandas DataFrame
df_target : Pandas Series
algorithms : List ,'default'=
[LogisticRegression(),
GaussianNB(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
XGBClassifier()]
The above are the default algorithms, if one needs any specific algorithms, they have to import
libraries then pass the instances of alogorith as list
For example, if one needs random forest and adaboost only, then pass
algorithms=[RandomForestClassifier(max_depth=8),AdaBoostClassifier()]
Note that these classes must be imported before being passed in, e.g.
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
scaling : {'standard-scalar', 'min-max'} or None , default=None
encode : {'dummy','onehot','label'} ,default='dummy'
change_data_type : bool, default=False
Some columns will be of numerical datatype though there are only 2-3 unique values in that column,
so these columns must be converted to object as it is more relevant.
By setting change_data_type= True , these columns will be converted into object datatype
threshold : int ,default=8
Maximum unique value a column can have
large_data : bool, default=False
If the dataset is large then the parameter large_data should be set to True,
make sure your system has enough memory before setting large_data=True
average : {'micro', 'macro', 'samples','weighted', 'binary'} or None, default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
random_state : int, RandomState instance or None, default=None
'''
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix,roc_auc_score,roc_curve,accuracy_score,recall_score,precision_score,f1_score
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from warnings import filterwarnings
filterwarnings('ignore')
print("Shape of the data :",df_features.shape)
print("---------------------------------------")
#Check if there is any missing values
if df_features.isna().sum().sum()==0:
df_num=df_features.select_dtypes(exclude="object")
#Some columns will be of numerical datatype though there are only 2-3 unique values in that column
#Here the if-condition will check if the unique values are less than the specified threshold in each column
if change_data_type == True:
for i in df_num.columns:
if len(df_num[i].value_counts())<threshold:
#The datatype will be changed to object if the condition is not satisfied
df_features[i] = df_features[i].astype('object')
print("Datatype of {} changed to 'object as there were less than {} unique values".format(i,threshold))
print("-----------------------------------------------------------------------------------------")
else:
pass
#In some features like movie-tiltle,id,etc where there will be many unique values must be must be dropped
#These features can also be label encoded and then can be passed
df_cat=df_features.select_dtypes(include="object")
for i in df_cat:
if df_features[i].nunique()>threshold:
raise Exception("Recheck the datatype of {}, as there are more than {} unique values or change the datatype of {}".format(i,threshold))
df_num=df_features.select_dtypes(exclude="object")
#Encoding of categorical features
if df_cat.shape[1]!=0:
#Dummy-encoding
if encode == 'dummy':
print("Encoding : Dummy Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat,drop_first=True)
X=pd.concat([encoding,df_num],axis=1)
#Onehot encoding
elif encode == 'onehot':
print("Encoding : One-hot Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat)
X=pd.concat([encoding,df_num],axis=1)
#Label encoding
elif encode == 'label':
print("Encoding : Label Encoding" )
print("---------------------------------------")
encoding=df_cat.apply(LabelEncoder().fit_transform)
X=pd.concat([encoding,df_num],axis=1)
#If there are no categorical features
else:
X=df_features
#Encoding of target column
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(df_target)
#Value count of target column
count=pd.Series(y).value_counts()
print("Value count of target variable :")
for i in range(len(count)):
print("Count of {}s is {} ".format(count.index[i],count.values[i]))
print("---------------------------------------")
#Scaling
#Standard scaling
if scaling=='standard-scalar':
print("Scaling : StandardScalar")
print("---------------------------------------")
ss=StandardScaler()
X=ss.fit_transform(X)
#MinmaxScalar
elif scaling=='min-max':
print("Scaling : MinmaxScalar")
print("---------------------------------------")
mm=MinMaxScaler()
X=mm.fit_transform(X)
else:
print("Scaling : None")
print("---------------------------------------")
#Condition to check how large the data after encoding
if (X.shape[0]*X.shape[1] < 1000000) | large_data==True:
print("Number of Datapoints :",X.shape[0]*X.shape[1])
print("---------------------------------------")
else:
raise Exception("Data too large to process, if you want to still execute, set parameter large_data=False")
#Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
print("Test size for train test split :",test_size)
print("---------------------------------------")
#Algorithms
if algorithms == 'default':
algorithms=[LogisticRegression(),
GaussianNB(),
DecisionTreeClassifier(random_state=random_state),
RandomForestClassifier(random_state=random_state),
GradientBoostingClassifier(random_state=random_state),
AdaBoostClassifier(random_state=random_state),
XGBClassifier(random_state=random_state,verbosity=0)]
else:
algorithms=algorithms
#Binary Classification
if df_target.nunique()<3:
results=pd.DataFrame(columns=["Algorithm_name",'Train_accuracy','Test_accuracy',
"Test_Roc_Auc_score",'Test_recall','Test_precision'])
for i in algorithms:
print("Executing :",i)
i.fit(X_train, y_train)
train_pred_i=i.predict(X_train)
train_acc=accuracy_score(y_train,train_pred_i)
test_pred_i=i.predict(X_test)
test_acc=accuracy_score(y_test,test_pred_i)
recall=recall_score(y_test,test_pred_i,average=average)
precision=precision_score(y_test,test_pred_i,average=average)
roc_auc=roc_auc_score(y_test,test_pred_i)
row={"Algorithm_name":str(i)[:-2],'Train_accuracy':train_acc,"Test_accuracy":test_acc,
"Test_Roc_Auc_score":roc_auc,'Test_recall':recall,"Test_precision":precision}
results=results.append(row,ignore_index=True)
return results
#Multiclass Classification
else:
results=pd.DataFrame(columns=["Algorithm_name",'Train_accuracy','Test_accuracy',"f1_score"])
for i in algorithms:
print("Executing :",i)
i.fit(X_train, y_train)
train_pred_i=i.predict(X_train)
train_acc=accuracy_score(y_train,train_pred_i)
test_pred_i=i.predict(X_test)
test_acc=accuracy_score(y_test,test_pred_i)
f1=f1_score(y_test,test_pred_i,average=average)
row={"Algorithm_name":str(i)[:-2],'Train_accuracy':train_acc,"Test_accuracy":test_acc,"f1_score":f1}
results=results.append(row,ignore_index=True)
return results
else:
raise Exception("The data contains missing values, first handle missing values and then pass the data")
def report_regression(df_features,df_target,algorithms='default',test_size=0.3,
scaling=None,large_data=False,change_data_type=True,encode='dummy',
threshold=8,random_state=None):
'''
df_features : Pandas DataFrame
df_target : Pandas Series
algorithms : List ,'default'=
[LinearRegression(),
Lasso(),
Ridge(),
RandomForestRegressor(),
GradientBoostingRegressor(),
AdaBoostRegressor(),
XGBRegressor]
The above are the default algorithms, if one needs any specific algorithms, they have to import
libraries then pass the instances of alogorith as list
For example, if one needs random forest and adaboost only, then pass
algorithms=[RandomForestRegressor(max_depth=8),AdaBoostRegressor()]
Note that these classes must be imported before being passed in, e.g.
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
scaling : {'standard-scalar', 'min-max'} or None , default=None
encode : {'dummy','onehot','label'} ,default='dummy'
change_data_type : bool, default=False
Some columns will be of numerical datatype though there are only 2-3 unique values in that column,
so these columns must be converted to object as it is more relevant.
By setting change_data_type= True , these columns will be converted into object datatype
threshold : int ,default=8
Maximum unique value a column can have
large_data : bool, default=False
If the dataset is large then the parameter large_data should be set to True,
make sure your system has enough memory before setting large_data=True
random_state : int, RandomState instance or None, default=None
'''
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
print("Shape of data :",df_features.shape)
print("---------------------------------------")
#Check if there is any missing values
if df_features.isna().sum().sum()==0:
df_num=df_features.select_dtypes(exclude="object")
#Some columns will be of numerical datatype though there are only 2-3 unique values in that column
#Here the if-condition will check if the unique values are less than the specified threshold in each column
if change_data_type == True:
for i in df_num.columns:
#The datatype will be changed to object if the condition is not satisfied
if len(df_num[i].value_counts())<threshold:
df_features[i] = df_features[i].astype('object')
print("Datatype of {} changed to 'object as there were less than {} unique values".format(i,threshold))
print("-----------------------------------------------------------------------------------------")
else:
pass
#In some features like movie-tiltle,id,etc where there will be many unique values must be must be dropped
#These features can also be label encoded and then can be passed
df_cat=df_features.select_dtypes(include="object")
for i in df_cat:
if df_features[i].nunique()>threshold:
raise Exception("Recheck the datatype of {}, as there are more than {} unique values or change the datatype of {}".format(i,threshold))
df_num=df_features.select_dtypes(exclude="object")
#Encoding of categorical features
if df_cat.shape[1]!=0:
#Dummy Encoding
if encode == 'dummy':
print("Encoding : Dummy Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat,drop_first=True)
X=pd.concat([encoding,df_num],axis=1)
#Onehot encoding
elif encode == 'onehot':
print("Encoding : One-hot Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat)
X=pd.concat([encoding,df_num],axis=1)
#Label encoding
elif encode == 'label':
print("Encoding : Label Encoding" )
print("---------------------------------------")
encoding=df_cat.apply(LabelEncoder().fit_transform)
X=pd.concat([encoding,df_num],axis=1)
else:
X=df_features
#Encoding of target column
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(df_target)
#Scaling
#Scaling of features by StandardScalar
if scaling=='standard-scalar':
print("Scaling : standardScalar")
print("---------------------------------------")
ss=StandardScaler()
X=ss.fit_transform(X)
#Scaling of features by MinmaxScalar
elif scaling=='min-max':
print("Scaling : inmaxScalar")
print("---------------------------------------")
mm=MinMaxScaler()
X=mm.fit_transform(X)
else:
print("Scaling : None")
print("---------------------------------------")
#Condition to check how large the data after encoding
if (X.shape[0]*X.shape[1] < 1000000) | large_data==True:
print("Number of Datapoints :",X.shape[0]*X.shape[1])
print("---------------------------------------")
else:
raise Exception("Data too large to process, if you want to still execute, set parameter large_data=False")
#Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,random_state=random_state)
print("Test size for train test split :",test_size)
print("---------------------------------------")
if algorithms == 'default':
algorithms=[LinearRegression(),
Lasso(random_state=random_state),
Ridge(random_state=random_state),
DecisionTreeRegressor(random_state=random_state),
RandomForestRegressor(random_state=random_state),
GradientBoostingRegressor(random_state=random_state),
AdaBoostRegressor(random_state=random_state),
XGBRegressor(random_state=random_state)]
else:
algorithms=algorithms
results= pd.DataFrame(columns=["Algorithm_name",'R-Squared','Adj. R-Squared','Train-RMSE','Test-RMSE'])
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_stream():
df = pd.DataFrame(data_stream)
filename = os.path.join(testdir, "health.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)
def test_stream_table_rotated():
df = pd.DataFrame(data_stream_table_rotated)
filename = os.path.join(testdir, "clockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)
def test_stream_two_tables():
df1 = pd.DataFrame(data_stream_two_tables_1)
df2 = pd.DataFrame(data_stream_two_tables_2)
filename = os.path.join(testdir, "tabula/12s0324.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
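
# Illustrative sketch (not a test): how parsed tables are typically inspected
# beyond equality asserts. "example.pdf" is a placeholder path; `parsing_report`
# and `export` are assumed to be available as in recent camelot-py releases.
def _demo_inspect_tables():
    tables = camelot.read_pdf("example.pdf", flavor="stream", pages="1")
    for table in tables:
        print(table.parsing_report)  # accuracy, whitespace, order and page
        print(table.df.head())       # parsed cells as a pandas DataFrame
    tables.export("example_tables.csv", f="csv")  # writes one CSV per table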
def test_stream_table_regions():
df = pd.DataFrame(data_stream_table_areas)
filename = os.path.join(testdir, "tabula/us-007.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", table_regions=["320,460,573,335"]
)
assert_frame_equal(df, tables[0].df)
def test_stream_table_areas():
df = pd.DataFrame(data_stream_table_areas)
filename = os.path.join(testdir, "tabula/us-007.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", table_areas=["320,500,573,335"]
)
assert_frame_equal(df, tables[0].df)
def test_stream_columns():
df = pd.DataFrame(data_stream_columns)
filename = os.path.join(testdir, "mexican_towns.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", columns=["67,180,230,425,475"], row_tol=10
)
assert_frame_equal(df, tables[0].df)
def test_stream_split_text():
df = pd.DataFrame(data_stream_split_text)
filename = os.path.join(testdir, "tabula/m27.pdf")
tables = camelot.read_pdf(
filename,
flavor="stream",
columns=["72,95,209,327,442,529,566,606,683"],
split_text=True,
)
assert_frame_equal(df, tables[0].df)
def test_stream_flag_size():
df = pd.DataFrame(data_stream_flag_size)
filename = os.path.join(testdir, "superscript.pdf")
tables = camelot.read_pdf(filename, flavor="stream", flag_size=True)
assert_frame_equal(df, tables[0].df)
def test_stream_strip_text():
    df = pd.DataFrame(data_stream_strip_text)
# This script performs the statistical analysis for the pollution growth paper
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data.csv')
W = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data_W_reference.csv', header = None)
# Creating a reference list of nations
nations = list(data.Country.unique())
# Prepping data for pollution regression
# Data sets for individual pollutants
co2_data = data[['ln_co2', 'ln_co2_lag', 'ln_sk', 'ln_n5', 'ln_co2_intensity_rate', 'Country', 'Year', 'ln_co2_intensity_lag']].dropna()
ch4_data = data[['ln_ch4', 'ln_ch4_lag', 'ln_sk', 'ln_n5', 'ln_ch4_intensity_rate', 'Country', 'Year', 'ln_ch4_intensity_lag']].dropna()
nox_data = data[['ln_nox', 'ln_nox_lag', 'ln_sk', 'ln_n5', 'ln_nox_intensity_rate', 'Country', 'Year', 'ln_nox_intensity_lag']].dropna()
# Creating dummy variables for each pollutant
co2_national_dummies = pd.get_dummies(co2_data['Country'])
co2_year_dummies = pd.get_dummies(co2_data['Year'])
ch4_national_dummies = pd.get_dummies(ch4_data['Country'])
ch4_year_dummies = pd.get_dummies(ch4_data['Year'])
nox_national_dummies = pd.get_dummies(nox_data['Country'])
nox_year_dummies = pd.get_dummies(nox_data['Year'])
# Replacing Country and Year with fixed effects
co2_data = pd.concat([co2_data, co2_national_dummies, co2_year_dummies], axis = 1)
ch4_data = pd.concat([ch4_data, ch4_national_dummies, ch4_year_dummies], axis = 1)
nox_data = pd.concat([nox_data, nox_national_dummies, nox_year_dummies], axis = 1)
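# Drop the raw Country/Year columns plus one reference category from each dummy group
# (1971 and United States) to avoid the dummy-variable trap (perfect multicollinearity)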
co2_data = co2_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
ch4_data = ch4_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
nox_data = nox_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
# Create the Y and X matrices
CO2 = co2_data['ln_co2']
CH4 = ch4_data['ln_ch4']
NOX = nox_data['ln_nox']
X_CO2 = co2_data.drop(['ln_co2'], axis = 1)
X_CH4 = ch4_data.drop(['ln_ch4'], axis = 1)
X_NOX = nox_data.drop(['ln_nox'], axis = 1)
# Running pollution regressions
co2_mod = stats.OLS(CO2, X_CO2)
ch4_mod = stats.OLS(CH4, X_CH4)
nox_mod = stats.OLS(NOX, X_NOX)
models = [co2_mod, ch4_mod, nox_mod]
names = ['CO2', 'CH4', 'NOx']
res_list = []
for mod in models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/' + names[models.index(mod)] + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab.txt')
# After running the conditional convergence models, we set up the network effects models
# Compute technology growth rate
# \widetilde{g} = \left(\frac{1}{T}\right)\sum\limits_{t=1}^{T}\left(\frac{\eta_{t}}{t-\gamma(t-1)}\right)
g_co2 = (1/23) * sum([(co2_mod.fit().params[i] / ((i-1971) - (co2_mod.fit().params['ln_co2_lag'] * (i-1972)))) for i in range(1972,2015)])
g_ch4 = (1/21) * sum([(ch4_mod.fit().params[i] / ((i-1971) - (ch4_mod.fit().params['ln_ch4_lag'] * (i-1972)))) for i in range(1972,2013)])
g_nox = (1/21) * sum([(nox_mod.fit().params[i] / ((i-1971) - (nox_mod.fit().params['ln_nox_lag'] * (i-1972)))) for i in range(1972,2013)])
# Add technology parameters to the dataframe
co2_tech = []
ch4_tech = []
nox_tech = []
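# Technology level for country i in year t is the estimated country fixed effect plus
# the common growth trend: A_i(t) = eta_i + g * (t - 1971)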
for i in range(len(data)):
if data.Year[i] > 1970 and data.Country[i] in co2_mod.fit().params.keys():
co2_tech.append(co2_mod.fit().params[data.Country[i]] + (g_co2 * (data.Year[i] - 1971)))
else:
co2_tech.append('')
if data.Year[i] > 1970 and data.Country[i] in ch4_mod.fit().params.keys():
ch4_tech.append(ch4_mod.fit().params[data.Country[i]] + (g_ch4 * (data.Year[i] - 1971)))
else:
ch4_tech.append('')
if data.Year[i] > 1970 and data.Country[i] in nox_mod.fit().params.keys():
nox_tech.append(nox_mod.fit().params[data.Country[i]] + (g_nox * (data.Year[i] - 1971)))
else:
nox_tech.append('')
# Add technology values to data set
co2_tech = pd.Series(co2_tech, name = 'co2_tech')
ch4_tech = pd.Series(ch4_tech, name = 'ch4_tech')
nox_tech = pd.Series(nox_tech, name = 'nox_tech')
data = pd.concat([data, co2_tech, ch4_tech, nox_tech], axis = 1)
# Convert '' to np.nan to use pandas dropna
data[data[['co2_tech', 'ch4_tech', 'nox_tech']] == ''] = np.nan
# Data prep for network effects regressions for intensities
tc_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'TC_CO2_ROB', 'Country', 'Year']].dropna()
tc_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'TC_CH4_ROB', 'Country', 'Year']].dropna()
tc_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'TC_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(tc_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(tc_co2_rob['Year'])
ch4_national_dummies = pd.get_dummies(tc_ch4_rob['Country'])
ch4_year_dummies = pd.get_dummies(tc_ch4_rob['Year'])
nox_national_dummies = pd.get_dummies(tc_nox_rob['Country'])
nox_year_dummies = pd.get_dummies(tc_nox_rob['Year'])
xtc_co2_rob = pd.concat([tc_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xtc_ch4_rob = pd.concat([tc_ch4_rob, ch4_national_dummies, ch4_year_dummies], axis = 1).drop(['ch4_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xtc_nox_rob = pd.concat([tc_nox_rob, nox_national_dummies, nox_year_dummies], axis = 1).drop(['nox_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
exp_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'EXP_CO2_ROB', 'Country', 'Year']].dropna()
exp_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'EXP_CH4_ROB', 'Country', 'Year']].dropna()
exp_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'EXP_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(exp_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(exp_co2_rob['Year'])
ch4_national_dummies = pd.get_dummies(exp_ch4_rob['Country'])
ch4_year_dummies = pd.get_dummies(exp_ch4_rob['Year'])
nox_national_dummies = pd.get_dummies(exp_nox_rob['Country'])
nox_year_dummies = pd.get_dummies(exp_nox_rob['Year'])
xexp_co2_rob = pd.concat([exp_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xexp_ch4_rob = pd.concat([exp_ch4_rob, ch4_national_dummies, ch4_year_dummies], axis = 1).drop(['ch4_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xexp_nox_rob = pd.concat([exp_nox_rob, nox_national_dummies, nox_year_dummies], axis = 1).drop(['nox_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
imp_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'IMP_CO2_ROB', 'Country', 'Year']].dropna()
imp_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'IMP_CH4_ROB', 'Country', 'Year']].dropna()
imp_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'IMP_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(imp_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(imp_co2_rob['Year'])
ch4_national_dummies = pd.get_dummies(imp_ch4_rob['Country'])
ch4_year_dummies = pd.get_dummies(imp_ch4_rob['Year'])
#!/usr/bin/env python
"""
coding=utf-8
Build model for a dataset by identifying type of column along with its
respective parameters.
"""
from __future__ import print_function
from __future__ import division
import copy
import random
import re
from collections import OrderedDict, defaultdict
import warnings
import pickle
from datetime import datetime
import logging
import pandas as pd
import numpy as np
from . import utils
from .. import data_readers
from .column_profile_compilers import ColumnPrimitiveTypeProfileCompiler, \
ColumnStatsProfileCompiler, ColumnDataLabelerCompiler, UnstructuredCompiler
from ..labelers.data_labelers import DataLabeler
from .helpers.report_helpers import calculate_quantiles, _prepare_report
from .profiler_options import ProfilerOptions, StructuredOptions, \
UnstructuredOptions
from .. import dp_logging
logger = dp_logging.get_child_logger(__name__)
class StructuredColProfiler(object):
def __init__(self, df_series=None, sample_size=None, min_sample_size=5000,
sampling_ratio=0.2, min_true_samples=None,
sample_ids=None, pool=None, options=None):
"""
Instantiate the StructuredColProfiler class for a given column.
:param df_series: Data to be profiled
:type df_series: pandas.core.series.Series
:param sample_size: Number of samples to use in generating profile
:type sample_size: int
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:param sample_ids: Randomized list of sample indices
:type sample_ids: list(list)
:param pool: pool utilized for multiprocessing
:type pool: multiprocessing.Pool
:param options: Options for the structured profiler.
:type options: StructuredOptions Object
"""
self.name = None
self.options = options
self._min_sample_size = min_sample_size
self._sampling_ratio = sampling_ratio
self._min_true_samples = min_true_samples
if self._min_true_samples is None:
self._min_true_samples = 0
self.sample_size = 0
self.sample = list()
self.null_count = 0
self.null_types = list()
self.null_types_index = {}
self._min_id = None
self._max_id = None
self._index_shift = None
self._last_batch_size = None
self.profiles = {}
NO_FLAG = 0
self._null_values = {
"": NO_FLAG,
"nan": re.IGNORECASE,
"none": re.IGNORECASE,
"null": re.IGNORECASE,
" *": NO_FLAG,
"--*": NO_FLAG,
"__*": NO_FLAG,
}
if options:
if options.null_values is not None:
self._null_values = options.null_values
if df_series is not None and len(df_series) > 0:
if not sample_size:
sample_size = self._get_sample_size(df_series)
if sample_size < len(df_series):
warnings.warn("The data will be profiled with a sample size of {}. "
"All statistics will be based on this subsample and "
"not the whole dataset.".format(sample_size))
clean_sampled_df, base_stats = \
self.clean_data_and_get_base_stats(
df_series=df_series, sample_size=sample_size,
null_values=self._null_values,
min_true_samples=self._min_true_samples,
sample_ids=sample_ids)
self.update_column_profilers(clean_sampled_df, pool)
self._update_base_stats(base_stats)
def update_column_profilers(self, clean_sampled_df, pool):
"""
Calculates type statistics and labels dataset
:param clean_sampled_df: sampled series with none types dropped
:type clean_sampled_df: Pandas.Series
:param pool: pool utilized for multiprocessing
:type pool: multiprocessing.pool
"""
if self.name is None:
self.name = clean_sampled_df.name
if self.name != clean_sampled_df.name:
            raise ValueError(
                'Column names have changed, col {} does not match prior name {}'
                .format(clean_sampled_df.name, self.name)
            )
# First run, create the compilers
if self.profiles is None or len(self.profiles) == 0:
self.profiles = {
'data_type_profile':
ColumnPrimitiveTypeProfileCompiler(
clean_sampled_df, self.options, pool),
'data_stats_profile':
ColumnStatsProfileCompiler(
clean_sampled_df, self.options, pool)
}
use_data_labeler = True
if self.options and isinstance(self.options, StructuredOptions):
use_data_labeler = self.options.data_labeler.is_enabled
if use_data_labeler:
self.profiles.update({
'data_label_profile':
ColumnDataLabelerCompiler(
clean_sampled_df, self.options, pool)
})
else:
# Profile compilers being updated
for profile in self.profiles.values():
profile.update_profile(clean_sampled_df, pool)
def __add__(self, other):
"""
Merges two Structured profiles together overriding the `+` operator.
        :param other: structured profile being added to this one.
:type other: StructuredColProfiler
:return: merger of the two structured profiles
"""
if type(other) is not type(self):
raise TypeError('`{}` and `{}` are not of the same profiler type.'.
format(type(self).__name__, type(other).__name__))
elif self.name != other.name:
raise ValueError('Structured profile names are unmatched: {} != {}'
.format(self.name, other.name))
elif set(self.profiles) != set(other.profiles): # options check
raise ValueError('Structured profilers were not setup with the same'
' options, hence they do not calculate the same '
'profiles and cannot be added together.')
merged_profile = StructuredColProfiler(
df_series=pd.Series([]),
min_sample_size=max(self._min_sample_size, other._min_sample_size),
sampling_ratio=max(self._sampling_ratio, other._sampling_ratio),
min_true_samples=max(self._min_true_samples,
other._min_true_samples),
options=self.options,
)
merged_profile.name = self.name
merged_profile._update_base_stats(
{"sample": self.sample,
"sample_size": self.sample_size,
"null_count": self.null_count,
"null_types": copy.deepcopy(self.null_types_index),
"min_id": self._min_id,
"max_id": self._max_id}
)
merged_profile._update_base_stats(
{"sample": other.sample,
"sample_size": other.sample_size,
"null_count": other.null_count,
"null_types": copy.deepcopy(other.null_types_index),
"min_id": other._min_id,
"max_id": other._max_id}
)
samples = list(dict.fromkeys(self.sample + other.sample))
merged_profile.sample = random.sample(samples, min(len(samples), 5))
for profile_name in self.profiles:
merged_profile.profiles[profile_name] = (
self.profiles[profile_name] + other.profiles[profile_name]
)
return merged_profile
def diff(self, other_profile, options=None):
"""
Finds the difference between 2 StructuredCols and returns the report
        :param other_profile: Structured col finding the difference with this one.
        :type other_profile: StructuredColProfiler
:param options: options to change results of the difference
:type options: dict
:return: difference of the structured column
:rtype: dict
"""
unordered_profile = dict()
for key, profile in self.profiles.items():
if key in other_profile.profiles:
comp_diff = self.profiles[key].diff(other_profile.profiles[key],
options=options)
utils.dict_merge(unordered_profile, comp_diff)
name = self.name
if isinstance(self.name, np.integer):
name = int(name)
unordered_profile.update({
"column_name": name,
})
unordered_profile["statistics"].update({
"sample_size": utils.find_diff_of_numbers(
self.sample_size, other_profile.sample_size),
"null_count": utils.find_diff_of_numbers(
self.null_count, other_profile.null_count),
"null_types": utils.find_diff_of_lists_and_sets(
self.null_types, other_profile.null_types),
"null_types_index": utils.find_diff_of_dicts_with_diff_keys(
self.null_types_index, other_profile.null_types_index),
})
if unordered_profile.get("data_type", None) is not None:
unordered_profile["statistics"].update({
"data_type_representation":
unordered_profile["data_type_representation"]
})
dict_order = [
"column_name",
"data_type",
"data_label",
"categorical",
"order",
"statistics",
]
profile = OrderedDict()
if 'data_label_profile' not in self.profiles or\
'data_label_profile' not in other_profile.profiles:
dict_order.remove("data_label")
for key in dict_order:
try:
profile[key] = unordered_profile[key]
except KeyError as e:
profile[key] = None
return profile
@property
def profile(self):
unordered_profile = dict()
for profile in self.profiles.values():
utils.dict_merge(unordered_profile, profile.profile)
name = self.name
if isinstance(self.name, np.integer):
name = int(name)
unordered_profile.update({
"column_name": name,
"samples": self.sample,
})
unordered_profile["statistics"].update({
"sample_size": self.sample_size,
"null_count": self.null_count,
"null_types": self.null_types,
"null_types_index": self.null_types_index
})
if unordered_profile.get("data_type", None) is not None:
unordered_profile["statistics"].update({
"data_type_representation":
unordered_profile["data_type_representation"]
})
dict_order = [
"column_name",
"data_type",
"data_label",
"categorical",
"order",
"samples",
"statistics",
]
profile = OrderedDict()
if 'data_label_profile' not in self.profiles:
dict_order.remove("data_label")
for key in dict_order:
try:
profile[key] = unordered_profile[key]
except KeyError as e:
profile[key] = None
return profile
def _update_base_stats(self, base_stats):
self.sample_size += base_stats["sample_size"]
self._last_batch_size = base_stats["sample_size"]
self.sample = base_stats["sample"]
self.null_count += base_stats["null_count"]
self.null_types = utils._combine_unique_sets(
self.null_types, list(base_stats["null_types"].keys())
)
base_min = base_stats["min_id"]
base_max = base_stats["max_id"]
base_nti = base_stats["null_types"]
# Check if indices overlap, if they do, adjust attributes accordingly
if utils.overlap(self._min_id, self._max_id, base_min, base_max):
warnings.warn(f"Overlapping indices detected. To resolve, indices "
f"where null data present will be shifted forward "
f"when stored in profile: {self.name}")
            # Shift indices (min, max, and all indices in null types index)
self._index_shift = self._max_id + 1
base_min = base_min + self._index_shift
base_max = base_max + self._index_shift
base_nti = {k: {x + self._index_shift for x in v} for k, v in
base_stats["null_types"].items()}
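            # Worked example: if this profile already covers ids 0-99 (_max_id == 99)
            # and a new batch reports ids starting at 0 again, the batch's ids are
            # shifted by 100, so a null at batch index 5 is stored as index 105.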
# Store/compare min/max id with current
if self._min_id is None:
self._min_id = base_min
elif base_min is not None:
self._min_id = min(self._min_id, base_min)
if self._max_id is None:
self._max_id = base_max
elif base_max is not None:
self._max_id = max(self._max_id, base_max)
# Update null row indices
for null_type, null_rows in base_nti.items():
if type(null_rows) is list:
null_rows.sort()
self.null_types_index.setdefault(null_type, set()).update(null_rows)
def update_profile(self, df_series, sample_size=None,
min_true_samples=None, sample_ids=None,
pool=None):
"""
Update the column profiler
:param df_series: Data to be profiled
:type df_series: pandas.core.series.Series
:param sample_size: Number of samples to use in generating profile
:type sample_size: int
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:param sample_ids: Randomized list of sample indices
:type sample_ids: list(list)
:param pool: pool utilized for multiprocessing
:type pool: multiprocessing.Pool
"""
if not sample_size:
sample_size = len(df_series)
if not sample_size:
sample_size = self._get_sample_size(df_series)
if not min_true_samples:
min_true_samples = self._min_true_samples
clean_sampled_df, base_stats = \
self.clean_data_and_get_base_stats(
df_series=df_series, sample_size=sample_size,
null_values=self._null_values,
min_true_samples=min_true_samples, sample_ids=sample_ids)
self._update_base_stats(base_stats)
self.update_column_profilers(clean_sampled_df, pool)
def _get_sample_size(self, df_series):
"""
Determines the minimum sampling size for detecting column type.
:param df_series: a column of data
:type df_series: pandas.core.series.Series
:return: integer sampling size
:rtype: int
"""
len_df = len(df_series)
if len_df <= self._min_sample_size:
return int(len_df)
return max(int(self._sampling_ratio * len_df), self._min_sample_size)
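        # Worked example with the defaults (min_sample_size=5000, sampling_ratio=0.2):
        # a 3,000-row column is profiled in full, a 10,000-row column is sampled at
        # 5,000 rows, and a 100,000-row column is sampled at 20,000 rows.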
# TODO: flag column name with null values and potentially return row
# index number in the error as well
@staticmethod
def clean_data_and_get_base_stats(df_series, sample_size, null_values=None,
min_true_samples=None,
sample_ids=None):
"""
Identify null characters and return them in a dictionary as well as
remove any nulls in column.
:param df_series: a given column
:type df_series: pandas.core.series.Series
:param sample_size: Number of samples to use in generating the profile
:type sample_size: int
:param null_values: Dictionary mapping null values to regex flag where
the key represents the null value to remove from the data and the
flag represents the regex flag to apply
:type null_values: dict[str, re.FLAG]
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:param sample_ids: Randomized list of sample indices
:type sample_ids: list(list)
:return: updated column with null removed and dictionary of null
parameters
:rtype: pd.Series, dict
"""
if min_true_samples is None:
min_true_samples = 0
if null_values is None:
null_values = dict()
len_df = len(df_series)
if not len_df:
return df_series, {
"sample_size": 0, "null_count": 0,
"null_types": dict(), "sample": [],
"min_id": None, "max_id": None
}
# Pandas reads empty values in the csv files as nan
df_series = df_series.apply(str)
# Record min and max index values if index is int
is_index_all_ints = True
try:
min_id = min(df_series.index)
max_id = max(df_series.index)
if not (isinstance(min_id, int) and isinstance(max_id, int)):
is_index_all_ints = False
except TypeError:
is_index_all_ints = False
if not is_index_all_ints:
min_id = max_id = None
warnings.warn("Unable to detect minimum and maximum index values "
"for overlap detection. Updating/merging profiles "
"may result in inaccurate null row index reporting "
"due to unhandled overlapping indices.")
# Select generator depending if sample_ids availability
if sample_ids is None:
sample_ind_generator = utils.shuffle_in_chunks(
len_df, chunk_size=sample_size)
else:
sample_ind_generator = utils.partition(
sample_ids[0], chunk_size=sample_size)
na_columns = dict()
true_sample_list = set()
total_sample_size = 0
query = '|'.join(null_values.keys())
regex = f"^(?:{(query)})$"
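        # With the default null values this expands to, e.g.:
        #   ^(?:|nan|none|null| *|--*|__*)$   (matched case-insensitively below)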
for chunked_sample_ids in sample_ind_generator:
total_sample_size += len(chunked_sample_ids)
# Find subset of series based on randomly selected ids
df_subset = df_series.iloc[chunked_sample_ids]
# Query should search entire cell for all elements at once
matches = df_subset.str.match(regex, flags=re.IGNORECASE)
# Split series into None samples and true samples
true_sample_list.update(df_subset[~matches].index)
# Iterate over all the Nones
for index, cell in df_subset[matches].items():
na_columns.setdefault(cell, list()).append(index)
# Ensure minimum number of true samples met
# and if total_sample_size >= sample size, exit
if len(true_sample_list) >= min_true_samples \
and total_sample_size >= sample_size:
break
# close the generator in case it is not exhausted.
if sample_ids is None:
sample_ind_generator.close()
# If min_true_samples exists, sort
if min_true_samples > 0 or sample_ids is None:
true_sample_list = sorted(true_sample_list)
# Split out true values for later utilization
df_series = df_series.loc[true_sample_list]
total_na = total_sample_size - len(true_sample_list)
base_stats = {
"sample_size": total_sample_size,
"null_count": total_na,
"null_types": na_columns,
"sample": random.sample(list(df_series.values),
min(len(df_series), 5)),
"min_id": min_id,
"max_id": max_id
}
return df_series, base_stats
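
# Illustrative sketch (hypothetical usage): driving StructuredColProfiler directly
# on a single pandas Series. In normal use this class is created and updated by
# StructuredProfiler; disabling the data labeler below is an assumption made only
# to keep the sketch independent of the labeler model files.
#
#   opts = StructuredOptions()
#   opts.data_labeler.is_enabled = False
#   col = pd.Series(["1", "2", "nan", "4"], name="example")
#   col_profiler = StructuredColProfiler(col, options=opts)
#   col_profiler.profile["statistics"]["null_count"]   # -> 1, since "nan" matches a null pattern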
class BaseProfiler(object):
_default_labeler_type = None
_option_class = None
_allowed_external_data_types = None
def __init__(self, data, samples_per_update=None, min_true_samples=0,
options=None):
"""
Instantiate the BaseProfiler class
:param data: Data to be profiled
:type data: Data class object
:param samples_per_update: Number of samples to use in generating
profile
:type samples_per_update: int
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:param options: Options for the profiler.
:type options: ProfilerOptions Object
:return: Profiler
"""
if min_true_samples is not None and not isinstance(min_true_samples, int):
raise ValueError('`min_true_samples` must be an integer or `None`.')
if self._default_labeler_type is None:
raise ValueError('`_default_labeler_type` must be set when '
'overriding `BaseProfiler`.')
elif self._option_class is None:
raise ValueError('`_option_class` must be set when overriding '
'`BaseProfiler`.')
elif self._allowed_external_data_types is None:
raise ValueError('`_allowed_external_data_types` must be set when '
'overriding `BaseProfiler`.')
options.validate()
self._profile = None
self.options = options
self.encoding = None
self.file_type = None
self._samples_per_update = samples_per_update
self._min_true_samples = min_true_samples
self.total_samples = 0
self.times = defaultdict(float)
# TODO: allow set via options
self._sampling_ratio = 0.2
self._min_sample_size = 5000
# assign data labeler
data_labeler_options = self.options.data_labeler
if data_labeler_options.is_enabled \
and data_labeler_options.data_labeler_object is None:
try:
data_labeler = DataLabeler(
labeler_type=self._default_labeler_type,
dirpath=data_labeler_options.data_labeler_dirpath,
load_options=None)
self.options.set(
{'data_labeler.data_labeler_object': data_labeler})
except Exception as e:
utils.warn_on_profile('data_labeler', e)
self.options.set({'data_labeler.is_enabled': False})
def _add_error_checks(self, other):
"""
Profiler type specific checks to ensure two profiles can be added
together.
"""
raise NotImplementedError()
def __add__(self, other):
"""
Merges two profiles together overriding the `+` operator.
:param other: profile being added to this one.
:type other: BaseProfiler
:return: merger of the two profiles
:rtype: BaseProfiler
"""
if type(other) is not type(self):
raise TypeError('`{}` and `{}` are not of the same profiler type.'.
format(type(self).__name__, type(other).__name__))
# error checks specific to its profiler
self._add_error_checks(other)
merged_profile = self.__class__(
data=None, samples_per_update=self._samples_per_update,
min_true_samples=self._min_true_samples, options=self.options
)
merged_profile.encoding = self.encoding
if self.encoding != other.encoding:
merged_profile.encoding = 'multiple files'
merged_profile.file_type = self.file_type
if self.file_type != other.file_type:
merged_profile.file_type = 'multiple files'
merged_profile.total_samples = self.total_samples + other.total_samples
merged_profile.times = utils.add_nested_dictionaries(self.times,
other.times)
return merged_profile
def diff(self, other_profile, options=None):
"""
Finds the difference of two profiles
        :param other_profile: profile being compared against this one.
        :type other_profile: BaseProfiler
:return: diff of the two profiles
:rtype: dict
"""
if type(other_profile) is not type(self):
raise TypeError('`{}` and `{}` are not of the same profiler type.'.
format(type(self).__name__,
type(other_profile).__name__))
diff_profile = OrderedDict([
("global_stats", {
"file_type": utils.find_diff_of_strings_and_bools(
self.file_type, other_profile.file_type),
"encoding": utils.find_diff_of_strings_and_bools(
self.encoding, other_profile.encoding),
})])
return diff_profile
def _get_sample_size(self, data):
"""
Determines the minimum sampling size for profiling the dataset.
:param data: a dataset
:type data: Union[pd.Series, pd.DataFrame, list]
:return: integer sampling size
:rtype: int
"""
if self._samples_per_update:
return self._samples_per_update
len_data = len(data)
if len_data <= self._min_sample_size:
return int(len_data)
return max(int(self._sampling_ratio * len_data), self._min_sample_size)
@property
def profile(self):
"""
Returns the stored profiles for the given profiler.
:return: None
"""
return self._profile
def report(self, report_options=None):
"""
Returns the profile report based on all profiled data fed into the
profiler. User can specify the output_formats: (pretty, compact,
serializable, flat).
Pretty: floats are rounded to four decimal places, and lists are
shortened.
Compact: Similar to pretty, but removes detailed statistics such as
runtimes, label probabilities, index locations of null types,
etc.
Serializable: Output is json serializable and not prettified
Flat: Nested output is returned as a flattened dictionary
:var report_options: optional format changes to the report
`dict(output_format=<FORMAT>)`
:type report_options: dict
:return: dictionary report
:rtype: dict
"""
raise NotImplementedError()
def _update_profile_from_chunk(self, data, sample_size,
min_true_samples=None):
"""
Iterate over the dataset and identify its parameters via profiles.
:param data: dataset to be profiled
:type data: Union[pd.Series, pd.DataFrame, list]
:param sample_size: number of samples for df to use for profiling
:type sample_size: int
:param min_true_samples: minimum number of true samples required
:type min_true_samples: int
:return: list of column profile base subclasses
:rtype: list(BaseColumnProfiler)
"""
raise NotImplementedError()
def update_profile(self, data, sample_size=None, min_true_samples=None):
"""
Update the profile for data provided. User can specify the sample
size to profile the data with. Additionally, the user can specify the
minimum number of non-null samples to profile.
:param data: data to be profiled
:type data: Union[data_readers.base_data.BaseData, pandas.DataFrame,
pandas.Series]
:param sample_size: number of samples to profile from the data
:type sample_size: int
:param min_true_samples: minimum number of non-null samples to profile
        :type min_true_samples: int
:return: None
"""
encoding = None
file_type = None
if min_true_samples is not None \
and not isinstance(min_true_samples, int):
raise ValueError('`min_true_samples` must be an integer or `None`.')
if isinstance(data, data_readers.base_data.BaseData):
encoding = data.file_encoding
file_type = data.data_type
data = data.data
elif isinstance(data, self._allowed_external_data_types):
file_type = str(data.__class__)
else:
raise TypeError(
f"Data must either be imported using the data_readers or using "
f"one of the following: {self._allowed_external_data_types}"
)
if not len(data):
warnings.warn("The passed dataset was empty, hence no data was "
"profiled.")
return
# set sampling properties
if not min_true_samples:
min_true_samples = self._min_true_samples
if not sample_size:
sample_size = self._get_sample_size(data)
self._update_profile_from_chunk(data, sample_size, min_true_samples)
# set file properties since data will be processed
if encoding is not None:
self.encoding = encoding
if file_type is not None:
self.file_type = file_type
def _remove_data_labelers(self):
"""
Helper method for removing all data labelers before saving to disk.
:return: data_labeler used for unstructured labelling
:rtype: DataLabeler
"""
data_labeler = None
data_labeler_options = None
# determine if the data labeler is enabled
use_data_labeler = True
if self.options and isinstance(self.options, (StructuredOptions,
UnstructuredOptions)):
data_labeler_options = self.options.data_labeler
use_data_labeler = data_labeler_options.is_enabled
# remove the data labeler from options
if use_data_labeler and data_labeler_options is not None \
and data_labeler_options.data_labeler_object is not None:
data_labeler = data_labeler_options.data_labeler_object
data_labeler_options.data_labeler_object = None
# get all profiles, unstructured is a single profile and hence needs to
# be in a list, whereas structured is already a list
profilers = [self._profile]
if isinstance(self, StructuredProfiler):
profilers = self._profile
# Remove data labelers for all columns
for profiler in profilers:
# profiles stored differently in Struct/Unstruct, this unifies
# labeler extraction
# unstructured: _profile is a compiler
# structured: StructuredColProfiler.profiles['data_label_profile']
if isinstance(self, StructuredProfiler):
profiler = profiler.profiles.get('data_label_profile', None)
if profiler and use_data_labeler and data_labeler is None:
data_labeler = profiler._profiles['data_labeler'].data_labeler
if profiler and 'data_labeler' in profiler._profiles:
profiler._profiles['data_labeler'].data_labeler = None
return data_labeler
def _restore_data_labelers(self, data_labeler=None):
"""
Helper method for restoring all data labelers after saving to or
loading from disk.
:param data_labeler: unstructured data_labeler
:type data_labeler: DataLabeler
"""
# Restore data labeler for options
use_data_labeler = True
data_labeler_dirpath = None
if self.options and isinstance(self.options, (StructuredOptions,
UnstructuredOptions)):
data_labeler_options = self.options.data_labeler
use_data_labeler = data_labeler_options.is_enabled
data_labeler_dirpath = data_labeler_options.data_labeler_dirpath
if use_data_labeler:
try:
if data_labeler is None:
data_labeler = DataLabeler(
labeler_type=self._default_labeler_type,
dirpath=data_labeler_dirpath,
load_options=None)
self.options.set(
{'data_labeler.data_labeler_object': data_labeler})
except Exception as e:
utils.warn_on_profile('data_labeler', e)
self.options.set({'data_labeler.is_enabled': False})
# get all profiles, unstructured is a single profile and hence needs to
# be in a list, whereas structured is already a list
profilers = [self._profile]
if isinstance(self, StructuredProfiler):
profilers = self._profile
# Restore data labelers for all columns
for profiler in profilers:
if use_data_labeler:
# profiles stored differently in Struct/Unstruct, this unifies
# label replacement
# unstructured: _profile is a compiler
# structured: StructuredColProfiler.profiles['data_label_profile']
if isinstance(self, StructuredProfiler):
profiler = profiler.profiles['data_label_profile']
data_labeler_profile = profiler._profiles['data_labeler']
data_labeler_profile.data_labeler = data_labeler
def _save_helper(self, filepath, data_dict):
"""
Save profiler to disk
:param filepath: Path of file to save to
:type filepath: String
:param data_dict: profile data to be saved
:type data_dict: dict
:return: None
"""
# Set Default filepath
if filepath is None:
filepath = "profile-{}.pkl".format(
datetime.now().strftime("%d-%b-%Y-%H:%M:%S.%f"))
# Remove data labelers as they can't be pickled
data_labelers = self._remove_data_labelers()
# add profiler class to data_dict
data_dict['profiler_class'] = self.__class__.__name__
# Pickle and save profile to disk
with open(filepath, "wb") as outfile:
pickle.dump(data_dict, outfile)
# Restore all data labelers
self._restore_data_labelers(data_labelers)
def save(self, filepath=None):
"""
Save profiler to disk
:param filepath: Path of file to save to
:type filepath: String
:return: None
"""
raise NotImplementedError()
@classmethod
def load(cls, filepath):
"""
Load profiler from disk
:param filepath: Path of file to load from
:type filepath: String
:return: Profiler being loaded, StructuredProfiler or
UnstructuredProfiler
:rtype: BaseProfiler
"""
# Load profile from disk
with open(filepath, "rb") as infile:
data = pickle.load(infile)
# remove profiler class if it exists
profiler_class = data.pop('profiler_class', None)
# if the user didn't load from the a given profiler class, we need
# to determine which profiler is being loaded.
profiler_cls = cls
if cls is BaseProfiler:
if profiler_class == 'StructuredProfiler':
profiler_cls = StructuredProfiler
elif profiler_class == 'UnstructuredProfiler':
profiler_cls = UnstructuredProfiler
elif profiler_class is None: # deprecated case
profiler_cls = StructuredProfiler
if '_empty_line_count' in data:
profiler_cls = UnstructuredProfiler
else:
raise ValueError(f'Invalid profiler class {profiler_class} '
f'failed to load.')
profile_options = profiler_cls._option_class()
profile_options.data_labeler.is_enabled = False
profiler = profiler_cls(None, options=profile_options)
for key in data:
setattr(profiler, key, data[key])
# Restore all data labelers
profiler._restore_data_labelers()
return profiler
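
# Illustrative sketch (hypothetical usage) of the save/load round trip implemented
# above. The path and data are placeholders; data labelers are stripped before
# pickling and re-attached on load, so the labeler model must be resolvable in
# both sessions.
#
#   profiler = StructuredProfiler(pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}))
#   profiler.save("example_profile.pkl")
#   restored = BaseProfiler.load("example_profile.pkl")   # dispatches to StructuredProfiler
#   combined = profiler + restored                        # same schema, so the profiles can be merged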
class UnstructuredProfiler(BaseProfiler):
_default_labeler_type = 'unstructured'
_option_class = UnstructuredOptions
_allowed_external_data_types = (str, list, pd.Series, pd.DataFrame)
def __init__(self, data, samples_per_update=None, min_true_samples=0,
options=None):
"""
Instantiate the UnstructuredProfiler class
:param data: Data to be profiled
:type data: Data class object
:param samples_per_update: Number of samples to use in generating
profile
:type samples_per_update: int
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:param options: Options for the profiler.
:type options: ProfilerOptions Object
:return: UnstructuredProfiler
"""
if not options:
options = UnstructuredOptions()
elif isinstance(options, ProfilerOptions):
options = options.unstructured_options
elif not isinstance(options, UnstructuredOptions):
            raise ValueError("The profile options must be passed as a "
                             "ProfilerOptions object.")
super().__init__(data, samples_per_update, min_true_samples, options)
# Unstructured specific properties
self._empty_line_count = 0
self.memory_size = 0
self.sample = []
if data is not None:
self.update_profile(data)
def _add_error_checks(self, other):
"""
UnstructuredProfiler specific checks to ensure two profiles can be added
together.
"""
pass
def __add__(self, other):
"""
Merges two Unstructured profiles together overriding the `+` operator.
:param other: unstructured profile being added to this one.
:type other: UnstructuredProfiler
:return: merger of the two profiles
:rtype: UnstructuredProfiler
"""
merged_profile = super().__add__(other)
# unstruct specific property merging
merged_profile._empty_line_count = (
self._empty_line_count + other._empty_line_count)
merged_profile.memory_size = self.memory_size + other.memory_size
samples = list(dict.fromkeys(self.sample + other.sample))
merged_profile.sample = random.sample(list(samples),
min(len(samples), 5))
# merge profiles
merged_profile._profile = self._profile + other._profile
return merged_profile
def diff(self, other_profile, options=None):
"""
        Finds the difference between 2 unstructured profiles and returns the
        report.
        :param other_profile: profile finding the difference with this one.
        :type other_profile: UnstructuredProfiler
:param options: options to impact the results of the diff
:type options: dict
:return: difference of the profiles
:rtype: dict
"""
report = super().diff(other_profile, options)
report["global_stats"].update({
"samples_used": utils.find_diff_of_numbers(
self.total_samples, other_profile.total_samples),
"empty_line_count": utils.find_diff_of_numbers(
self._empty_line_count, other_profile._empty_line_count),
"memory_size": utils.find_diff_of_numbers(
self.memory_size, other_profile.memory_size),
})
report["data_stats"] = self._profile.diff(other_profile._profile,
options=options)
return _prepare_report(report)
def _update_base_stats(self, base_stats):
"""
Updates the samples and line count of the class for the given dataset
batch.
:param base_stats: dictionary of basic sampling / data stats
:type base_stats: dict
:return: None
"""
self.total_samples += base_stats["sample_size"]
self.sample = base_stats["sample"]
self._empty_line_count += base_stats["empty_line_count"]
self.memory_size += base_stats["memory_size"]
def report(self, report_options=None):
"""
Returns the unstructured report based on all profiled data fed into the
profiler. User can specify the output_formats: (pretty, compact,
serializable, flat).
Pretty: floats are rounded to four decimal places, and lists are
shortened.
Compact: Similar to pretty, but removes detailed statistics such as
runtimes, label probabilities, index locations of null types,
etc.
Serializable: Output is json serializable and not prettified
Flat: Nested output is returned as a flattened dictionary
:var report_options: optional format changes to the report
`dict(output_format=<FORMAT>)`
:type report_options: dict
:return: dictionary report
:rtype: dict
"""
if not report_options:
report_options = {
"output_format": None,
"omit_keys": None,
}
output_format = report_options.get("output_format", None)
omit_keys = report_options.get("omit_keys", None)
report = OrderedDict([
("global_stats", {
"samples_used": self.total_samples,
"empty_line_count": self._empty_line_count,
"file_type": self.file_type,
"encoding": self.encoding,
"memory_size": self.memory_size,
"times": self.times,
}),
("data_stats", OrderedDict()),
])
report["data_stats"] = self._profile.profile
return _prepare_report(report, output_format, omit_keys)
@utils.method_timeit(name="clean_and_base_stats")
def _clean_data_and_get_base_stats(self, data, sample_size,
min_true_samples=None):
"""
Identify empty rows and return a cleaned version of text data without
empty rows.
:param data: a series of text data
:type data: pandas.core.series.Series
:param sample_size: Number of samples to use in generating the profile
:type sample_size: int
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:return: updated column with null removed and dictionary of null
parameters
:rtype: pd.Series, dict
"""
if min_true_samples is None:
min_true_samples = 0
len_data = len(data)
if not len_data:
return data, {
"sample_size": 0, "empty_line_count": dict(),
"sample": [], "memory_size": 0
}
# ensure all data are of type str
data = data.apply(str)
# get memory size
base_stats = {"memory_size": utils.get_memory_size(data, unit='M')}
# Setup sample generator
sample_ind_generator = utils.shuffle_in_chunks(
len_data, chunk_size=sample_size)
true_sample_list = set()
total_sample_size = 0
        regex = r"^\s*$"
for chunked_sample_ids in sample_ind_generator:
total_sample_size += len(chunked_sample_ids)
# Find subset of series based on randomly selected ids
data_subset = data.iloc[chunked_sample_ids]
# Query should search entire cell for all elements at once
matches = data_subset.str.match(regex, flags=re.IGNORECASE)
# Split series into None samples and true samples
true_sample_list.update(data_subset[~matches].index)
# Ensure minimum number of true samples met
# and if total_sample_size >= sample size, exit
if len(true_sample_list) >= min_true_samples \
and total_sample_size >= sample_size:
break
# close the generator in case it is not exhausted.
sample_ind_generator.close()
true_sample_list = sorted(true_sample_list)
# Split out true values for later utilization
data = data.loc[true_sample_list]
total_empty = total_sample_size - len(true_sample_list)
base_stats.update(
{
"sample_size": total_sample_size,
"empty_line_count": total_empty,
"sample": random.sample(list(data.values),
min(len(data), 5)),
}
)
return data, base_stats
def _update_profile_from_chunk(self, data, sample_size,
min_true_samples=None):
"""
Iterate over the dataset and identify its parameters via profiles.
:param data: a text dataset
:type data: Union[pd.Series, pd.DataFrame, list]
:param sample_size: number of samples for df to use for profiling
:type sample_size: int
:param min_true_samples: minimum number of true samples required
:type min_true_samples: int
:return: list of column profile base subclasses
:rtype: list(BaseColumnProfiler)
"""
if isinstance(data, pd.DataFrame):
if len(data.columns) > 1:
                raise ValueError("The unstructured profiler cannot handle a dataset "
"with more than 1 column. Please make sure "
"the data format of the dataset is "
"appropriate.")
data = data[data.columns[0]]
elif isinstance(data, (str, list)):
# we know that if it comes in as a list, it is a 1-d list based
# bc of our data readers
# for strings, we just need to put it inside a series for compute.
data = pd.Series(data)
# Format the data
notification_str = "Finding the empty lines in the data..."
logger.info(notification_str)
data, base_stats = self._clean_data_and_get_base_stats(
data, sample_size, min_true_samples)
self._update_base_stats(base_stats)
if sample_size < len(data):
warnings.warn("The data will be profiled with a sample size of {}. "
"All statistics will be based on this subsample and "
"not the whole dataset.".format(sample_size))
# process the text data
notification_str = "Calculating the statistics... "
logger.info(notification_str)
pool = None
if self._profile is None:
self._profile = UnstructuredCompiler(data, options=self.options,
pool=pool)
else:
self._profile.update_profile(data, pool=pool)
def save(self, filepath=None):
"""
Save profiler to disk
:param filepath: Path of file to save to
:type filepath: String
:return: None
"""
# Create dictionary for all metadata, options, and profile
data_dict = {
"total_samples": self.total_samples,
"sample": self.sample,
"encoding": self.encoding,
"file_type": self.file_type,
"_samples_per_update": self._samples_per_update,
"_min_true_samples": self._min_true_samples,
"_empty_line_count": self._empty_line_count,
"memory_size": self.memory_size,
"options": self.options,
"_profile": self.profile,
"times": self.times,
}
self._save_helper(filepath, data_dict)
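
# Illustrative sketch (hypothetical usage): profiling free-form text with
# UnstructuredProfiler. The sample text is made up, and the default options assume
# the bundled data labeler model can be loaded; nothing here runs at import time.
def _demo_unstructured_profile():
    text = pd.Series(["The quick brown fox", "", "jumped over the lazy dog"])
    profiler = UnstructuredProfiler(text)
    # 'compact' trims detailed statistics such as runtimes from the report
    return profiler.report(report_options={"output_format": "compact"})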
class StructuredProfiler(BaseProfiler):
_default_labeler_type = 'structured'
_option_class = StructuredOptions
_allowed_external_data_types = (list, pd.Series, pd.DataFrame)
def __init__(self, data, samples_per_update=None, min_true_samples=0,
options=None):
"""
Instantiate the StructuredProfiler class
:param data: Data to be profiled
:type data: Data class object
:param samples_per_update: Number of samples to use in generating
profile
:type samples_per_update: int
:param min_true_samples: Minimum number of samples required for the
profiler
:type min_true_samples: int
:param options: Options for the profiler.
:type options: ProfilerOptions Object
:return: StructuredProfiler
"""
if not options:
options = StructuredOptions()
elif isinstance(options, ProfilerOptions):
options = options.structured_options
elif not isinstance(options, StructuredOptions):
            raise ValueError("The profile options must be passed as a "
                             "ProfilerOptions object.")
if isinstance(data, data_readers.text_data.TextData):
raise TypeError("Cannot provide TextData object to "
"StructuredProfiler")
super().__init__(data, samples_per_update, min_true_samples, options)
# Structured specific properties
self.row_has_null_count = 0
self.row_is_null_count = 0
self.hashed_row_dict = dict()
self._profile = []
self._col_name_to_idx = defaultdict(list)
self.correlation_matrix = None
if data is not None:
self.update_profile(data)
def _add_error_checks(self, other):
"""
StructuredProfiler specific checks to ensure two profiles can be added
together.
"""
# Pass with strict = True to enforce both needing to be non-empty
self_to_other_idx = self._get_and_validate_schema_mapping(self._col_name_to_idx,
other._col_name_to_idx,
True)
if not all([isinstance(other._profile[self_to_other_idx[idx]],
type(self._profile[idx]))
for idx in range(len(self._profile))]): # options check
raise ValueError('The two profilers were not setup with the same '
'options, hence they do not calculate the same '
'profiles and cannot be added together.')
def __add__(self, other):
"""
Merges two Structured profiles together overriding the `+` operator.
:param other: profile being added to this one.
:type other: StructuredProfiler
:return: merger of the two profiles
:rtype: StructuredProfiler
"""
merged_profile = super().__add__(other)
# struct specific property merging
merged_profile.row_has_null_count = \
self.row_has_null_count + other.row_has_null_count
merged_profile.row_is_null_count = \
self.row_is_null_count + other.row_is_null_count
merged_profile.hashed_row_dict.update(self.hashed_row_dict)
merged_profile.hashed_row_dict.update(other.hashed_row_dict)
self_to_other_idx = self._get_and_validate_schema_mapping(self._col_name_to_idx,
other._col_name_to_idx)
# merge profiles
for idx in range(len(self._profile)):
other_idx = self_to_other_idx[idx]
merged_profile._profile.append(self._profile[idx] +
other._profile[other_idx])
# schemas are asserted to be identical
merged_profile._col_name_to_idx = copy.deepcopy(self._col_name_to_idx)
# merge correlation
if (self.options.correlation.is_enabled
and other.options.correlation.is_enabled):
merged_profile.correlation_matrix = self._merge_correlation(other)
return merged_profile
def diff(self, other_profile, options=None):
"""
Finds the difference between 2 Profiles and returns the report
        :param other_profile: profile finding the difference with this one
        :type other_profile: StructuredProfiler
:param options: options to change results of the difference
:type options: dict
:return: difference of the profiles
:rtype: dict
"""
        report = super().diff(other_profile, options)
report["global_stats"].update({
"samples_used": utils.find_diff_of_numbers(
self._max_col_samples_used,
other_profile._max_col_samples_used),
"column_count": utils.find_diff_of_numbers(
len(self._profile), len(other_profile._profile)),
"row_count": utils.find_diff_of_numbers(
self.total_samples, other_profile.total_samples),
"row_has_null_ratio": utils.find_diff_of_numbers(
self._get_row_has_null_ratio(),
other_profile._get_row_has_null_ratio()),
"row_is_null_ratio": utils.find_diff_of_numbers(
self._get_row_is_null_ratio(),
other_profile._get_row_is_null_ratio()),
"unique_row_ratio": utils.find_diff_of_numbers(
self._get_unique_row_ratio(),
other_profile._get_unique_row_ratio()),
"duplicate_row_count": utils.find_diff_of_numbers(
self._get_duplicate_row_count(),
                other_profile._get_duplicate_row_count()),
"correlation_matrix": utils.find_diff_of_matrices(
self.correlation_matrix,
other_profile.correlation_matrix),
"profile_schema": defaultdict(list)})
report.update({"data_stats": []})
# Extract the schema of each profile
self_profile_schema = defaultdict(list)
other_profile_schema = defaultdict(list)
for i in range(len(self._profile)):
col_name = self._profile[i].name
self_profile_schema[col_name].append(i)
for i in range(len(other_profile._profile)):
col_name = other_profile._profile[i].name
other_profile_schema[col_name].append(i)
report["global_stats"]["profile_schema"] = \
utils.find_diff_of_dicts_with_diff_keys(self_profile_schema,
other_profile_schema)
# Only find the diff of columns if the schemas are exactly the same
if self_profile_schema == other_profile_schema:
for i in range(len(self._profile)):
report["data_stats"].append(
self._profile[i].diff(other_profile._profile[i],
options=options))
return _prepare_report(report)
@property
def _max_col_samples_used(self):
"""
Calculates and returns the maximum samples used in any of the columns.
"""
samples_used = 0
for col in self._profile:
samples_used = max(samples_used, col.sample_size)
return samples_used
@property
def _min_col_samples_used(self):
"""
Calculates and returns the number of rows that were completely sampled
i.e. every column in the Profile was read up to this row (possibly
further in some cols)
"""
return min([col.sample_size for col in self._profile], default=0)
@property
def _min_sampled_from_batch(self):
"""
Calculates and returns the number of rows that were completely sampled
in the most previous batch
"""
return min([col._last_batch_size for col in self._profile], default=0)
@staticmethod
def _get_and_validate_schema_mapping(schema1, schema2, strict=False):
"""
Validate compatibility between schema1 and schema2 and return a dict
mapping indices in schema1 to their corresponding indices in schema2.
In __add__: want to map self _profile idx -> other _profile idx
In _update_profile_from_chunk: want to map data idx -> _profile idx
:param schema1: a column name to index mapping
:type schema1: Dict[str, list[int]]
:param schema2: a column name to index mapping
:type schema2: Dict[str, list[int]]
:param strict: whether or not to strictly match (__add__ case)
:type strict: bool
:return: a mapping of indices in schema1 to indices in schema2
:rtype: Dict[int, int]
"""
len_schema1 = len(schema1)
len_schema2 = len(schema2)
# If both non-empty, must be same length
if 0 < len_schema1 != len_schema2 > 0:
raise ValueError("Attempted to merge profiles with different "
"numbers of columns")
# In the case of __add__ with one of the schemas not initialized
if strict and (len_schema1 == 0 or len_schema2 == 0):
raise ValueError("Cannot merge empty profiles.")
# In the case of _update_from_chunk with uninitialized schema
if not strict and len_schema2 == 0:
return {col_ind: col_ind for col_ind_list in schema1.values()
for col_ind in col_ind_list}
# Map indices in schema1 to indices in schema2
schema_mapping = dict()
for key in schema1:
# Pandas columns are int by default, but need to fuzzy match strs
if isinstance(key, str):
key = key.lower()
if key not in schema2:
raise ValueError("Columns do not match, cannot update "
"or merge profiles.")
elif len(schema1[key]) != len(schema2[key]):
raise ValueError(f"Different number of columns detected for "
f"'{key}', cannot update or merge profiles.")
is_duplicate_col = len(schema1[key]) > 1
for schema1_col_ind, schema2_col_ind in zip(schema1[key],
schema2[key]):
if is_duplicate_col and (schema1_col_ind != schema2_col_ind):
raise ValueError(f"Different column indices under "
f"duplicate name '{key}', cannot update "
f"or merge unless schema is identical.")
schema_mapping[schema1_col_ind] = schema2_col_ind
return schema_mapping
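    # Worked example: schema1 == {"a": [0], "b": [1]} and schema2 == {"b": [0], "a": [1]}
    # map to {0: 1, 1: 0}: column "a" sits at index 0 in the first profile and at
    # index 1 in the second.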
def report(self, report_options=None):
if not report_options:
report_options = {
"output_format": None,
"num_quantile_groups": 4,
}
output_format = report_options.get("output_format", None)
omit_keys = report_options.get("omit_keys", [])
num_quantile_groups = report_options.get("num_quantile_groups", 4)
report = OrderedDict([
("global_stats", {
"samples_used": self._max_col_samples_used,
"column_count": len(self._profile),
"row_count": self.total_samples,
"row_has_null_ratio": self._get_row_has_null_ratio(),
"row_is_null_ratio": self._get_row_is_null_ratio(),
"unique_row_ratio": self._get_unique_row_ratio(),
"duplicate_row_count": self._get_duplicate_row_count(),
"file_type": self.file_type,
"encoding": self.encoding,
"correlation_matrix": self.correlation_matrix,
"profile_schema": defaultdict(list),
"times": self.times,
}),
("data_stats", []),
])
for i in range(len(self._profile)):
col_name = self._profile[i].name
report["global_stats"]["profile_schema"][col_name].append(i)
report["data_stats"].append(self._profile[i].profile)
quantiles = report["data_stats"][i]["statistics"].get('quantiles')
if quantiles:
quantiles = calculate_quantiles(num_quantile_groups, quantiles)
report["data_stats"][i]["statistics"]["quantiles"] = quantiles
return _prepare_report(report, output_format, omit_keys)
def _get_unique_row_ratio(self):
if self.total_samples:
return len(self.hashed_row_dict) / self.total_samples
return 0
def _get_row_is_null_ratio(self):
if self._min_col_samples_used:
return self.row_is_null_count / self._min_col_samples_used
return 0
def _get_row_has_null_ratio(self):
if self._min_col_samples_used:
return self.row_has_null_count / self._min_col_samples_used
return 0
def _get_duplicate_row_count(self):
return self.total_samples - len(self.hashed_row_dict)
@utils.method_timeit(name='row_stats')
def _update_row_statistics(self, data, sample_ids=None):
"""
Iterate over the provided dataset row by row and calculate
the row statistics. Specifically, number of unique rows,
rows containing null values, and total rows reviewed. This
function is safe to use in batches.
:param data: a dataset
:type data: pandas.DataFrame
:param sample_ids: list of indices in order they were sampled in data
:type sample_ids: list(int)
"""
if not isinstance(data, pd.DataFrame):
raise ValueError("Cannot calculate row statistics on data that is"
"not a DataFrame")
self.total_samples += len(data)
try:
self.hashed_row_dict.update(dict.fromkeys(
pd.util.hash_pandas_object(data, index=False), True
))
except TypeError:
self.hashed_row_dict.update(dict.fromkeys(
pd.util.hash_pandas_object(data.astype(str), index=False), True
))
# Calculate Null Column Count
null_rows = set()
null_in_row_count = set()
first_col_flag = True
for column in self._profile:
null_type_dict = column.null_types_index
null_row_indices = set()
if null_type_dict:
null_row_indices = set.union(*null_type_dict.values())
# If sample ids provided, only consider nulls in rows that
# were fully sampled
if sample_ids is not None:
# This is the amount (integer) indices were shifted by in the
# event of overlap
shift = column._index_shift
if shift is None:
# Shift is None if index is str or if no overlap detected
null_row_indices = null_row_indices.intersection(
data.index[sample_ids[:self._min_sampled_from_batch]])
else:
# Only shift if index shift detected (must be ints)
null_row_indices = null_row_indices.intersection(
data.index[sample_ids[:self._min_sampled_from_batch]] +
shift)
# Find the common null indices between the columns
if first_col_flag:
null_rows = null_row_indices
null_in_row_count = null_row_indices
first_col_flag = False
else:
null_rows = null_rows.intersection(null_row_indices)
null_in_row_count = null_in_row_count.union(null_row_indices)
# If sample_ids provided, increment since that means only new data read
if sample_ids is not None:
self.row_has_null_count += len(null_in_row_count)
self.row_is_null_count += len(null_rows)
else:
self.row_has_null_count = len(null_in_row_count)
self.row_is_null_count = len(null_rows)
def _get_correlation(self, clean_samples, batch_properties):
"""
Calculate correlation matrix on the cleaned data.
:param clean_samples: the input cleaned dataset
:type clean_samples: dict()
:param batch_properties: mean/std/counts of each batch column necessary
for correlation computation
:type batch_properties: dict()
"""
columns = self.options.correlation.columns
clean_column_ids = []
if columns is None:
for idx in range(len(self._profile)):
data_type = self._profile[idx].\
profiles["data_type_profile"].selected_data_type
if data_type not in ["int", "float"]:
clean_samples.pop(idx)
else:
clean_column_ids.append(idx)
data = pd.DataFrame(clean_samples).apply(pd.to_numeric, errors='coerce')
means = {index:mean for index, mean in enumerate(batch_properties['mean'])}
data = data.fillna(value=means)
# Update the counts/std if needed (i.e. if null rows exist)
if (len(data) != batch_properties['count']).any():
adjusted_stds = np.sqrt(
batch_properties['std']**2 * (batch_properties['count'] - 1) \
/ (len(data) - 1)
)
batch_properties['std'] = adjusted_stds
# Set count key to a single number now that everything's been adjusted
batch_properties['count'] = len(data)
# fill correlation matrix with nan initially
n_cols = len(self._profile)
corr_mat = np.full((n_cols, n_cols), np.nan)
# then, fill in the correlations for valid columns
rows = [[id] for id in clean_column_ids]
corr_mat[rows, clean_column_ids] = np.corrcoef(data, rowvar=False)
return corr_mat
@utils.method_timeit(name='correlation')
def _update_correlation(self, clean_samples, prev_dependent_properties):
"""
Update correlation matrix for cleaned data.
:param clean_samples: the input cleaned dataset
:type clean_samples: dict()
:param prev_dependent_properties: mean/std of each column computed on the
    previously profiled data, prior to this update
:type prev_dependent_properties: dict()
"""
batch_properties = self._get_correlation_dependent_properties(clean_samples)
batch_corr = self._get_correlation(clean_samples, batch_properties)
self.correlation_matrix = self._merge_correlation_helper(
self.correlation_matrix, prev_dependent_properties["mean"],
prev_dependent_properties["std"], self.total_samples - self.row_is_null_count,
batch_corr, batch_properties["mean"],
batch_properties["std"], batch_properties['count'])
@utils.method_timeit(name='correlation')
def _merge_correlation(self, other):
"""
Merge correlation matrix from two profiles
:param other: the other profile that needs to be merged
:return: merged correlation matrix
"""
corr_mat1 = self.correlation_matrix
corr_mat2 = other.correlation_matrix
n1 = self.total_samples - self.row_is_null_count
n2 = other.total_samples - other.row_is_null_count
if n1 == 0:
return corr_mat2
if n2 == 0:
return corr_mat1
if corr_mat1 is None or corr_mat2 is None:
return None
# get column indices without nan
col_ids1 = np.where(~np.isnan(corr_mat1).all(axis=0))[0]
col_ids2 = np.where(~np.isnan(corr_mat2).all(axis=0))[0]
if len(col_ids1) != len(col_ids2) or len(col_ids1) <= 1:
return None
if (col_ids1 != col_ids2).any():
return None
mean1 = np.array(
[self._profile[idx].profile['statistics']['mean']
for idx in range(len(self._profile)) if idx in col_ids1])
std1 = np.array(
[self._profile[idx].profile['statistics']['stddev']
for idx in range(len(self._profile)) if idx in col_ids1])
mean2 = np.array(
[other._profile[idx].profile['statistics']['mean']
for idx in range(len(self._profile)) if idx in col_ids2])
std2 = np.array(
[other._profile[idx].profile['statistics']['stddev']
for idx in range(len(self._profile)) if idx in col_ids2])
return self._merge_correlation_helper(corr_mat1, mean1, std1, n1,
corr_mat2, mean2, std2, n2)
def _get_correlation_dependent_properties(self, batch=None):
"""
Obtains the necessary dependent properties of the data
(mean/stddev) for calculating correlation. By default,
it will compute it on all columns in the profiler, but if
a batch is given, it will compute it only for the columns
in the batch.
:param batch: Batch data
:type batch: dict
:return: dependent properties
:rtype: dict
"""
dependent_properties = {
'mean': np.full(len(self._profile), np.nan),
'std': np.full(len(self._profile), np.nan),
'count': np.full(len(self._profile), np.nan)
}
for id in range(len(self._profile)):
compiler = self._profile[id]
data_type_compiler = compiler.profiles["data_type_profile"]
data_type = data_type_compiler.selected_data_type
if data_type in ["int", "float"]:
data_type_profiler = data_type_compiler._profiles[data_type]
# Finding dependent values of previous, existing data
if batch is None:
n = data_type_profiler.match_count
dependent_properties['mean'][id] = data_type_profiler.mean
# Subtract null row count as those aren't included in corr. calc
dependent_properties['std'][id] = \
np.sqrt(data_type_profiler._biased_variance * n / (self.total_samples - self.row_is_null_count- 1))
dependent_properties['count'][id] = n
# Finding the properties of the batch data if given
elif id in batch.keys():
history = data_type_profiler._batch_history[-1]
n = history['match_count']
# Since we impute values, we want the total rows (including nulls)
dependent_properties['mean'][id] = history['mean']
dependent_properties['std'][id] = \
np.sqrt(history['biased_variance'] * n / (n - 1))
dependent_properties['count'][id] = n
return dependent_properties
@staticmethod
def _merge_correlation_helper(corr_mat1, mean1, std1, n1,
corr_mat2, mean2, std2, n2):
"""
Helper function to merge correlation matrix from two profiles
:param corr_mat1: correlation matrix of profile1
:type corr_mat1: pd.DataFrame
:param mean1: mean of columns of profile1
:type mean1: np.array
:param std1: standard deviation of columns of profile1
:type std1: np.array
:param n1: number of samples (non-null rows) in profile1
:type n1: int
:param corr_mat2: correlation matrix of profile2
:type corr_mat2: pd.DataFrame
:param mean2: mean of columns of profile2
:type mean2: np.array
:param std2: standard deviation of columns of profile2
:type std2: np.array
:param n2: number of samples (non-null rows) in profile2
:type n2: int
:return: merged correlation matrix
:rtype: np.array
"""
if corr_mat1 is None:
return corr_mat2
elif corr_mat2 is None:
return corr_mat1
elif len(mean1) == 0:
return corr_mat2
elif len(mean2) == 0:
return corr_mat1
std_mat1 = np.outer(std1, std1)
std_mat2 = np.outer(std2, std2)
mean_diff_vector = mean1 - mean2
mean_diff_mat = np.outer(mean_diff_vector, mean_diff_vector)
cov1 = corr_mat1 * std_mat1
cov2 = corr_mat2 * std_mat2
n = n1 + n2
cov = cov1 * (n1 - 1) + cov2 * (n2 - 1) + mean_diff_mat * (n1 * n2) / n
cov = cov / (n - 1)
delta = mean2 - mean1
M2_1 = (n1 - 1) * (std1 ** 2)
M2_2 = (n2 - 1) * (std2 ** 2)
M2 = M2_1 + M2_2 + delta ** 2 * n1 * n2 / n
std = np.sqrt(M2 / (n - 1))
std_mat = np.outer(std, std)
corr_mat = cov / std_mat
return corr_mat
def _update_profile_from_chunk(self, data, sample_size,
min_true_samples=None):
"""
Iterate over the columns of a dataset and identify its parameters.
:param data: a dataset
:type data: pandas.DataFrame
:param sample_size: number of samples for df to use for profiling
:type sample_size: int
:param min_true_samples: minimum number of true samples required
:type min_true_samples: int
:return: list of column profile base subclasses
:rtype: list(BaseColumnProfiler)
"""
if isinstance(data, pd.Series):
data = data.to_frame()
elif isinstance(data, list):
data = pd.DataFrame(data, dtype=object)
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import same_color, to_rgb, to_rgba
import pytest
from numpy.testing import assert_array_equal
from seaborn.external.version import Version
from seaborn._core.rules import categorical_order
from seaborn._core.scales import Nominal, Continuous
from seaborn._core.properties import (
Alpha,
Color,
Coordinate,
EdgeWidth,
Fill,
LineStyle,
LineWidth,
Marker,
PointSize,
)
from seaborn._compat import MarkerStyle
from seaborn.palettes import color_palette
class DataFixtures:
@pytest.fixture
def num_vector(self, long_df):
return long_df["s"]
@pytest.fixture
def num_order(self, num_vector):
return categorical_order(num_vector)
@pytest.fixture
def cat_vector(self, long_df):
return long_df["a"]
@pytest.fixture
def cat_order(self, cat_vector):
return categorical_order(cat_vector)
@pytest.fixture
def dt_num_vector(self, long_df):
return long_df["t"]
@pytest.fixture
def dt_cat_vector(self, long_df):
return long_df["d"]
@pytest.fixture
def vectors(self, num_vector, cat_vector):
return {"num": num_vector, "cat": cat_vector}
class TestCoordinate(DataFixtures):
def test_bad_scale_arg_str(self, num_vector):
err = "Unknown magic arg for x scale: 'xxx'."
with pytest.raises(ValueError, match=err):
Coordinate("x").infer_scale("xxx", num_vector)
def test_bad_scale_arg_type(self, cat_vector):
err = "Magic arg for x scale must be str, not list."
with pytest.raises(TypeError, match=err):
Coordinate("x").infer_scale([1, 2, 3], cat_vector)
class TestColor(DataFixtures):
def assert_same_rgb(self, a, b):
assert_array_equal(a[:, :3], b[:, :3])
def test_nominal_default_palette(self, cat_vector, cat_order):
m = Color().get_mapping(Nominal(), cat_vector)
n = len(cat_order)
actual = m(np.arange(n))
expected = color_palette(None, n)
for have, want in zip(actual, expected):
assert same_color(have, want)
def test_nominal_default_palette_large(self):
vector = pd.Series(list("abcdefghijklmnopqrstuvwxyz"))
m = Color().get_mapping(Nominal(), vector)
actual = m(np.arange(26))
expected = color_palette("husl", 26)
for have, want in zip(actual, expected):
assert same_color(have, want)
def test_nominal_named_palette(self, cat_vector, cat_order):
palette = "Blues"
m = Color().get_mapping(Nominal(palette), cat_vector)
n = len(cat_order)
actual = m(np.arange(n))
expected = color_palette(palette, n)
for have, want in zip(actual, expected):
assert same_color(have, want)
def test_nominal_list_palette(self, cat_vector, cat_order):
palette = color_palette("Reds", len(cat_order))
m = Color().get_mapping(Nominal(palette), cat_vector)
actual = m(np.arange(len(palette)))
expected = palette
for have, want in zip(actual, expected):
assert same_color(have, want)
def test_nominal_dict_palette(self, cat_vector, cat_order):
colors = color_palette("Greens")
palette = dict(zip(cat_order, colors))
m = Color().get_mapping(Nominal(palette), cat_vector)
n = len(cat_order)
actual = m(np.arange(n))
expected = colors
for have, want in zip(actual, expected):
assert same_color(have, want)
def test_nominal_dict_with_missing_keys(self, cat_vector, cat_order):
palette = dict(zip(cat_order[1:], color_palette("Purples")))
with pytest.raises(ValueError, match="No entry in color dict"):
Color("color").get_mapping(Nominal(palette), cat_vector)
def test_nominal_list_too_short(self, cat_vector, cat_order):
n = len(cat_order) - 1
palette = color_palette("Oranges", n)
msg = rf"The edgecolor list has fewer values \({n}\) than needed \({n + 1}\)"
with pytest.warns(UserWarning, match=msg):
Color("edgecolor").get_mapping(Nominal(palette), cat_vector)
def test_nominal_list_too_long(self, cat_vector, cat_order):
n = len(cat_order) + 1
palette = color_palette("Oranges", n)
msg = rf"The edgecolor list has more values \({n}\) than needed \({n - 1}\)"
with pytest.warns(UserWarning, match=msg):
Color("edgecolor").get_mapping(Nominal(palette), cat_vector)
def test_continuous_default_palette(self, num_vector):
cmap = color_palette("ch:", as_cmap=True)
m = Color().get_mapping(Continuous(), num_vector)
self.assert_same_rgb(m(num_vector), cmap(num_vector))
def test_continuous_named_palette(self, num_vector):
pal = "flare"
cmap = color_palette(pal, as_cmap=True)
m = Color().get_mapping(Continuous(pal), num_vector)
self.assert_same_rgb(m(num_vector), cmap(num_vector))
def test_continuous_tuple_palette(self, num_vector):
vals = ("blue", "red")
cmap = color_palette("blend:" + ",".join(vals), as_cmap=True)
m = Color().get_mapping(Continuous(vals), num_vector)
self.assert_same_rgb(m(num_vector), cmap(num_vector))
def test_continuous_callable_palette(self, num_vector):
cmap = mpl.cm.get_cmap("viridis")
m = Color().get_mapping(Continuous(cmap), num_vector)
self.assert_same_rgb(m(num_vector), cmap(num_vector))
def test_continuous_missing(self):
x = pd.Series([1, 2, np.nan, 4])
m = Color().get_mapping(Continuous(), x)
assert np.isnan(m(x)[2]).all()
def test_bad_scale_values_continuous(self, num_vector):
with pytest.raises(TypeError, match="Scale values for color with a Continuous"):
Color().get_mapping(Continuous(["r", "g", "b"]), num_vector)
def test_bad_scale_values_nominal(self, cat_vector):
with pytest.raises(TypeError, match="Scale values for color with a Nominal"):
Color().get_mapping(Nominal(mpl.cm.get_cmap("viridis")), cat_vector)
def test_bad_inference_arg(self, cat_vector):
with pytest.raises(TypeError, match="A single scale argument for color"):
Color().infer_scale(123, cat_vector)
@pytest.mark.parametrize(
"data_type,scale_class",
[("cat", Nominal), ("num", Continuous)]
)
def test_default(self, data_type, scale_class, vectors):
scale = Color().default_scale(vectors[data_type])
assert isinstance(scale, scale_class)
def test_default_numeric_data_category_dtype(self, num_vector):
scale = Color().default_scale(num_vector.astype("category"))
assert isinstance(scale, Nominal)
def test_default_binary_data(self):
x = pd.Series([0, 0, 1, 0, 1], dtype=int)
scale = Color().default_scale(x)
assert isinstance(scale, Continuous)
# TODO default scales for other types
@pytest.mark.parametrize(
"values,data_type,scale_class",
[
("viridis", "cat", Nominal), # Based on variable type
("viridis", "num", Continuous), # Based on variable type
("muted", "num", Nominal), # Based on qualitative palette
(["r", "g", "b"], "num", Nominal), # Based on list palette
({2: "r", 4: "g", 8: "b"}, "num", Nominal), # Based on dict palette
(("r", "b"), "num", Continuous), # Based on tuple / variable type
(("g", "m"), "cat", Nominal), # Based on tuple / variable type
(mpl.cm.get_cmap("inferno"), "num", Continuous), # Based on callable
]
)
def test_inference(self, values, data_type, scale_class, vectors):
scale = Color().infer_scale(values, vectors[data_type])
assert isinstance(scale, scale_class)
assert scale.values == values
def test_inference_binary_data(self):
x = pd.Series([0, 0, 1, 0, 1], dtype=int)
scale = Color().infer_scale("viridis", x)
assert isinstance(scale, Nominal)
def test_standardization(self):
f = Color().standardize
assert f("C3") == to_rgb("C3")
assert f("dodgerblue") == to_rgb("dodgerblue")
assert f((.1, .2, .3)) == (.1, .2, .3)
assert f((.1, .2, .3, .4)) == (.1, .2, .3, .4)
assert f("#123456") == to_rgb("#123456")
assert f("#12345678") == to_rgba("#12345678")
if Version(mpl.__version__) >= Version("3.4.0"):
assert f("#123") == to_rgb("#123")
assert f("#1234") == to_rgba("#1234")
class ObjectPropertyBase(DataFixtures):
def assert_equal(self, a, b):
assert self.unpack(a) == self.unpack(b)
def unpack(self, x):
return x
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_default(self, data_type, vectors):
scale = self.prop().default_scale(vectors[data_type])
assert isinstance(scale, Nominal)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_inference_list(self, data_type, vectors):
scale = self.prop().infer_scale(self.values, vectors[data_type])
assert isinstance(scale, Nominal)
assert scale.values == self.values
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_inference_dict(self, data_type, vectors):
x = vectors[data_type]
values = dict(zip(categorical_order(x), self.values))
scale = self.prop().infer_scale(values, x)
assert isinstance(scale, Nominal)
assert scale.values == values
def test_dict_missing(self, cat_vector):
levels = categorical_order(cat_vector)
values = dict(zip(levels, self.values[:-1]))
scale = Nominal(values)
name = self.prop.__name__.lower()
msg = f"No entry in {name} dictionary for {repr(levels[-1])}"
with pytest.raises(ValueError, match=msg):
self.prop().get_mapping(scale, cat_vector)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_mapping_default(self, data_type, vectors):
x = vectors[data_type]
mapping = self.prop().get_mapping(Nominal(), x)
n = x.nunique()
for i, expected in enumerate(self.prop()._default_values(n)):
actual, = mapping([i])
self.assert_equal(actual, expected)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_mapping_from_list(self, data_type, vectors):
x = vectors[data_type]
scale = Nominal(self.values)
mapping = self.prop().get_mapping(scale, x)
for i, expected in enumerate(self.standardized_values):
actual, = mapping([i])
self.assert_equal(actual, expected)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_mapping_from_dict(self, data_type, vectors):
x = vectors[data_type]
levels = categorical_order(x)
values = dict(zip(levels, self.values[::-1]))
standardized_values = dict(zip(levels, self.standardized_values[::-1]))
scale = Nominal(values)
mapping = self.prop().get_mapping(scale, x)
for i, level in enumerate(levels):
actual, = mapping([i])
expected = standardized_values[level]
self.assert_equal(actual, expected)
def test_mapping_with_null_value(self, cat_vector):
mapping = self.prop().get_mapping(Nominal(self.values), cat_vector)
actual = mapping(np.array([0, np.nan, 2]))
v0, _, v2 = self.standardized_values
expected = [v0, self.prop.null_value, v2]
for a, b in zip(actual, expected):
self.assert_equal(a, b)
def test_unique_default_large_n(self):
n = 24
x = pd.Series(np.arange(n))
mapping = self.prop().get_mapping(Nominal(), x)
assert len({self.unpack(x_i) for x_i in mapping(x)}) == n
def test_bad_scale_values(self, cat_vector):
var_name = self.prop.__name__.lower()
with pytest.raises(TypeError, match=f"Scale values for a {var_name} variable"):
self.prop().get_mapping(Nominal(("o", "s")), cat_vector)
class TestMarker(ObjectPropertyBase):
prop = Marker
values = ["o", (5, 2, 0), MarkerStyle("^")]
standardized_values = [MarkerStyle(x) for x in values]
def unpack(self, x):
return (
x.get_path(),
x.get_joinstyle(),
x.get_transform().to_values(),
x.get_fillstyle(),
)
class TestLineStyle(ObjectPropertyBase):
prop = LineStyle
values = ["solid", "--", (1, .5)]
standardized_values = [LineStyle._get_dash_pattern(x) for x in values]
def test_bad_type(self):
p = LineStyle()
with pytest.raises(TypeError, match="^Linestyle must be .+, not list.$"):
p.standardize([1, 2])
def test_bad_style(self):
p = LineStyle()
with pytest.raises(ValueError, match="^Linestyle string must be .+, not 'o'.$"):
p.standardize("o")
def test_bad_dashes(self):
p = LineStyle()
with pytest.raises(TypeError, match="^Invalid dash pattern"):
p.standardize((1, 2, "x"))
class TestFill(DataFixtures):
@pytest.fixture
def vectors(self):
return {
"cat": pd.Series(["a", "a", "b"]),
"num": pd.Series([1, 1, 2]),
"bool": pd.Series([True, True, False])
}
@pytest.fixture
def cat_vector(self, vectors):
return vectors["cat"]
@pytest.fixture
def num_vector(self, vectors):
return vectors["num"]
@pytest.mark.parametrize("data_type", ["cat", "num", "bool"])
def test_default(self, data_type, vectors):
x = vectors[data_type]
scale = Fill().default_scale(x)
assert isinstance(scale, Nominal)
@pytest.mark.parametrize("data_type", ["cat", "num", "bool"])
def test_inference_list(self, data_type, vectors):
x = vectors[data_type]
scale = Fill().infer_scale([True, False], x)
assert isinstance(scale, Nominal)
assert scale.values == [True, False]
@pytest.mark.parametrize("data_type", ["cat", "num", "bool"])
def test_inference_dict(self, data_type, vectors):
x = vectors[data_type]
values = dict(zip(x.unique(), [True, False]))
scale = Fill().infer_scale(values, x)
assert isinstance(scale, Nominal)
assert scale.values == values
def test_mapping_categorical_data(self, cat_vector):
mapping = Fill().get_mapping(Nominal(), cat_vector)
assert_array_equal(mapping([0, 1, 0]), [True, False, True])
def test_mapping_numeric_data(self, num_vector):
mapping = Fill().get_mapping(Nominal(), num_vector)
assert_array_equal(mapping([0, 1, 0]), [True, False, True])
def test_mapping_list(self, cat_vector):
mapping = Fill().get_mapping(Nominal([False, True]), cat_vector)
assert_array_equal(mapping([0, 1, 0]), [False, True, False])
def test_mapping_truthy_list(self, cat_vector):
mapping = Fill().get_mapping(Nominal([0, 1]), cat_vector)
assert_array_equal(mapping([0, 1, 0]), [False, True, False])
def test_mapping_dict(self, cat_vector):
values = dict(zip(cat_vector.unique(), [False, True]))
mapping = Fill().get_mapping(Nominal(values), cat_vector)
assert_array_equal(mapping([0, 1, 0]), [False, True, False])
def test_cycle_warning(self):
x = pd.Series(["a", "b", "c"])
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
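# For example, periods=1000 with a 1e9 stride gives 1000 * 1_000_000_000 = 1e12,
# far past np.int32's maximum of 2_147_483_647.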
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
import itertools
import os
import random
import tempfile
from unittest import mock
import pandas as pd
import pytest
import pickle
import numpy as np
import string
import multiprocessing as mp
from copy import copy
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, assert_categorical_equal
from dask import delayed
from dask.base import compute_as_if_collection
from dask.optimization import cull
from dask.dataframe.shuffle import (
shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd,
remove_nans,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.dataframe._compat import PANDAS_GT_120
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
shuffle_func = shuffle # conflicts with keyword argument
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=17, max_branch=4)
sc = s.compute(scheduler="sync")
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method), shuffle(d, "b", shuffle=method))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[["b"]], shuffle=method).compute()
res2 = shuffle(d, ["b"], shuffle=method).compute()
res3 = shuffle(d, "b", shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, "x", npartitions=i, shuffle=method)
assert len(a.compute(scheduler="sync")) == len(b.compute(scheduler="sync"))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({"x": [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame(
{
"i32": np.array([1, 2, 3] * 3, dtype="int32"),
"f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
"cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
"obj": pd.Series(["d", "e", "f"] * 3),
"bool": np.array([True, False, True] * 3),
"dt": pd.Series(pd.date_range("20130101", periods=9)),
"dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
"td": pd.Series(pd.timedelta_range("2000", periods=9)),
}
)
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[["i32"]], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
df.a = df.a.astype("category")
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize(
"npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_tasks(npartitions):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index("x"), ddf.set_index("x", shuffle="tasks"))
assert_eq(df.set_index("y"), ddf.set_index("y", shuffle="tasks"))
assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle="tasks"))
assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle="tasks"))
assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle="tasks"))
assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle="tasks"))
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_self_index(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize("shuffle", ["tasks"])
def test_set_index_names(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
assert set(ddf.set_index("x", shuffle=shuffle).dask) == set(
ddf.set_index("x", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", shuffle=shuffle).dask) != set(
ddf.set_index("y", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle).dask) != set(
ddf.set_index("x", max_branch=3, shuffle=shuffle).dask
)
assert set(ddf.set_index("x", drop=True, shuffle=shuffle).dask) != set(
ddf.set_index("x", drop=False, shuffle=shuffle).dask
)
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_2(shuffle):
df = dd.demo.make_timeseries(
"2000",
"2004",
{"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
df2 = df.set_index("name", shuffle=shuffle)
df2.value.sum().compute(scheduler="sync")
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions
)
df2 = df.set_index("x")
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index("x").sort_index()
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle, scheduler):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a._partitions.drop_duplicates():
assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
# ensure temporary files are cleaned up when there's an internal exception.
with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
with pytest.raises(ValueError, match="Mock exception!"):
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle="disk"
)
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, "x", (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index("x")
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index("y", divisions=["a", "c", "d"])
assert result.divisions == ("a", "c", "d")
assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index("b"))
assert_eq(d3, full.set_index("b"))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
ddf = dd.DataFrame(
{("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
)
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index("x"))
with dask.config.set(get=throw):
res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
assert_eq(res, df.set_index("y"))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
# See https://github.com/dask/dask/issues/3867
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
ddf = ddf.clear_divisions()
ctx = mp.get_context("spawn")
pool = ctx.Pool(processes=8)
with pool:
results = [pool.apply_async(_set_index, (ddf, "x")) for _ in range(100)]
divisions_set = set(result.get() for result in results)
assert len(divisions_set) == 1
def _set_index(df, *args, **kwargs):
return df.set_index(*args, **kwargs).divisions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index("x", shuffle=shuffle, npartitions="auto")
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_large(shuffle):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_doesnt_increase_partitions(shuffle):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({"x": range(100), "y": range(100)})
ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array(
[
1348550149000000000,
1348550149000000000,
1348558142000000000,
1348558142000000000,
1348585928000000000,
1348585928000000000,
1348600739000000000,
1348601706000000000,
1348600739000000000,
1348601706000000000,
1348614789000000000,
1348614789000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348637628000000000,
1348638159000000000,
1348638160000000000,
1348638159000000000,
1348638160000000000,
1348637628000000000,
1348646354000000000,
1348646354000000000,
1348659107000000000,
1348657111000000000,
1348659107000000000,
1348657111000000000,
1348672876000000000,
1348672876000000000,
1348682787000000000,
1348681985000000000,
1348682787000000000,
1348681985000000000,
1348728167000000000,
1348728167000000000,
1348730745000000000,
1348730745000000000,
1348750198000000000,
1348750198000000000,
1348750198000000000,
1348753539000000000,
1348753539000000000,
1348753539000000000,
1348754449000000000,
1348754449000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
]
)
vals = pd.to_datetime(vals, unit="ns")
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i : i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic is True
def test_set_index():
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
}
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index("b", npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == "b"
assert_eq(d2, full.set_index("b"))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == "b"
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index("b")
assert d4.index.name == "b"
assert_eq(d4, full.set_index("b"))
d5 = d.set_index(["b"])
assert d5.index.name == "b"
assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=3)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 4])
d2 = d.set_index("y", npartitions=3)
assert d2.divisions[0] == 1.0
assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
assert d2.divisions[3] == 2.0
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_int(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({"x": 2 * L})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_large_uint(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
"""This test is for #7304"""
df = pd.DataFrame(
{"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
)
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 1)
d1 = d.set_index("x", npartitions=1)
assert d1.npartitions == 1
assert set(d1.divisions) == set([612509347682975743, 616762138058293247])
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index("notz", npartitions=1)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index("tz", npartitions=1)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
if PANDAS_GT_120:
# starting with pandas 1.2.0, comparing equality of timestamps with different
# timezones returns False instead of raising an error
assert not d2.divisions[0] == s2badtype[0]
else:
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
def test_set_index_npartitions():
# https://github.com/dask/dask/issues/6974
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"]
)
)
data = dd.from_pandas(data, npartitions=2)
output = data.reset_index().set_index("index", npartitions=1)
assert output.npartitions == 1
@pytest.mark.parametrize("unit", ["ns", "us"])
def test_set_index_datetime_precision(unit):
# https://github.com/dask/dask/issues/6864
df = pd.DataFrame(
[
[1567703791155681, 1],
[1567703792155681, 2],
[1567703790155681, 0],
[1567703793155681, 3],
],
columns=["ts", "rank"],
)
df.ts = pd.to_datetime(df.ts, unit=unit)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.set_index("ts")
assert_eq(ddf, df.set_index("ts"))
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame(
{
"A": list("ABAABBABAA"),
"B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame(
{
0: list("ABAABBABAA"),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with pytest.raises(NotImplementedError) as err:
ddf.set_index(["a", "b"])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a", "b"]])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a"]])
assert msg in str(err.value)
def test_set_index_sorted_true():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index("x", sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
assert_eq(
a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
)
assert_eq(
a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop),
)
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(ddf.set_index("x", sorted=True), df.set_index("x"))
def test_set_index_sorted_min_max_same():
a = pd.DataFrame({"x": [1, 2, 3], "y": [0, 0, 0]})
b = pd.DataFrame({"x": [1, 2, 3], "y": [1, 1, 1]})
aa = delayed(a)
bb = delayed(b)
df = dd.from_delayed([aa, bb], meta=a)
assert not df.known_divisions
df2 = df.set_index("y", sorted=True)
assert df2.divisions == (0, 1, 1)
def test_set_index_empty_partition():
test_vals = [1, 2, 3]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for conv in converters:
df = pd.DataFrame(
[{"x": conv(i), "y": i} for i in test_vals], columns=["x", "y"]
)
ddf = dd.concat(
[
dd.from_pandas(df, npartitions=1),
dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
]
)
assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
assert assert_eq(ddf.set_index("x"), df.set_index("x"))
def test_set_index_on_empty():
test_vals = [1, 2, 3, 4]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for converter in converters:
df = pd.DataFrame([{"x": converter(x), "y": x} for x in test_vals])
ddf = dd.from_pandas(df, npartitions=4)
assert ddf.npartitions > 1
ddf = ddf[ddf.y > df.y.max()].set_index("x")
expected_df = df[df.y > df.y.max()].set_index("x")
assert assert_eq(ddf, expected_df, **CHECK_FREQ)
assert ddf.npartitions == 1
def test_set_index_categorical():
# https://github.com/dask/dask/issues/5671
order = list(reversed(string.ascii_letters))
values = list(string.ascii_letters)
random.shuffle(values)
dtype = pd.api.types.CategoricalDtype(order, ordered=True)
df = pd.DataFrame({"A": pd.Categorical(values, dtype=dtype), "B": 1})
result = dd.from_pandas(df, npartitions=2).set_index("A")
assert len(result) == len(df)
# sorted with the metric defined by the Categorical
divisions = pd.Categorical(result.divisions, dtype=dtype)
assert_categorical_equal(divisions, divisions.sort_values())
def test_compute_divisions():
from dask.dataframe.shuffle import compute_and_set_divisions
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]},
index=[1, 3, 10, 20],
)
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = compute_and_set_divisions(copy(a))
assert_eq(a, b, check_divisions=False)
assert b.known_divisions
def test_empty_partitions():
# See https://github.com/dask/dask/issues/2408
df = pd.DataFrame({"a": list(range(10))})
df["b"] = df["a"] % 3
df["c"] = df["b"].astype(str)
ddf = dd.from_pandas(df, npartitions=3)
ddf = ddf.set_index("b")
ddf = ddf.repartition(npartitions=3)
ddf.get_partition(0).compute()
assert_eq(ddf, df.set_index("b"))
ddf = ddf.set_index("c")
assert_eq(ddf, df.set_index("b").set_index("c"))
def test_remove_nans():
tests = [
((1, 1, 2), (1, 1, 2)),
((None, 1, 2), (1, 1, 2)),
((1, None, 2), (1, 2, 2)),
((1, 2, None), (1, 2, 2)),
((1, 2, None, None), (1, 2, 2, 2)),
((None, None, 1, 2), (1, 1, 1, 2)),
((1, None, None, 2), (1, 2, 2, 2)),
((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
]
converters = [
(int, np.nan),
(float, np.nan),
(str, np.nan),
(lambda x: pd.to_datetime(x, unit="ns"), np.datetime64("NaT")),
]
for conv, none_val in converters:
for inputs, expected in tests:
params = [none_val if x is None else conv(x) for x in inputs]
expected = [conv(x) for x in expected]
assert remove_nans(params) == expected
@pytest.mark.slow
def test_gh_2730():
large = pd.DataFrame({"KEY": np.arange(0, 50000)})
small = pd.DataFrame({"KEY": np.arange(25, 500)})
dd_left = dd.from_pandas(small, npartitions=3)
dd_right = dd.from_pandas(large, npartitions=257)
with dask.config.set(shuffle="tasks", scheduler="sync"):
dd_merged = dd_left.merge(dd_right, how="inner", on="KEY")
result = dd_merged.compute()
expected = large.merge(small, how="inner", on="KEY")
tm.assert_frame_equal(result.sort_values("KEY").reset_index(drop=True), expected)
@pytest.mark.parametrize("npartitions", [None, "auto"])
def test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):
# Atomic counter
count = itertools.count()
def increment():
next(count)
def make_part(dummy, n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
dsk = {("inc", i): (increment,) for i in range(nparts)}
dsk.update({("x", i): (make_part, ("inc", i), n) for i in range(nparts)})
ddf = dd.DataFrame(dsk, "x", make_part(None, 1), [None] * (nparts + 1))
ddf.set_index("x", npartitions=npartitions)
ntimes = next(count)
assert ntimes == nparts
def test_set_index_errors_with_inplace_kwarg():
df = | pd.DataFrame({"a": [9, 8, 7], "b": [6, 5, 4], "c": [3, 2, 1]}) | pandas.DataFrame |
import json
import numpy
import pandas
import re
from datetime import timedelta
from GoogleSheetIOStream import GoogleSheetIOStream
class BonusProcessor (object):
def __init__(self, iostream, config_dir='config/', working_folder='Chouta Stand Payroll', input_folder='Input', config_folder='Config'):
self.io_stream = iostream
self.working_folder = self.io_stream.get_file(name=working_folder)
self.input_folder = self.io_stream.get_file(name=input_folder, parent=self.working_folder)
self.config_folder = self.io_stream.get_file(name=config_folder, parent=self.working_folder)
with open(config_dir + 'format.json', 'r') as format_file:
self.format = json.load(format_file)
with open(config_dir + 'bonus_format.json', 'r') as format_file:
self.bonus_format = json.load(format_file)
def calc_payroll(self):
# Calculate hours
hours_date_range, hours = self.calc_hours()
# Calculate bonus
bonus_date_range, bonus = self.calc_bonus()
# Combine hours and bonuses
payroll = hours.merge(bonus, how='left', on='Name').fillna(0).round(2)
date_range = [min([hours_date_range[0], bonus_date_range[0]]), max([hours_date_range[1], bonus_date_range[1]])]
# Upload payroll to googlesheets
top_line = 'Payroll Period,' + date_range[0] + ',' + date_range[1]
payroll_name = 'Payroll ' + date_range[0] + ' To ' + date_range[1]
payroll_data = ''.join([top_line, '\r\n', payroll.to_csv(line_terminator='\r\n', index=False)])
self.io_stream.upload_sheet(payroll_data, payroll_name, self.working_folder, format=self.format, numlines=len(payroll.index)+1)
def calc_hours(self):
# Read in schedule
hours = self.io_stream.download_sheet('Schedule', self.input_folder)
hours.loc[:, 'Clock In'] = pandas.to_datetime(hours['Clock In'])
hours.loc[:, 'Clock Out'] = | pandas.to_datetime(hours['Clock Out']) | pandas.to_datetime |
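# Illustrative aside, not from the original class: one plausible continuation of the
# truncated calc_hours above -- once both clock columns are datetimes, worked hours
# are a plain timedelta difference. The names and timestamps below are invented.
import pandas as pd
sched = pd.DataFrame({"Name": ["Ana"],
"Clock In": pd.to_datetime(["2024-06-01 09:00"]),
"Clock Out": pd.to_datetime(["2024-06-01 17:30"])})
sched["Hours"] = (sched["Clock Out"] - sched["Clock In"]).dt.total_seconds() / 3600
# sched["Hours"] -> 8.5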
import os
import logging
import pandas as pd
from slackbot import licence_plate
log = logging.getLogger(__name__)
class CarOwners:
# Source data:
# https://intranet.xebia.com/display/XNL/Xebia+Group+Kenteken+Registratie
def __init__(self, csv_path='/data/car-owners.csv'):
self.csv_path = csv_path
self.owners_df = None
self.load()
def tag(self, plate, slackid=None, name=None):
plate = licence_plate.normalize(plate)
assert len(plate) == 6, 'Length of the licence plate must be 6 (without any dashes)'
if slackid and slackid.startswith('@'):
slackid = slackid[1:]
self.load()
if plate in self.owners_df.index:
self.owners_df.loc[plate, 'slackid'] = slackid or ''
self.owners_df.loc[plate, 'name'] = name or ''
else:
new_data = pd.Series({'slackid': slackid, 'name': name}, name=plate)
self.owners_df = self.owners_df.append(new_data)
self.owners_df = self.owners_df.where((pd.notnull(self.owners_df)), None)
self.save()
def untag(self, slackid, plate):
self.load()
if plate in self.owners_df.index:
self.owners_df.drop([plate], inplace=True)
self.save()
def lookup(self, plate):
"""
:return: Dict with 'name' and 'slackid' or None is not found
"""
plate = licence_plate.normalize(plate)
assert len(plate) == 6, 'Length of the licence plate must be 6 (without any dashes)'
self.load()
if plate not in self.owners_df.index:
log.info('Owner lookup for %s result: not found.', plate)
return None
res = self.owners_df.loc[plate]
log.info('Owner lookup for %s result: found: %s', plate, res.to_dict())
return res.to_dict()
def load(self):
if not os.path.exists(self.csv_path):
empty_df = pd.DataFrame(columns=['kenteken', 'slackid', 'name'], dtype=str)
empty_df.set_index('kenteken', inplace=True)
self.owners_df = empty_df
else:
self.owners_df = pd.read_csv(self.csv_path, header=0, index_col='kenteken', quoting=1, dtype=str)
self.owners_df = self.owners_df.where(( | pd.notnull(self.owners_df) | pandas.notnull |
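# Illustrative usage of the CarOwners helper above. The plate, path and Slack handle
# are invented, and it is assumed that licence_plate.normalize() strips the dashes so
# the normalized plate has the required 6 characters.
owners = CarOwners(csv_path="/tmp/car-owners.csv")
owners.tag("AB-12-CD", slackid="@jdoe", name="Jane Doe")
owners.lookup("AB-12-CD")   # -> {"slackid": "jdoe", "name": "Jane Doe"}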
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 160)
pd.set_option('expand_frame_repr', False)
pd.set_option('precision', 4)
# constructing binary features
def process_embarked():
global df_titanic_data
# replacing the missing values with the most common value in the variable
df_titanic_data.Embarked[df_titanic_data.Embarked.isnull()] = df_titanic_data.Embarked.dropna().mode().values
# converting the values into numbers
df_titanic_data['Embarked'] = pd.factorize(df_titanic_data['Embarked'])[0]
# binarizing the constructed features
if keep_binary:
df_titanic_data = pd.concat([df_titanic_data, pd.get_dummies(df_titanic_data['Embarked']).rename(
columns=lambda x: 'Embarked_' + str(x))], axis=1)
# Define a helper function that uses RandomForestRegressor for handling the missing values of the age variable
def set_missing_ages():
global df_titanic_data
age_data = df_titanic_data[
['Age', 'Embarked', 'Fare', 'Parch', 'SibSp', 'Title_id', 'Pclass', 'Names', 'CabinLetter']]
input_values_RF = age_data.loc[(df_titanic_data.Age.notnull())].values[:, 1::]
target_values_RF = age_data.loc[(df_titanic_data.Age.notnull())].values[:, 0]
# Creating a random forest regressor object from sklearn (see the documentation for more details)
regressor = RandomForestRegressor(n_estimators=2000, n_jobs=-1)
# building the model based on the input values and target values above
regressor.fit(input_values_RF, target_values_RF)
# using the trained model to predict the missing values
predicted_ages = regressor.predict(age_data.loc[(df_titanic_data.Age.isnull())].values[:, 1::])
# Filling the predicted ages back into the original titanic dataframe
df_titanic_data.loc[(df_titanic_data.Age.isnull()), 'Age'] = predicted_ages
# Helper function for constructing features from the age variable
def process_age():
global df_titanic_data
# calling the set_missing_ages helper function to use random forest regression for predicting missing values of age
set_missing_ages()
# # scale the age variable by centering it around the mean with a unit variance
# if keep_scaled:
# scaler_preprocessing = preprocessing.StandardScaler()
# df_titanic_data['Age_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Age.reshape(-1, 1))
# construct a feature for children
df_titanic_data['isChild'] = np.where(df_titanic_data.Age < 13, 1, 0)
# bin into quartiles and create binary features
df_titanic_data['Age_bin'] = pd.qcut(df_titanic_data['Age'], 4)
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Age_bin']).rename(columns=lambda y: 'Age_' + str(y))],
axis=1)
if keep_bins:
df_titanic_data['Age_bin_id'] = pd.factorize(df_titanic_data['Age_bin'])[0] + 1
if keep_bins and keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Age_bin_id_scaled'] = scaler_processing.fit_transform(
df_titanic_data.Age_bin_id.values.reshape(-1, 1))
if not keep_strings:
df_titanic_data.drop('Age_bin', axis=1, inplace=True)
# Helper function for constructing features from the passengers/crew names
def process_name():
global df_titanic_data
# counting the number of words in each passenger's name
df_titanic_data['Names'] = df_titanic_data['Name'].map(lambda y: len(re.split(' ', y)))
# Getting titles for each person
df_titanic_data['Title'] = df_titanic_data['Name'].map(lambda y: re.compile(", (.*?)\.").findall(y)[0])
# handling the rarely occurring titles
df_titanic_data['Title'][df_titanic_data.Title == 'Jonkheer'] = 'Master'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Ms', 'Mlle'])] = 'Miss'
df_titanic_data['Title'][df_titanic_data.Title == 'Mme'] = 'Mrs'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Capt', 'Don', 'Major', 'Col', 'Sir'])] = 'Sir'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Dona', 'Lady', 'the Countess'])] = 'Lady'
# binarizing all the features
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Title']).rename(columns=lambda x: 'Title_' + str(x))],
axis=1)
# scaling
if keep_scaled:
scaler_preprocessing = preprocessing.StandardScaler()
df_titanic_data['Names_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Names.values.reshape(-1, 1))
# binning
if keep_bins:
df_titanic_data['Title_id'] = pd.factorize(df_titanic_data['Title'])[0] + 1
if keep_bins and keep_scaled:
scaler = preprocessing.StandardScaler()
df_titanic_data['Title_id_scaled'] = scaler.fit_transform(df_titanic_data.Title_id.values.reshape(-1, 1))
# Generate features from the cabin input variable
def process_cabin():
# refering to the global variable that contains the titanic examples
global df_titanic_data
# replacing the missing values in the cabin variable with "U0"
df_titanic_data['Cabin'][df_titanic_data.Cabin.isnull()] = 'U0'
# the cabin value is a sequence of alphanumeric characters, so we are going to create some features
# from the alphabetical part of it
df_titanic_data['CabinLetter'] = df_titanic_data['Cabin'].map(lambda l: get_cabin_letter(l))
df_titanic_data['CabinLetter'] = pd.factorize(df_titanic_data['CabinLetter'])[0]
# binarizing the cabin letters features
if keep_binary:
cletters = pd.get_dummies(df_titanic_data['CabinLetter']).rename(columns=lambda x: 'CabinLetter_' + str(x))
df_titanic_data = pd.concat([df_titanic_data, cletters], axis=1)
# creating features from the numerical side of the cabin
df_titanic_data['CabinNumber'] = df_titanic_data['Cabin'].map(lambda x: get_cabin_num(x)).astype(int) + 1
# scaling the feature
if keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['CabinNumber_scaled'] = scaler_processing.fit_transform(df_titanic_data.CabinNumber.values.reshape(-1, 1))
def get_cabin_letter(cabin_value):
# searching for the letters in the cabin alphanumerical value
letter_match = re.compile("([a-zA-Z]+)").search(cabin_value)
if letter_match:
return letter_match.group()
else:
return 'U'
def get_cabin_num(cabin_value):
# searching for the numbers in the cabin alphanumerical value
number_match = re.compile("([0-9]+)").search(cabin_value)
if number_match:
return number_match.group()
else:
return 0
# helper function for constructing features from the ticket fare variable
def process_fare():
global df_titanic_data
# handling the missing values by replacing them with the median fare
df_titanic_data['Fare'][np.isnan(df_titanic_data['Fare'])] = df_titanic_data['Fare'].median()
# zeros in the fare will cause some division problems so we are going to set them to 1/10th of the lowest non-zero fare
min_nonzero_fare = df_titanic_data['Fare'][df_titanic_data['Fare'] > 0].min()
df_titanic_data['Fare'][df_titanic_data['Fare'] == 0] = min_nonzero_fare / 10
# Binarizing the features by binning them into quantiles
df_titanic_data['Fare_bin'] = pd.qcut(df_titanic_data['Fare'], 4)
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Fare_bin']).rename(columns=lambda x: 'Fare_' + str(x))],
axis=1)
# binning
if keep_bins:
df_titanic_data['Fare_bin_id'] = pd.factorize(df_titanic_data['Fare_bin'])[0] + 1
# scaling the value
if keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Fare_scaled'] = scaler_processing.fit_transform(df_titanic_data.Fare.values.reshape(-1, 1))
if keep_bins and keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Fare_bin_id_scaled'] = scaler_processing.fit_transform(
df_titanic_data.Fare_bin_id.values.reshape(-1, 1))
if not keep_strings:
df_titanic_data.drop('Fare_bin', axis=1, inplace=True)
# Helper function for constructing features from the ticket variable
def process_ticket():
global df_titanic_data
df_titanic_data['TicketPrefix'] = df_titanic_data['Ticket'].map(lambda y: get_ticket_prefix(y.upper()))
df_titanic_data['TicketPrefix'] = df_titanic_data['TicketPrefix'].map(lambda y: re.sub('[\.?\/?]', '', y))
df_titanic_data['TicketPrefix'] = df_titanic_data['TicketPrefix'].map(lambda y: re.sub('STON', 'SOTON', y))
df_titanic_data['TicketPrefixId'] = pd.factorize(df_titanic_data['TicketPrefix'])[0]
# binarizing features for each ticket prefix
if keep_binary:
prefixes = | pd.get_dummies(df_titanic_data['TicketPrefix']) | pandas.get_dummies |
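# Side-by-side illustration of the two encodings used repeatedly above, on a toy
# column: factorize assigns one integer id per category, while get_dummies expands
# each category into its own 0/1 indicator column.
import pandas as pd
s = pd.Series(["A", "PC", "A", "SOTON"])
pd.factorize(s)[0]                          # array([0, 1, 0, 2])
pd.get_dummies(s, prefix="TicketPrefix")    # TicketPrefix_A / _PC / _SOTON columns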
"""This module contains nodes for signal filtering."""
import numpy as np
import pandas as pd
from scipy import signal
from timeflux.core.branch import Branch
from timeflux.core.node import Node
from timeflux.nodes.window import Window
from timeflux_dsp.utils.filters import (
construct_fir_filter,
construct_iir_filter,
design_edges,
)
from timeflux_dsp.utils.import_helpers import make_object
class DropRows(Node):
"""Decimate signal by an integer factor.
This node uses Pandas computationally efficient functions to drop rows.
By default, it simply transfers one row out of ``factor`` and drops the others.
If ``method`` is `mean` (resp. median), it applies a rolling window of length
equals ``factor``, computes the mean and returns one value per window.
It maintains an internal state to ensure that every k'th sample is picked
even across chunk boundaries.
Attributes:
i (Port): Default input, expects DataFrame.
o (Port): Default output, provides DataFrame.
Args:
factor (int): Decimation factor. Only every k'th sample will be
transferred into the output.
method (str|None): Method to use to drop rows.
If `None`, the values are transferred as is.
If `mean` (resp. `median`), the mean (resp. median)
of the samples is taken.
Example:
.. literalinclude:: /../../timeflux_dsp/test/graphs/droprows.yaml
:language: yaml
Example:
In this example, we generate white noise to stream and we drop one sample out
of two using DropRows, setting:
* ``factor`` = `2`
* ``method`` = `None` (see orange trace) | ``method`` = `"mean"` (see green trace)
.. image:: /../../timeflux_dsp/doc/static/image/droprows_io.svg
:align: center
Notes:
Note that this node is not supposed to dejitter the timestamps, so if
the input chunk is not uniformly sampled, the output chunk won’t be either.
Also, this filter does not implement any anti-aliasing filter.
Hence, it is recommended to precede this node by a low-pass filter
(e.g., FIR or IIR) which cuts out below half of the new sampling rate.
"""
def __init__(self, factor, method=None):
super().__init__()
self._factor = factor
self._method = method
self._previous = pd.DataFrame()
def update(self):
# copy the meta
self.o.meta = self.i.meta
# if nominal rate is specified in the meta, update it.
if "rate" in self.o.meta:
self.o.meta["rate"] /= self._factor
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# At this point, we are sure that we have some data to process
self.i.data = pd.concat([self._previous, self.i.data], axis=0, sort=True)
n = self.i.data.shape[0]
remaining = n % self._factor
self.i.data, self._previous = np.split(self.i.data, [n - remaining])
if self._method is None:
# take every kth sample with k=factor starting from the k-1 position
self.o.data = self.i.data.iloc[self._factor - 1 :: self._factor]
else:
# estimate rolling mean (or median) with window length=factor and take
# every kth sample with k=factor starting from the k-1 position
if self._method == "mean":
self.o.data = (
self.i.data.rolling(
window=self._factor, min_periods=self._factor, center=False
)
.mean()
.iloc[self._factor - 1 :: self._factor]
)
elif self._method == "median":
self.o.data = (
self.i.data.rolling(
window=self._factor, min_periods=self._factor, center=False
)
.median()
.iloc[self._factor - 1 :: self._factor]
)
class Resample(Node):
"""Resample signal.
This node calls the `scipy.signal.resample` function to decimate the signal
using Fourier method.
Attributes:
i (Port): Default input, expects DataFrame.
o (Port): Default output, provides DataFrame.
Args:
factor (int): Decimation factor. Only every k'th sample will be
transferred into the output.
window (str|list|float): Specifies the window applied to the signal
in the Fourier domain. Default: `None`.
Example:
.. literalinclude:: /../../timeflux_dsp/test/graphs/resample.yaml
:language: yaml
Notes:
This node should be used after a buffer to assure that the FFT window
has always the same length.
References:
* `scipy.signal.resample <https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.resample.html>`_
"""
def __init__(self, factor, window=None):
super().__init__()
self._factor = factor
self._window = window
self._previous = pd.DataFrame()
def update(self):
# copy the meta
self.o.meta = self.i.meta
# if nominal rate is specified in the meta, update it.
if "rate" in self.o.meta:
self.o.meta["rate"] /= self._factor
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# At this point, we are sure that we have some data to process
n = self.i.data.shape[0]
if not self._previous.empty:
self.i.data = | pd.concat([self._previous, self.i.data], axis=0) | pandas.concat |
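# Minimal, self-contained sketch of the DropRows decimation logic documented above:
# keep every k-th row while carrying leftover rows across chunk boundaries. The chunk
# values are made up for illustration.
import pandas as pd
factor = 3
chunk1 = pd.DataFrame({"x": range(0, 5)})
chunk2 = pd.DataFrame({"x": range(5, 12)})
data = chunk1                                # first chunk, nothing carried over
cut = len(data) - (len(data) % factor)
kept, previous = data.iloc[:cut], data.iloc[cut:]
out1 = kept.iloc[factor - 1 :: factor]       # x == [2]
data = pd.concat([previous, chunk2], axis=0)
cut = len(data) - (len(data) % factor)
kept, previous = data.iloc[:cut], data.iloc[cut:]
out2 = kept.iloc[factor - 1 :: factor]       # x == [5, 8, 11]
# together: every 3rd sample of 0..11, unaffected by the chunk boundary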
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert | Timedelta(" 10000D ") | pandas.Timedelta |
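# The round/floor/ceil behaviour exercised by test_round above, shown directly on one
# of the test values:
import pandas as pd
t = pd.Timedelta("1 days 02:34:56.789123456")
t.round("s")    # Timedelta('1 days 02:34:57')
t.floor("T")    # Timedelta('1 days 02:34:00')
t.ceil("H")     # Timedelta('1 days 03:00:00')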
"""Contains plotting code used by the web server."""
from datetime import timedelta
from bokeh.models import (
ColumnDataSource,
CustomJS,
DataRange1d,
Range1d,
Whisker,
LabelSet,
HoverTool,
)
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.layouts import row, Row
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from django.db.models import F
import pandas as pd
import networkx as nx
from vast_pipeline.models import Measurement, Source
def plot_lightcurve(
source: Source,
vs_abs_min: float = 4.3,
m_abs_min: float = 0.26,
use_peak_flux: bool = True,
) -> Row:
"""Create the lightcurve and 2-epoch metric graph for a source with Bokeh.
Args:
source (Source): Source object.
vs_abs_min (float, optional): pairs of Measurement objects with an absolute vs metric
greater than `vs_abs_min` and m metric greater than `m_abs_min` will be connected
in the metric graph. Defaults to 4.3.
m_abs_min (float, optional): See `vs_abs_min`. Defaults to 0.26.
use_peak_flux (bool, optional): If True, use peak fluxes, otherwise use integrated
fluxes. Defaults to True.
Returns:
Row: Bokeh Row layout object containing the lightcurve and graph plots.
"""
PLOT_WIDTH = 800
PLOT_HEIGHT = 300
flux_column = "flux_peak" if use_peak_flux else "flux_int"
metric_suffix = "peak" if use_peak_flux else "int"
measurements_qs = (
Measurement.objects.filter(source__id=source.id)
.annotate(
taustart_ts=F("image__datetime"),
flux=F(flux_column),
flux_err_lower=F(flux_column) - F(f"{flux_column}_err"),
flux_err_upper=F(flux_column) + F(f"{flux_column}_err"),
)
.values(
"id",
"pk",
"taustart_ts",
"flux",
"flux_err_upper",
"flux_err_lower",
"forced",
)
.order_by("taustart_ts")
)
# lightcurve required cols: taustart_ts, flux, flux_err_upper, flux_err_lower, forced
lightcurve = pd.DataFrame(measurements_qs)
# remap method values to labels to make a better legend
lightcurve["method"] = lightcurve.forced.map({True: "Forced", False: "Selavy"})
lc_source = ColumnDataSource(lightcurve)
method_mapper = factor_cmap(
"method", palette="Colorblind3", factors=["Selavy", "Forced"]
)
min_y = min(0, lightcurve.flux_err_lower.min())
max_y = lightcurve.flux_err_upper.max()
y_padding = (max_y - min_y) * 0.1
fig_lc = figure(
plot_width=PLOT_WIDTH,
plot_height=PLOT_HEIGHT,
sizing_mode="stretch_width",
x_axis_type="datetime",
x_range=DataRange1d(default_span=timedelta(days=1)),
y_range=DataRange1d(start=min_y, end=max_y + y_padding),
)
# line source must be a COPY of the data for the scatter source for the hover and
# selection to work properly, using the same ColumnDataSource will break it
fig_lc.line("taustart_ts", "flux", source=lightcurve)
lc_scatter = fig_lc.scatter(
"taustart_ts",
"flux",
marker="circle",
size=6,
color=method_mapper,
nonselection_color=method_mapper,
selection_color="red",
nonselection_alpha=1.0,
hover_color="red",
alpha=1.0,
source=lc_source,
legend_group="method",
)
fig_lc.add_layout(
Whisker(
base="taustart_ts",
upper="flux_err_upper",
lower="flux_err_lower",
source=lc_source,
)
)
fig_lc.xaxis.axis_label = "Datetime"
fig_lc.xaxis[0].formatter = DatetimeTickFormatter(days="%F", hours='%H:%M')
fig_lc.yaxis.axis_label = (
"Peak flux (mJy/beam)" if use_peak_flux else "Integrated flux (mJy)"
)
# determine legend location: either bottom_left or top_left
legend_location = (
"top_left"
if lightcurve.sort_values("taustart_ts").iloc[0].flux < (max_y - min_y) / 2
else "bottom_left"
)
fig_lc.legend.location = legend_location
# TODO add vs and m metrics to graph edges
# create plot
fig_graph = figure(
plot_width=PLOT_HEIGHT,
plot_height=PLOT_HEIGHT,
x_range=Range1d(-1.1, 1.1),
y_range=Range1d(-1.1, 1.1),
x_axis_type=None,
y_axis_type=None,
sizing_mode="fixed",
)
hover_tool_lc_callback = None
measurement_pairs = source.get_measurement_pairs()
if len(measurement_pairs) > 0:
candidate_measurement_pairs_df = | pd.DataFrame(measurement_pairs) | pandas.DataFrame |
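# Stripped-down illustration of the Whisker error-bar pattern used in plot_lightcurve
# above. Toy values only; it assumes the same bokeh 2.x-style plot_width/plot_height
# keywords already used in the code above.
import pandas as pd
from bokeh.models import ColumnDataSource, Whisker
from bokeh.plotting import figure
toy = pd.DataFrame({"x": [1, 2, 3], "flux": [2.0, 3.5, 2.8]})
toy["upper"] = toy.flux + 0.4
toy["lower"] = toy.flux - 0.4
src = ColumnDataSource(toy)
fig = figure(plot_width=300, plot_height=300)
fig.scatter("x", "flux", source=src)
fig.add_layout(Whisker(base="x", upper="upper", lower="lower", source=src))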
#==============================================================================
# Import packages
#==============================================================================
import numpy as np
import pandas as pd
# Utilities
from sklearn.utils import resample
# Transformer to select a subset of the Pandas DataFrame columns
from sklearn.base import BaseEstimator, TransformerMixin
# Pipeline
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
# Data preprocessing
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
# Feature selection
from sklearn.feature_selection import VarianceThreshold
#==============================================================================
# Custom transformer classes
#==============================================================================
# Class to select columns
class FeatureSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, x, y=None):
return self
def transform(self, x):
return x[self.attribute_names].values
# Class to impute textual category
class ImputerTextualCategory(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, df, y=None):
return self
def transform(self, x):
return pd.DataFrame(x).apply(lambda x: x.fillna(x.value_counts().index[0]))
# Class to encode labels across multiple columns
class MultiColumnLabelEncoder(BaseEstimator, TransformerMixin):
def __init__(self, astype=int):
self.astype = astype
def fit(self, x, y=None):
return self
def transform(self, x, y=None):
if self.astype == int:
return pd.DataFrame(x).apply(LabelEncoder().fit_transform)
else:
return pd.DataFrame(x).apply(LabelEncoder().fit_transform).astype(str)
# Class for one-hot encoding of textual categorical values and optionally
# drop the first dummy feature (if multi-collinearity is a concern)
class GetDummies(BaseEstimator, TransformerMixin):
def __init__(self, drop_first=False):
self.drop_first = drop_first
def fit(self, x, y=None):
return self
def transform(self, x):
return pd.get_dummies(x, drop_first=self.drop_first)
#==============================================================================
# Initialization Settings
#==============================================================================
ID = 'id'
Y = 'y'
DIR = "input"
DATAFILE = "{0}/data_example.csv".format(DIR)
NTRAINROWS = None # Number of rows of data file to read; None reads all rows
UPSAMPLEPCT = .4 # Percent of samples to have positive class; 0 <= pct < 1
SEED = 42 # Seed state for reproducibility
VARTHRESHOLD = .001 # Minimum variability allowed for features
#==============================================================================
# Data import
#==============================================================================
df = | pd.read_csv(DATAFILE, index_col=ID, header=0, nrows=NTRAINROWS) | pandas.read_csv |
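# Illustrative assembly of the custom transformer classes defined above into a
# two-branch pipeline. The column names and the use of SimpleImputer (instead of the
# deprecated Imputer imported above) are assumptions for this sketch, not part of the
# original script.
import pandas as pd
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
example = pd.DataFrame({"num1": [1.0, None, 3.0],
"num2": [0.5, 1.5, None],
"cat1": ["a", None, "b"]})
numeric_branch = Pipeline([("select", FeatureSelector(["num1", "num2"])),
("impute", SimpleImputer(strategy="median")),
("scale", StandardScaler())])
categorical_branch = Pipeline([("select", FeatureSelector(["cat1"])),
("impute", ImputerTextualCategory()),
("onehot", GetDummies(drop_first=True))])
features = FeatureUnion([("num", numeric_branch), ("cat", categorical_branch)])
X = features.fit_transform(example)   # scaled numerics stacked with dummy columns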
from os.path import join
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from src import utils as cutil
def convert_non_monotonic_to_nan(array):
"""Converts a numpy array to a monotonically increasing one.
Args:
array (numpy.ndarray [N,]): input array
Returns:
numpy.ndarray [N,]: some values marked as missing, all non-missing
values should be monotonically increasing
Usage:
>>> convert_non_monotonic_to_nan(np.array([0, 0, 5, 3, 4, 6, 3, 7, 6, 7, 8]))
np.array([ 0., 0., np.nan, 3., np.nan, np.nan, 3., np.nan, 6., 7., 8.])
"""
keep = np.arange(0, len(array))
is_monotonic = False
while not is_monotonic:
is_monotonic_array = np.hstack(
(array[keep][1:] >= array[keep][:-1], np.array(True))
)
is_monotonic = is_monotonic_array.all()
keep = keep[is_monotonic_array]
out_array = np.full_like(array.astype(np.float), np.nan)
out_array[keep] = array[keep]
return out_array
def log_interpolate(array):
"""Interpolates assuming log growth.
Args:
array (numpy.ndarray [N,]): input array with missing values
Returns:
numpy.ndarray [N,]: all missing values will be filled
Usage:
>>> log_interpolate(np.array([0, np.nan, 2, np.nan, 4, 6, np.nan, 7, 8]))
np.array([0, 0, 2, 3, 4, 6, 7, 7, 8])
"""
idx = np.arange(0, len(array))
log_array = np.log(array.astype(np.float32) + 1e-1)
interp_array = np.interp(
x=idx, xp=idx[~np.isnan(array)], fp=log_array[~np.isnan(array)]
)
return np.round(np.exp(interp_array)).astype(int)
DATA_CHINA = cutil.DATA_RAW / "china"
health_dxy_file = join(DATA_CHINA, "DXYArea.csv")
health_jan_file = join(DATA_CHINA, "china_city_health_jan.xlsx")
policy_file = join(DATA_CHINA, "CHN_policy_data_sources.csv")
pop_file = join(DATA_CHINA, "china_city_pop.csv")
output_file = cutil.DATA_PROCESSED / "adm2" / "CHN_processed.csv"
match_file = join(DATA_CHINA, "match_china_city_name_w_adm2.csv")
shp_file = cutil.DATA_INTERIM / "adm" / "adm2" / "adm2.shp"
end_date_file = cutil.CODE / "data" / "cutoff_dates.csv"
end_date = pd.read_csv(end_date_file)
(end_date,) = end_date.loc[end_date["tag"] == "default", "end_date"].values
end_date = str(end_date)
print("End Date: ", end_date)
## Load and clean pre 01/24 data
# load pre 01/24 data
df_jan = pd.read_excel(health_jan_file, sheet_name=None)
# process pre 1/24 data
df_jan_merged = pd.DataFrame(columns=["adm0_name", "adm1_name", "adm2_name", "date"])
for old_col, new_col in zip(
["confirmed", "death", "recovery"],
["cum_confirmed_cases", "cum_deaths", "cum_recoveries"],
):
melted = (
df_jan[old_col]
.melt(
id_vars=["adm0_name", "adm1_name", "adm2_name"],
var_name="date",
value_name=new_col,
)
.dropna()
)
df_jan_merged = pd.merge(
df_jan_merged,
melted,
how="outer",
on=["adm0_name", "adm1_name", "adm2_name", "date"],
)
df_jan_merged = df_jan_merged.loc[df_jan_merged["adm2_name"] != "Unknown", :]
## Load and clean main data (scraped), harmonize city names
# data downloaded from
# https://github.com/BlankerL/DXY-COVID-19-Data
df = pd.read_csv(health_dxy_file)
# drop aggregates and cases in other countries
df = df.loc[df["countryEnglishName"] == "China", :]
df = df.loc[df["cityName"].notna(), :]
# df.describe(include='all') # quick summary
# df['provinceName'].unique() # looks clean
# df['provinceEnglishName'].unique() # looks clean
# df['cityName'].unique() # looks messy, will keep raw data
# # check unique English name for obs with the same Chinese cityName
# for cn_name, group in df.groupby(['provinceName', 'cityName']):
# en_name = group['cityEnglishName'].unique()
# if len(en_name) > 1:
# print(cn_name)
# print(en_name)
# print(group['cityEnglishName'].shape)
# print(group['cityEnglishName'].value_counts())
# # check all english city names
# for en_name, _ in df.groupby(['provinceEnglishName', 'cityEnglishName']):
# print(en_name)
# # check all chinese city names
# for cn_name, _ in df.groupby(['provinceName', 'cityName']):
# print(cn_name)
# set and sort index
df = df.set_index(["provinceName", "cityName"]).sort_index()
# record notes
df.loc[:, "notes"] = np.nan
# recode city English names based on Chinese names
cityEnglishName_dict = {
# 'provinceName', 'cityName': 'cityEnglishName', 'assignedToCity'
# for prisons
("浙江省", "省十里丰监狱"): ("Shilifeng Prison", "prison"),
("山东省", "任城监狱"): ("Rencheng Prison", "prison"),
("湖北省", "监狱系统"): ("Prison", "prison"),
# for harmonizing names
("四川省", "凉山"): ("Liangshan Yi Autonomous Prefecture", np.nan),
("四川省", "凉山州"): ("Liangshan Yi Autonomous Prefecture", np.nan),
# for imported cases
(None, "境外输入人员"): ("International Imported Cases", "imported"),
(None, "外地来沪人员"): ("Domestic Imported Cases", "imported"),
(None, "武汉来京人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来京人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来津"): ("Domestic Imported Cases", "imported"),
(None, "外地来津人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来穗人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来粤人员"): ("Domestic Imported Cases", "imported"),
# for unknown
(None, "待明确地区"): ("Unknown", "unknown"),
(None, "未明确地区"): ("Unknown", "unknown"),
(None, "未知"): ("Unknown", "unknown"),
(None, "未知地区"): ("Unknown", "unknown"),
(None, "不明地区"): ("Unknown", "unknown"),
(None, "未明确地区"): ("Unknown", "unknown"),
(None, "待明确"): ("Unknown", "unknown"),
}
# clean up cityEnglishName
for cn_name, values in cityEnglishName_dict.items():
cn_name = tuple(slice(s) if s is None else s for s in cn_name)
df.loc[cn_name, ["cityEnglishName", "notes"]] = values
# # check remaining missing values
# df.loc[df['cityEnglishName'].isna(), :].index.unique().tolist()
# add new admin level
df.loc[:, "adm3_name"] = "N/A"
# recode city English names based on Chinese names
cityEnglishName_dict = {
("上海市", "金山"): "Jinshan District",
("云南省", "红河"): "Honghe",
("云南省", "西双版纳州"): "Xishuangbanna",
("内蒙古自治区", "赤峰市松山区"): ("Chifeng", "Songshan"),
("内蒙古自治区", "赤峰市林西县"): ("Chifeng", "Linxi"),
("内蒙古自治区", "通辽市经济开发区"): "Tongliao",
("内蒙古自治区", "鄂尔多斯东胜区"): ("Ordos", "Dongsheng"),
("内蒙古自治区", "鄂尔多斯鄂托克前旗"): ("Ordos", "Etuokeqianqi"),
("内蒙古自治区", "锡林郭勒"): "Xilingol League",
("内蒙古自治区", "锡林郭勒盟"): "Xilingol League",
("内蒙古自治区", "锡林郭勒盟二连浩特"): ("Xilingol League", "Erlianhaote"),
("内蒙古自治区", "锡林郭勒盟锡林浩特"): ("Xilingol League", "Xilinhaote"),
("北京市", "石景山"): "Shijingshan District",
("北京市", "西城"): "Xicheng District",
("北京市", "通州"): "Tongzhou District",
("北京市", "门头沟"): "Mentougou District",
("北京市", "顺义"): "Shunyi District",
(
"新疆维吾尔自治区",
"石河子",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
("新疆维吾尔自治区", "第七师"): "Xinjiang Production and Construction Corps 7th Division",
("新疆维吾尔自治区", "第九师"): "Xinjiang Production and Construction Corps 9th Division",
(
"新疆维吾尔自治区",
"第八师",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
(
"新疆维吾尔自治区",
"第八师石河子",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
(
"新疆维吾尔自治区",
"第八师石河子市",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
("新疆维吾尔自治区", "第六师"): "Xinjiang Production and Construction Corps 6th Division",
("新疆维吾尔自治区", "胡杨河"): (
"Xinjiang Production and Construction Corps 7th Division",
"Huyanghe",
),
("新疆维吾尔自治区", "阿克苏"): "Akesu",
("河北省", "邯郸市"): "Handan",
("河南省", "邓州"): "Zhengzhou",
("河南省", "长垣"): "Changyuan",
("河南省", "长垣县"): "Changyuan",
("河南省", "鹤壁市"): "Hebi",
("海南省", "陵水县"): "Lingshui Li Autonomous County",
("甘肃省", "白银市"): "Baiyin",
("甘肃省", "金昌市"): "Jinchang",
("重庆市", "石柱"): "Shizhu Tujia Autonomous County",
("重庆市", "秀山"): "Xiushan Tujia and Miao Autonomous County",
("重庆市", "酉阳"): "Youyang Tujia and Miao Autonomous County",
("青海省", "西宁市"): "Xining",
# this is not missing but a typo in the original dataset
("河南省", "邓州"): "Dengzhou",
("江苏省", "淮安"): "Huai'an",
}
# clean up cityEnglishName
for cn_name, values in cityEnglishName_dict.items():
if isinstance(values, str):
df.loc[cn_name, "cityEnglishName"] = values
elif len(values) == 2:
df.loc[cn_name, ["cityEnglishName", "adm3_name"]] = values
# rename variables
df.rename(
{
"provinceEnglishName": "adm1_name",
"cityEnglishName": "adm2_name",
"city_confirmedCount": "cum_confirmed_cases",
"city_deadCount": "cum_deaths",
"city_curedCount": "cum_recoveries",
},
axis=1,
inplace=True,
)
# extract dates
df.loc[:, "updateTime"] = pd.to_datetime(df["updateTime"])
df.loc[:, "date"] = df["updateTime"].dt.date
df.loc[:, "date"] = pd.to_datetime(df["date"])
# choose the latest observation in each day
df = df.sort_values(by=["updateTime"])
df = df.drop_duplicates(
subset=["adm1_name", "adm2_name", "adm3_name", "date"], keep="last"
)
# subset columns
df = df.loc[
:,
[
"adm1_name",
"adm2_name",
"adm3_name",
"date",
"notes",
"cum_confirmed_cases",
"cum_deaths",
"cum_recoveries",
],
]
# for big cities, adjust adm level
mask = df["adm1_name"].isin(["Shanghai", "Beijing", "Tianjin", "Chongqing"])
df.loc[mask, "adm3_name"] = df.loc[mask, "adm2_name"].tolist()
df.loc[mask, "adm2_name"] = df.loc[mask, "adm1_name"].tolist()
# drop cases unassigned to cities
df = df.loc[df["notes"] != "prison", :]
df = df.loc[
~df["adm2_name"].isin(
["International Imported Cases", "Domestic Imported Cases", "Unknown"]
),
:,
]
# aggregate to city level
df = (
df.groupby(["adm1_name", "adm2_name", "date"])
.agg(
cum_confirmed_cases=pd.NamedAgg(
column="cum_confirmed_cases", aggfunc=np.nansum
),
cum_deaths=pd.NamedAgg(column="cum_deaths", aggfunc=np.nansum),
cum_recoveries=pd.NamedAgg(column="cum_recoveries", aggfunc=np.nansum),
)
.reset_index()
)
# fill adm0_name variable
df.loc[:, "adm0_name"] = "CHN"
## Merge with pre 01/24 data, create balanced panel
# merge with pre 1/24 data
df = pd.concat([df, df_jan_merged], sort=False)
# create a balanced panel
adm = df.loc[:, ["adm0_name", "adm1_name", "adm2_name"]].drop_duplicates()
days = pd.date_range(start="20200110", end=end_date)
adm_days = pd.concat([adm.assign(date=d) for d in days])
print(f"Sample: {len(adm)} cities; {len(days)} days.")
df = pd.merge(
adm_days, df, how="left", on=["adm0_name", "adm1_name", "adm2_name", "date"]
)
# fill N/A for the first day
df.loc[df["date"] == pd.Timestamp("2020-01-10"), :] = df.loc[
df["date"] == pd.Timestamp("2020-01-10"), :
].fillna(0)
# forward fill
df = df.set_index(["adm0_name", "adm1_name", "adm2_name"]).sort_index()
for _, row in adm.iterrows():
df.loc[tuple(row), :] = df.loc[tuple(row), :].fillna(method="ffill")
## Load and clean policy data
# load dataset of the policies in China
df_policy = pd.read_csv(policy_file).dropna(how="all")
# subset columns
df_policy = df_policy.loc[
:, ["adm0_name", "adm1_name", "adm2_name", "date_start", "date_end", "policy"]
]
# save set of policies
policy_set = df_policy["policy"].unique().tolist()
# parse
df_policy.loc[:, "date_start"] = pd.to_datetime(df_policy["date_start"])
df_policy.loc[:, "date_end"] = pd.to_datetime(df_policy["date_end"])
# check city name agreement
policy_city_set = set(
df_policy.loc[:, ["adm0_name", "adm1_name", "adm2_name"]]
.drop_duplicates()
.apply(tuple, axis=1)
.tolist()
)
adm2_set = set(adm.drop_duplicates().apply(tuple, axis=1).tolist())
adm1_set = set(
adm.loc[:, ["adm0_name", "adm1_name"]]
.drop_duplicates()
.apply(lambda x: (*x, "All"), axis=1)
.tolist()
)
print("Mismatched: ", policy_city_set - (adm1_set | adm2_set))
# subset adm1 policies
adm1_policy = df_policy.loc[df_policy["adm2_name"] == "All", :]
# merge to create balanced panel
adm1_policy = pd.merge(
adm,
adm1_policy.drop(["adm2_name"], axis=1),
how="left",
on=["adm0_name", "adm1_name"],
).dropna(subset=["policy"])
print("no. of adm1 policies: ", adm1_policy.shape[0])
# subset adm2 policies
adm2_policy = df_policy.loc[df_policy["adm2_name"] != "All", :]
print("no. of adm2 policies: ", adm2_policy.shape[0])
# concat policies at different levels
df_policy = pd.concat([adm1_policy, adm2_policy])
# sort by date to discard duplicates
df_policy = df_policy.sort_values(by=["date_start"])
# drop duplicates
df_policy = df_policy.drop_duplicates(
subset=["adm1_name", "adm2_name", "policy"], keep="first"
)
df_policy_set = set(
df_policy.loc[:, ["adm0_name", "adm1_name", "adm2_name"]]
.drop_duplicates()
.apply(tuple, axis=1)
.tolist()
)
print("Cities without any policies: ", len(adm2_set - df_policy_set))
print(adm2_set - df_policy_set)
# unstack to flip policy type to columns
df_policy = df_policy.set_index(
["adm0_name", "adm1_name", "adm2_name", "policy"]
).unstack("policy")
# prepare to merge with multi index
adm_days.set_index(["adm0_name", "adm1_name", "adm2_name"], inplace=True)
adm_days.columns = | pd.MultiIndex.from_tuples([("date", "")]) | pandas.MultiIndex.from_tuples |
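# Why the two-level columns are needed above (toy policy name, not from the data):
# after .unstack("policy") the policy frame carries MultiIndex columns such as
# ("date_start", "travel_ban"), so the panel's plain "date" column is lifted to
# ("date", "") before the two frames are combined.
import pandas as pd
left = pd.DataFrame({"date": pd.to_datetime(["2020-01-10"])})
left.columns = pd.MultiIndex.from_tuples([("date", "")])
right = pd.DataFrame({("date_start", "travel_ban"): pd.to_datetime(["2020-01-23"])})
combined = pd.concat([left, right], axis=1)   # both sides now share 2-level columns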
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
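# Orientation note: SQL_STRINGS is keyed first by statement name and then by SQL
# flavor, e.g. SQL_STRINGS["create_iris"]["sqlite"] is the sqlite DDL string and
# SQL_STRINGS["insert_test_types"]["postgresql"]["query"] the parametrised insert
# used by _load_raw_sql below.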
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
    Notes:
    flavor can always be passed even in SQLAlchemy mode;
    it should be correctly ignored.
    We don't use drop_table because that isn't part of the public API.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = | sql.read_sql_query("SELECT * FROM iris_view", self.conn) | pandas.io.sql.read_sql_query |
# -*- coding: utf-8 -*-
"""Device curtailment plots.
This module creates plots are related to the curtailment of generators.
@author: <NAME>
"""
import os
import logging
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, MissingZoneData)
class MPlot(PlotDataHelper):
"""curtailment MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The curtailment.py module contains methods that are
    related to the curtailment of generators.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
self.curtailment_prop = mconfig.parser("plot_data","curtailment_property")
def curt_duration_curve(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Curtailment duration curve (line plot)
        Displays curtailment sorted from highest occurrence to lowest
        over a given time period.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"generator_{self.curtailment_prop}",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
RE_Curtailment_DC = pd.DataFrame()
PV_Curtailment_DC = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
# Timeseries [MW] RE curtailment [MWh]
try: #Check for regions missing all generation.
re_curt = re_curt.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Timeseries [MW] PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
re_curt = re_curt.sum(axis=1)
pv_curt = pv_curt.sum(axis=1)
re_curt = re_curt.squeeze() #Convert to Series
pv_curt = pv_curt.squeeze() #Convert to Series
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
re_curt = re_curt[start_date_range : end_date_range]
pv_curt = pv_curt[start_date_range : end_date_range]
if re_curt.empty is True and prop == "PV+Wind":
self.logger.warning('No data in selected Date Range')
continue
if pv_curt.empty is True and prop == "PV":
self.logger.warning('No data in selected Date Range')
continue
                # Sort from largest to smallest
re_cdc = re_curt.sort_values(ascending=False).reset_index(drop=True)
pv_cdc = pv_curt.sort_values(ascending=False).reset_index(drop=True)
re_cdc.rename(scenario, inplace=True)
pv_cdc.rename(scenario, inplace=True)
RE_Curtailment_DC = pd.concat([RE_Curtailment_DC, re_cdc], axis=1, sort=False)
PV_Curtailment_DC = pd.concat([PV_Curtailment_DC, pv_cdc], axis=1, sort=False)
# Remove columns that have values less than 1
RE_Curtailment_DC = RE_Curtailment_DC.loc[:, (RE_Curtailment_DC >= 1).any(axis=0)]
PV_Curtailment_DC = PV_Curtailment_DC.loc[:, (PV_Curtailment_DC >= 1).any(axis=0)]
# Replace _ with white space
RE_Curtailment_DC.columns = RE_Curtailment_DC.columns.str.replace('_',' ')
PV_Curtailment_DC.columns = PV_Curtailment_DC.columns.str.replace('_',' ')
# Create Dictionary from scenario names and color list
colour_dict = dict(zip(RE_Curtailment_DC.columns, self.color_list))
fig2, ax = plt.subplots(figsize=(self.x,self.y))
if prop == "PV":
if PV_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(PV_Curtailment_DC.values.max())
PV_Curtailment_DC = PV_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = PV_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(PV_Curtailment_DC)
for column in PV_Curtailment_DC:
ax.plot(PV_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
if prop == "PV+Wind":
if RE_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(RE_Curtailment_DC.values.max())
RE_Curtailment_DC = RE_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = RE_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(RE_Curtailment_DC)
for column in RE_Curtailment_DC:
ax.plot(RE_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV + Wind Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
ax.set_xlabel('Hours', color='black', rotation='horizontal')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
#ax.set_xlim(0, 9490)
ax.set_xlim(0,x_axis_lim)
ax.set_ylim(bottom=0)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
outputs[zone_input] = {'fig': fig2, 'data_table': Data_Table_Out}
return outputs
def curt_pen(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Plot of curtailment vs penetration.
        Each scenario is represented by a different symbol on an x, y axis.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios),
(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Total_Generation_Cost", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
Penetration_Curtailment_out = pd.DataFrame()
self.logger.info(f"{self.AGG_BY } = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
gen = gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No generation in {zone_input}')
continue
avail_gen = self["generator_Available_Capacity"].get(scenario)
avail_gen = avail_gen.xs(zone_input,level=self.AGG_BY)
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
re_curt = re_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Finds the number of unique hours in the year
no_hours_year = len(gen.index.unique(level="timestamp"))
# Total generation across all technologies [MWh]
total_gen = float(gen.sum())
# Timeseries [MW] and Total VRE generation [MWh]
vre_gen = (gen.loc[(slice(None), self.vre_gen_cat),:])
total_vre_gen = float(vre_gen.sum())
# Timeseries [MW] and Total RE generation [MWh]
re_gen = (gen.loc[(slice(None), self.re_gen_cat),:])
total_re_gen = float(re_gen.sum())
# Timeseries [MW] and Total PV generation [MWh]
pv_gen = (gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_gen = float(pv_gen.sum())
# % Penetration of generation classes across the year
VRE_Penetration = (total_vre_gen/total_gen)*100
RE_Penetration = (total_re_gen/total_gen)*100
PV_Penetration = (total_pv_gen/total_gen)*100
# Timeseries [MW] and Total RE available [MWh]
re_avail = (avail_gen.loc[(slice(None), self.re_gen_cat),:])
total_re_avail = float(re_avail.sum())
# Timeseries [MW] and Total PV available [MWh]
pv_avail = (avail_gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_avail = float(pv_avail.sum())
# Total RE curtailment [MWh]
total_re_curt = float(re_curt.sum().sum())
# Timeseries [MW] and Total PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
total_pv_curt = float(pv_curt.sum().sum())
# % of hours with curtailment
Prct_hr_RE_curt = (len((re_curt.sum(axis=1)).loc[(re_curt.sum(axis=1))>0])/no_hours_year)*100
Prct_hr_PV_curt = (len((pv_curt.sum(axis=1)).loc[(pv_curt.sum(axis=1))>0])/no_hours_year)*100
# Max instantaneous curtailment
if re_curt.empty == True:
continue
else:
Max_RE_Curt = max(re_curt.sum(axis=1))
if pv_curt.empty == True:
continue
else:
Max_PV_Curt = max(pv_curt.sum(axis=1))
# % RE and PV Curtailment Capacity Factor
if total_pv_curt > 0:
RE_Curt_Cap_factor = (total_re_curt/Max_RE_Curt)/no_hours_year
PV_Curt_Cap_factor = (total_pv_curt/Max_PV_Curt)/no_hours_year
else:
RE_Curt_Cap_factor = 0
PV_Curt_Cap_factor = 0
# % Curtailment across the year
if total_re_avail == 0:
continue
else:
Prct_RE_curt = (total_re_curt/total_re_avail)*100
if total_pv_avail == 0:
continue
else:
Prct_PV_curt = (total_pv_curt/total_pv_avail)*100
# Total generation cost
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = float(Total_Gen_Cost.sum())
vg_out = pd.Series([PV_Penetration ,RE_Penetration, VRE_Penetration, Max_PV_Curt,
Max_RE_Curt, Prct_PV_curt, Prct_RE_curt, Prct_hr_PV_curt,
Prct_hr_RE_curt, PV_Curt_Cap_factor, RE_Curt_Cap_factor, Total_Gen_Cost],
index=["% PV Penetration", "% RE Penetration", "% VRE Penetration",
"Max PV Curtailment [MW]", "Max RE Curtailment [MW]",
"% PV Curtailment", '% RE Curtailment',"% PV hrs Curtailed",
"% RE hrs Curtailed", "PV Curtailment Capacity Factor",
"RE Curtailment Capacity Factor", "Gen Cost"])
vg_out = vg_out.rename(scenario)
Penetration_Curtailment_out = pd.concat([Penetration_Curtailment_out, vg_out], axis=1, sort=False)
Penetration_Curtailment_out = Penetration_Curtailment_out.T
# Data table of values to return to main program
Data_Table_Out = Penetration_Curtailment_out
VG_index = pd.Series(Penetration_Curtailment_out.index)
# VG_index = VG_index.str.split(n=1, pat="_", expand=True)
# VG_index.rename(columns = {0:"Scenario"}, inplace=True)
VG_index.rename("Scenario", inplace=True)
# VG_index = VG_index["Scenario"]
Penetration_Curtailment_out.loc[:, "Scenario"] = VG_index[:,].values
marker_dict = dict(zip(VG_index.unique(), self.marker_style))
colour_dict = dict(zip(VG_index.unique(), self.color_list))
Penetration_Curtailment_out["colour"] = [colour_dict.get(x, '#333333') for x in Penetration_Curtailment_out.Scenario]
Penetration_Curtailment_out["marker"] = [marker_dict.get(x, '.') for x in Penetration_Curtailment_out.Scenario]
if Penetration_Curtailment_out.empty:
self.logger.warning(f'No Generation in {zone_input}')
out = MissingZoneData()
outputs[zone_input] = out
continue
fig1, ax = plt.subplots(figsize=(self.x,self.y))
for index, row in Penetration_Curtailment_out.iterrows():
if prop == "PV":
ax.scatter(row["% PV Penetration"], row["% PV Curtailment"],
marker=row["marker"], c=row["colour"], s=100, label = row["Scenario"])
ax.set_ylabel('% PV Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV Penetration', color='black', rotation='horizontal')
elif prop == "PV+Wind":
ax.scatter(row["% RE Penetration"], row["% RE Curtailment"],
marker=row["marker"], c=row["colour"], s=40, label = row["Scenario"])
ax.set_ylabel('% PV + Wind Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV + Wind Penetration', color='black', rotation='horizontal')
ax.set_ylim(bottom=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc = 'lower right')
outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
return outputs
def curt_total(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No available generation in {zone_input}')
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
all_empty = True
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for vre_type in self.vre_gen_cat:
try:
vre_curt_type = vre_curt[vre_type]
# vre_curt_type = vre_curt.xs(vre_type,level='tech')
except KeyError:
self.logger.info(f'No {vre_type} in {zone_input}')
continue
avail_gen_type = avail_gen[vre_type]
# Code to index data by date range, if a date range is listed in marmot_plot_select.csv
if pd.notna(start_date_range):
avail_gen_type = avail_gen_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type[start_date_range : end_date_range]
avail_gen_type = avail_gen_type[start_date_range : end_date_range]
if vre_curt_type.empty is False and avail_gen_type.empty is False:
all_empty = False
vre_collection[vre_type] = float(vre_curt_type.sum())
avail_vre_collection[vre_type] = float(avail_gen_type.sum())
if all_empty:
self.logger.warning('No data in selected Date Range')
continue
vre_table = pd.DataFrame(vre_collection,index=[scenario])
avail_gen_table = pd.DataFrame(avail_vre_collection,index=[scenario])
vre_curt_chunks.append(vre_table)
avail_gen_chunks.append(avail_gen_table)
if not vre_curt_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Curtailment_out = pd.concat(vre_curt_chunks, axis=0, sort=False)
Total_Available_gen = pd.concat(avail_gen_chunks, axis=0, sort=False)
vre_pct_curt = Total_Curtailment_out.sum(axis=1)/Total_Available_gen.sum(axis=1)
Total_Curtailment_out.index = Total_Curtailment_out.index.str.replace('_',' ')
if Total_Curtailment_out.empty == True:
outputs[zone_input] = MissingZoneData()
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(max(Total_Curtailment_out.sum()))
Total_Curtailment_out = Total_Curtailment_out/unitconversion['divisor']
# Data table of values to return to main program
Data_Table_Out = Total_Curtailment_out
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']}h)")
fig3, ax = plt.subplots(figsize=(self.x,self.y))
Total_Curtailment_out.plot.bar(stacked=True,
color=[self.PLEXOS_color_dict.get(x, '#333333') for x in Total_Curtailment_out.columns],
edgecolor='black', linewidth='0.1', ax=ax)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel(f"Total Curtailment ({unitconversion['units']}h)", color='black', rotation='vertical')
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Curtailment_out.index
PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
curt_totals = Total_Curtailment_out.sum(axis=1)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = curt_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:.2%}\n|{:,.2f}|'.format(vre_pct_curt[k],curt_totals[k]),
horizontalalignment='center',
verticalalignment='center', fontsize=11, color='red')
if k>=len(vre_pct_curt)-1:
break
outputs[zone_input] = {'fig': fig3, 'data_table': Data_Table_Out}
return outputs
def curt_total_diff(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology relative to a base scenario.
Barplots show the change in total curtailment relative to a base scenario.
        The default is to compare against the first scenario provided in the inputs list.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
return UnderDevelopment()
outputs = {}
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
for zone_input in self.Zones:
self.logger.info(self.AGG_BY + " = " + zone_input)
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info("Scenario = " + scenario)
# Adjust list of values to drop from vre_gen_cat depending on if it exists in processed techs
#self.vre_gen_cat = [name for name in self.vre_gen_cat if name in curtailment_collection.get(scenario).index.unique(level="tech")]
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info('No curtailment in ' + zone_input)
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info('No available generation in ' + zone_input)
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
for vre_type in self.vre_gen_cat:
try:
vre_curt_type = vre_curt[vre_type]
except KeyError:
self.logger.info('No ' + vre_type + ' in ' + zone_input)
continue
vre_collection[vre_type] = float(vre_curt_type.sum())
avail_gen_type = avail_gen[vre_type]
avail_vre_collection[vre_type] = float(avail_gen_type.sum())
vre_table = pd.DataFrame(vre_collection,index=[scenario])
avail_gen_table = pd.DataFrame(avail_vre_collection,index=[scenario])
vre_curt_chunks.append(vre_table)
avail_gen_chunks.append(avail_gen_table)
Total_Curtailment_out = pd.concat(vre_curt_chunks, axis=0, sort=False)
Total_Available_gen = pd.concat(avail_gen_chunks, axis=0, sort=False)
vre_pct_curt = Total_Curtailment_out.sum(axis=1)/Total_Available_gen.sum(axis=1)
#Change to a diff on the first scenario.
print(Total_Curtailment_out)
Total_Curtailment_out = Total_Curtailment_out-Total_Curtailment_out.xs(self.Scenarios[0])
Total_Curtailment_out.drop(self.Scenarios[0],inplace=True) #Drop base entry
Total_Curtailment_out.index = Total_Curtailment_out.index.str.replace('_',' ')
# Data table of values to return to main program
Data_Table_Out = Total_Curtailment_out
if Total_Curtailment_out.empty == True:
outputs[zone_input] = MissingZoneData()
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(max(Total_Curtailment_out.sum()))
Total_Curtailment_out = Total_Curtailment_out/unitconversion['divisor']
fig3, ax= plt.subplots(figsize=(self.x,self.y))
Total_Curtailment_out.plot.bar(stacked=True,
color=[self.PLEXOS_color_dict.get(x, '#333333') for x in Total_Curtailment_out.columns],
edgecolor='black', linewidth='0.1',ax=ax)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Total Curtailment ({}h)'.format(unitconversion['units']), color='black', rotation='vertical')
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Curtailment_out.index
PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
curt_totals = Total_Curtailment_out.sum(axis=1)
print(Total_Curtailment_out)
print(curt_totals)
#inserts total bar value above each bar
k=0
for i in ax.patches:
height = curt_totals[k]
width = i.get_width()
x, y = i.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:.2%}\n|{:,.2f}|'.format(vre_pct_curt[k],curt_totals[k]),
horizontalalignment='center',
verticalalignment='center', fontsize=11, color='red')
k += 1
if k>=len(vre_pct_curt):
break
outputs[zone_input] = {'fig': fig3, 'data_table': Data_Table_Out}
return outputs
def curt_ind(self, figure_name: str = None, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Curtailment as a percentage of total generation, of individual generators.
        The generators are specified as a comma separated string in the
        fourth column of Marmot_plot_select.csv and passed to the prop argument.
        The method outputs two .csv files:
        - one that contains curtailment, in percent, for each scenario and site.
        - the other contains total generation, in TWh, for each scenario and site.
        This method does not return data to MarmotPlot; data is saved within the method
        directly to the output folder.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
            prop (str, optional): comma separated string of generators to display.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios),
(True, f"generator_{self.curtailment_prop}", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
Total_Curtailment_Out_perc = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from run import prediction
import tensorflow as tf
import time
import os
np.random.seed(12345)
def top_k_movies(users,ratings_df,k):
"""
Returns top k movies for respective user
INPUTS :
            users : list of numbers or number, list of user ids
            ratings_df : ratings dataframe, storing all users' ratings for the respective movies
            k : natural number
        OUTPUT:
            Dictionary containing user id as key and list of top k movies for that user as value
"""
# Extract unseen movies
dicts={}
if type(users) is not list:
users=[users]
for user in users:
rated_movies=ratings_df[ratings_df['user']==user].drop(['st', 'user'], axis=1)
rated_movie=list(rated_movies['item'].values)
total_movies=list(ratings_df.item.unique())
unseen_movies=list(set(total_movies) - set(rated_movie))
rated_list = []
rated_list=prediction(np.full(len(unseen_movies),user),np.array(unseen_movies))
        unseen_movies_df=pd.DataFrame({'item': unseen_movies,'rate':rated_list})
        top_k=list(unseen_movies_df.sort_values(['rate','item'], ascending=[0, 0])['item'].head(k).values)
dicts.update({user:top_k})
result=pd.DataFrame(dicts)
result.to_csv("user_top_k.csv")
return dicts
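# Minimal usage sketch for top_k_movies (hedged: the CSV path and the presence of
# 'user', 'item' and 'st' columns are assumptions, and run.prediction must already
# point at a trained model before this is called):
def _example_top_k(ratings_csv="ratings.csv", k=10):
    ratings_df = pd.read_csv(ratings_csv)
    sample_users = list(ratings_df['user'].unique()[:3])
    return top_k_movies(sample_users, ratings_df, k)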
def user_rating(users,movies):
"""
Returns user rating for respective user
INPUTS :
users : list of numbers or number, list of user ids or just user id
movies : list of numbers or number, list of movie ids or just movie id
OUTPUT:
            list of predicted ratings
"""
if type(users) is not list:
users=np.array([users])
if type(movies) is not list:
movies=np.array([movies])
return prediction(users,movies)
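# e.g. user_rating(196, 302) -> predicted rating of user 196 for movie 302
# (the ids are placeholders; any user/movie ids known to the trained model work).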
def top_k_similar_items(movies,ratings_df,k,TRAINED=False):
"""
Returns k similar movies for respective movie
INPUTS :
movies : list of numbers or number, list of movie ids
ratings_df : rating dataframe, store all users rating for respective movies
k : natural number
            TRAINED : TRUE or FALSE, whether to use the trained user vs movie table or the untrained one
        OUTPUT:
            list of k similar movies for the respective movie
"""
if TRAINED:
df=pd.read_pickle("user_item_table_train.pkl")
else:
df= | pd.read_pickle("user_item_table.pkl") | pandas.read_pickle |
from config import engine
import pandas as pd
import numpy as np
from datetime import datetime
from collections import Counter
def date_difference(my_date, max_date):
'''
This function takes in a single date from the donations dataframe (per row) and compares the difference between that date and the date in which matching occurs.
I.e. pipeline matching should provide a query_date so that this can work.
'''
d1 = datetime.strptime(str(my_date), "%Y-%m-%d")
d2 = datetime.strptime(str(max_date), "%Y-%m-%d")
diff = (d2 - d1)
return diff
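# Quick illustration (made-up values): date_difference("2021-01-01", "2021-03-01")
# returns datetime.timedelta(days=59).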
def create_scores(query_date):
'''
    requires query_date as input -- must be a string in the format "%Y-%m-%d"
    returns a list of matching_ids and scores as tuples
    will also insert rfm scores into the rfm_scores table -- see src/server/api/admin_api.py
'''
with engine.connect() as connection:
# read in data from database via pull_donations_for_rfm() func (reads in as a list of tuples)
df = pd.read_sql(
"""
select pc.matching_id, s.amount, s.close_date
from salesforcedonations s
inner join pdp_contacts pc on pc.source_id = s.contact_id and pc.source_type = 'salesforcecontacts'
where pc.archived_date is null order by matching_id
"""
, connection)
df = | pd.DataFrame(df, columns=['matching_id', 'amount', 'close_date']) | pandas.DataFrame |
import seaborn as sns
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.ticker import ScalarFormatter
from matplotlib import lines
import pandas as pd
import numpy as np
from pathlib import Path
import os
import sys
import csv
import time
import yaml
cache = {}
exp_base_folder = '/psp/experiments-data/'
distros = {
'Figure3': 'DISP2',
'Figure4_a': 'DISP2',
'Figure4_b': 'SBIM2',
'Figure5_a': 'DISP2',
'Figure5_b': 'SBIM2',
'Figure6': 'TPCC',
'Figure7': 'ROCKSDB'
}
workloads = {
'BIM1': {
'avg_s': 1,
'name': '90.0:0.5 -- 10.0:5.5',
'max_load': 14000000,
'distribution': 'bimodal-90.0:0.5-10.0:5.5',
'SHORT': { 'MEAN': .5, 'RATIO': .9, 'YLIM': 60 },
'LONG': { 'MEAN': 5.5, 'RATIO': .1, 'YLIM': 60 },
'UNKNOWN': { 'MEAN': 1, 'RATIO': 1, 'YLIM': 60 }
},
'BIM2': {
'avg_s': 1,
'max_load': 14000000,
'name': '99.9:0.5-0.1:500.5',
'distribution': 'bimodal-99.9:0.5-0.1:500.5',
'SHORT': { 'MEAN': .5, 'RATIO': .999, 'YLIM': 400 },
'LONG': { 'MEAN': 500.5, 'RATIO': .001, 'YLIM': 1500 },
'UNKNOWN': { 'MEAN': 1, 'RATIO': 1, 'YLIM': 1500 }
},
'SBIM2': {
'avg_s': 2.9975,
'max_load': 4670558,
'name': '99.5:0.5-0.05:500',
'distribution': 'bimodal-99.5:0.5-0.5:500.0',
'SHORT': { 'MEAN': .5, 'RATIO': .995, 'YLIM': 300 },
'LONG': { 'MEAN': 500, 'RATIO': .005, 'YLIM': 3600 },
'UNKNOWN': { 'MEAN': 2.9975, 'RATIO': 1, 'YLIM': 3600 }
},
'DISP1': {
'avg_s': 5.5,
'name': '50.0:1.0 -- 50.0:10.0',
'max_load': 2545454,
'distribution': 'bimodal-50.0:1.0-50.0:10.0',
'SHORT': { 'MEAN': 1, 'RATIO': .5, 'YLIM': 50 },
'LONG': { 'MEAN': 10, 'RATIO': .5, 'YLIM': 300 },
        'UNKNOWN': { 'MEAN': 5.5, 'RATIO': 1, 'YLIM': 300 }
    },
'DISP2': {
'avg_s': 50.5,
'name': '50.0:1.0 -- 50.0:100.0',
'max_load': 277227,
'distribution': 'bimodal-50.0:1.0-50.0:100.0',
'SHORT': { 'MEAN': 1.0, 'RATIO': .5, 'YLIM': 300 },
'LONG': { 'MEAN': 100.0, 'RATIO': .5, 'YLIM': 300 },
'UNKNOWN': { 'MEAN': 50.5, 'RATIO': 1, 'YLIM': 300 }
},
'DISP3': {
'avg_s': 50.950,
'name': '95.0.0:1.0 -- 0.5:100.0',
'max_load': 274779,
'distribution': 'bimodal-95.0:1.0-0.5:100.0',
'SHORT': { 'MEAN': 1.0, 'RATIO': .95, 'YLIM': 300 },
'LONG': { 'MEAN': 100.0, 'RATIO': .5, 'YLIM': 300 },
'UNKNOWN': { 'MEAN': 50.5, 'RATIO': 1, 'YLIM': 300 }
},
'ROCKSDB': {
'avg_s': 526,
'name': 'ROCKSDB',
'max_load': 45000,
'distribution': 'bimodal-50.0:0.0-50.0:0.0',
'GET': { 'MEAN': 2.0, 'RATIO': .5, 'YLIM': 300 },
'SCAN': { 'MEAN': 1050.0, 'RATIO': .5, 'YLIM': 1000 },
'UNKNOWN': { 'MEAN': 526, 'RATIO': 1, 'YLIM': 200 }
},
'TPCC': {
'avg_s': 19,
'name': 'TPC-C',
'max_load': 735000,
'distribution': 'tpcc',
'NewOrder': { 'MEAN': 20, 'RATIO': .44, 'YLIM': 250 },
'Payment': { 'MEAN': 5.7, 'RATIO': .44, 'YLIM': 250 },
'Delivery': { 'MEAN': 88, 'RATIO': .04, 'YLIM': 250 },
'OrderStatus': { 'MEAN': 6, 'RATIO': .04, 'YLIM': 250 },
'StockLevel': { 'MEAN': 100, 'RATIO': .04, 'YLIM': 250 },
'UNKNOWN': { 'MEAN': 19, 'RATIO': 1, 'YLIM': 50 }
}
}
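# Each workload entry above records its mean service time (avg_s), the peak
# offered load probed in the experiments (max_load), the generator distribution
# string, and per-request-type MEAN/RATIO/YLIM (mean service time, fraction of
# requests of that type, and the y-axis limit used when plotting that type).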
apps = {
'TPCC': ['Payment', 'OrderStatus', 'NewOrder', 'Delivery', 'StockLevel'],
'MB': ['SHORT', 'LONG'],
'REST': ['PAGE', 'REGEX'],
'ROCKSDB': ['GET', 'SCAN'],
}
policies = {
'DFCFS': 'd-FCFS',
'CFCFS': 'c-FCFS',
'shen-DFCFS': 'shen-DFCFS',
'shen-CFCFS': 'shen-CFCFS',
'SJF': 'ARS-FP',
'EDF': 'EDF',
# 'CSCQ-half': 'CSCQ-half',
# 'CSCQ': 'ARS-CS',
# 'EDFNP': 'ARS-EDF',
'cPRESQ': 'cPRESQ',
'cPREMQ': 'cPREMQ',
'DARC': 'DARC'
}
# For final print
pol_names = {
'DARC': 'DARC',
'c-FCFS': 'c-FCFS',
'd-FCFS': 'd-FCFS',
'cPREMQ': 'c-PRE',
'cPRESQ': 'c-PRE',
'ARS-FP': 'FP',
'EDF': 'EDF',
'shen-DFCFS': 'd-FCFS',
'shen-CFCFS': 'c-FCFS'
}
system_pol = {
'DFCFS': 'Perséphone',
'CFCFS': 'Perséphone',
'shen-DFCFS': 'Shenango',
'shen-CFCFS': 'Shenango',
'SJF': 'Perséphone',
'CSCQ-half': 'Perséphone',
'CSCQ': 'Perséphone',
'EDF': 'Perséphone',
'cPRESQ': 'Shinjuku',
'cPREMQ': 'Shinjuku',
'DARC': 'Perséphone'
}
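# How the three maps above fit together, e.g. for the 'SJF' policy key:
# policies['SJF'] -> 'ARS-FP' (label used while parsing), pol_names['ARS-FP'] -> 'FP'
# (name used for the final print), and system_pol['SJF'] -> 'Perséphone' (owning system).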
trace_label_to_dtype = {
'client-end-to-end' : ['SENDING', 'COMPLETED'],
'client-receive' : ['READING', 'COMPLETED'],
'client-send' : ['SENDING', 'READING'],
}
CLT_TRACE_ORDER = [
'SENDING',
'READING',
'COMPLETED'
]
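# trace_label_to_dtype maps a client-side latency label to its [start, end] state
# pair; CLT_TRACE_ORDER lists those states in the order they occur on the client.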
def read_profiling_node(exp, app, orders=CLT_TRACE_ORDER, verbose=True):
# First get traces
exp_folder = os.path.join(exp_base_folder, exp, app, '')
filename = os.path.join(exp_folder, 'traces')
if not Path(filename).is_file():
print('{} does not exist. Skipping {} {} traces.'.format(filename, exp, app))
return pd.DataFrame()
if verbose:
print(f"Parsing {filename}")
app_trace_df = | pd.read_csv(filename, delimiter='\t') | pandas.read_csv |
from itertools import permutations
from typing import List, Dict
import pandas as pd
import scipy.stats
import numpy as np
import json
import sys, os
from statsmodels import api as sm
from src.constants import AVG_SEED_WINS, ESPN_SCORES
from scipy import stats
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + "/../")
from src.constants import SEEDS
# from line_profiler import profile
def store_object_as_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f)
def flatten(lst: List) -> List:
return [item for sublist in lst for item in sublist]
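# e.g. flatten([[1, 2], [3]]) == [1, 2, 3]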
class Region:
def __init__(self, region_name: str, bid_teams: List, play_in_dict: Dict = {}):
self.region_name = region_name
self.bid_teams = bid_teams
self.play_in_dict = play_in_dict
self.seeds = SEEDS
self.play_in_seeds = list(self.play_in_dict.keys())
self.play_in_teams = flatten(list(self.play_in_dict.values()))
# self.teams = self.bid_teams + self.play_in_teams
self.seed_teams()
def seed_teams(self):
"""take the ordered list of bid teams (non-play-in)
and play-in dictionary and make a dictionary of all team:seed combos."""
self.team_seeds = {}
team_idx = 0
self.teams = []
for seed in self.seeds:
if seed in self.play_in_seeds:
team_a, team_b = self.play_in_dict[seed]
self.team_seeds[team_a] = seed
self.team_seeds[team_b] = seed
self.teams.append(team_a)
self.teams.append(team_b)
else:
team = self.bid_teams[team_idx]
self.team_seeds[team] = seed
team_idx += 1
self.teams.append(team)
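# Hedged construction sketch for Region: the team names and the play-in pairing
# below are placeholders, and the play-in key must be one of the seed values in
# src.constants.SEEDS (assumed here to include seed 16).
def _example_region():
    bid_teams = [f"Team {i}" for i in range(1, 16)]   # 15 straight-in teams
    play_in = {16: ["Play-In A", "Play-In B"]}        # two teams sharing the 16 seed
    return Region("East", bid_teams, play_in)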
class Bracket:
# @profile
def __init__(
self,
ratings: Dict,
*regions: Region,
exp_stdev: float = 11.7,
pace_modifier: float = 68.4 / 100
):
self.regions = regions
self.ratings = ratings
if pace_modifier:
self.ratings = {k: v * pace_modifier for k, v in self.ratings.items()}
self.exp_stdev = exp_stdev
self.teams = []
self.team_regions = {}
self.team_seeds = {}
for region in self.regions:
self.teams += region.teams
for team in region.teams:
self.team_regions[team] = region.region_name
self.team_seeds[team] = region.team_seeds[team]
self.remaining_teams = self.teams
self.get_probabilities_dict()
self.play_in_matchups = flatten([region.play_in_teams for region in regions])
self.non_play_in_teams = [
team for team in self.teams if team not in self.play_in_matchups
]
self.play_in = len(self.play_in_matchups) > 0
self.n_rounds = int(np.log2(len(self.teams))) + self.play_in
# print(f"rounds: {self.n_rounds}")
self.current_round = None
self.current_round_is_play_in = False
self.current_round_team_a_list: List = []
self.current_round_team_b_list: List = []
self.num_sims = 0
# @profile
def get_probabilities_dict(self):
# by using permutations we are technically 2x the computational work here
# but this makes the code much simpler
# we use a dataframe for easy mapping and vectorization of scipy.stats.norm
team_combos = permutations(self.ratings.keys(), 2)
df = pd.DataFrame(list(team_combos), columns=["a", "b"])
df["a_rtg"] = df["a"].map(self.ratings)
df["b_rtg"] = df["b"].map(self.ratings)
df["rtg_diff"] = df["a_rtg"] - df["b_rtg"]
df["p_win_a"] = scipy.stats.norm(0, self.exp_stdev).cdf(
df["a_rtg"] - df["b_rtg"]
)
df["p_win_b"] = 1 - df["p_win_a"]
df.set_index(["a", "b"], inplace=True)
self.probabilities_dict = dict(zip(df.index.values, df["p_win_a"].values))
# @profile
def run_simulations(self, num_sims=100):
self.num_sims = num_sims
self.simulation_results = []
for sim in range(self.num_sims):
self.sim_id = sim
self.current_round = 1
# self.store_initial_round()
while self.current_round <= self.n_rounds:
self.run_round()
self.sim_results_to_df_and_store()
def merge_playin_winners_with_ro64(self):
play_in_losers = [
team for team in self.play_in_matchups if team not in self.remaining_teams
]
self.remaining_teams = [
team for team in self.teams if team not in play_in_losers
]
# @profile
def store_initial_round(self):
self.store_simulation_results(initial_round=True)
# @profile
def check_play_in(self):
self.current_round_is_play_in = self.current_round == 1 and self.play_in
# @profile
def merge_playin_winners_with_ro64_bracket(self):
# remove the losers and then run the round
play_in_losers = [
team for team in self.play_in_matchups if team not in self.remaining_teams
]
self.remaining_teams = [
team for team in self.teams if team not in play_in_losers
]
def set_current_matchups(self):
if self.current_round == 2 and self.play_in:
self.merge_playin_winners_with_ro64_bracket()
if self.current_round == 1:
if self.play_in:
self.current_matchups = self.play_in_matchups
else:
self.current_matchups = self.teams
else:
self.current_matchups = self.remaining_teams
self.current_round_team_a_list = self.current_matchups[::2]
self.current_round_team_b_list = self.current_matchups[1::2]
self.combined_current_matchups = zip(
self.current_round_team_a_list, self.current_round_team_b_list
)
# @profile
def generate_simulation_seed(self):
self.simulation_values = np.random.uniform(
size=int(len(self.current_matchups) / 2)
)
# @profile
def get_team_a_probs(self):
self.team_a_probs = [
self.probabilities_dict[(a, b)] for a, b in self.combined_current_matchups
]
# @staticmethod
# @profile
def simulate_game(self, team_a, team_b, team_a_prob, rand_val):
# why waste a function call on this?
# this gives us flexibility to try different things in our simulation
# print(f"Round{self.current_round}: {team_a} vs {team_b}")
return team_a if rand_val <= team_a_prob else team_b
# @profile
def get_winners(self):
self.remaining_teams = [
self.simulate_game(team_a, team_b, team_a_prob, rand_val)
for team_a, team_b, team_a_prob, rand_val in zip(
self.current_round_team_a_list,
self.current_round_team_b_list,
self.team_a_probs,
self.simulation_values,
)
]
# @profile
def store_simulation_results(self, initial_round=False):
teams_to_use = self.teams if initial_round else self.remaining_teams
if self.current_round_is_play_in:
teams_to_use += self.non_play_in_teams
for team in teams_to_use:
self.simulation_results.append(
{"team": team, "round": self.current_round, "sim_id": self.sim_id}
)
# @profile
def increment_current_round(self):
self.current_round += 1
# @profile
def run_round(self):
self.check_play_in()
self.set_current_matchups()
# self.update_teams()
self.generate_simulation_seed()
self.get_team_a_probs()
self.get_winners()
self.store_simulation_results()
self.increment_current_round()
# @profile
def sim_results_to_df_and_store(self):
# store_object_as_json(self.simulation_results, "sim_results.json")
full_results = | pd.DataFrame(self.simulation_results) | pandas.DataFrame |
import pandas as pd
def _performer_list():
performers = [
['FULL_NAME', 'SHORT_NAME'],
#['Test and Evaluation Team', 'te'],
['Accenture', 'acc'],
['ARA','ara'],
['Astra', 'ast'],
['BlackSky', 'bla'],
['Kitware', 'kit'],
['STR', 'str'],
]
return performers
def _performer_df():
performers = _performer_list()
df = pd.DataFrame(performers)
new_header = df.iloc[0] #grab the first row for the header
df = df[1:] #take the data less the header row
df.columns = new_header #set the header row as the df header
return(df)
class TE_CONFIG:
def __init__(self):
print('hello from TE_CONFIG -- i am instantiated')
self.performer_df = _performer_df()
def __repr__(self):
return f'{self.performer_df}'
def get_short_names(self):
df = self.performer_df
short_names = df['SHORT_NAME'].to_list()
return short_names
def get_full_names(self):
df = self.performer_df
        full_names = [name.lower() for name in df['FULL_NAME'].to_list()]
return full_names
def get_bucket_pre(self):
bucket_pre = 'smart-data-'
return bucket_pre
def get_my_test_regions(self):
'''
Here is the complete set of regions to be processed in support of Eval-2 TA-1 assessment:
RadCalNet sites (Absolute Radiometry)
NA_R101
US_R108
CN_R101
FR_R101
GCP sites (Absolute Geometry)
US_R109
US_R110
US_R111
SSH sites
US_R113 (GSFC)
MX_R101 (Sonoran Desert)
AR_R101 (Barreal Blanco)
CN_R102 (Dunhuang)
'''
ssh_site_list = [
['SSH_SITE', 'PLACE_NAME'],
['US_R109', 'Sioux Falls SD'],
['US_R110', 'Riverside CA'],
['US_R111', 'New Orleans LA'],
['US_R113', '(GSFC)'],
['MX_R101', '(Sonoran Desert)'],
['AR_R101', '(Barreal Blanco)'],
['CN_R102', '(Dunhuang)'],
]
        df = pd.DataFrame(ssh_site_list)
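# Usage sketch (illustrative; the expected outputs follow from the tables defined above):
#   cfg = TE_CONFIG()
#   cfg.get_short_names()  # -> ['acc', 'ara', 'ast', 'bla', 'kit', 'str']
#   cfg.get_full_names()   # -> ['accenture', 'ara', 'astra', 'blacksky', 'kitware', 'str']
#   cfg.get_bucket_pre()   # -> 'smart-data-'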
import numpy as np
import arviz as az
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt
from math import *
import json
import itertools
import os
import re
sns.set_style("whitegrid")
import tools
from tools import toVec
def jSonIterator(j):
yield j
if isinstance(j,dict):
for key in j.keys():
for m in jSonIterator(j[key]):
yield m
else:
if isinstance(j,list):
for item in j:
for m in jSonIterator(item):
yield m
def getByLabel(j,label):
def hasLabel(j):
if isinstance(j,dict) :
if "label" in j.keys():
if j["label"]==label:
return True
return False
ms=[ item for item in jSonIterator(j) if hasLabel(item) ]
return ms
def getForwardWalkingLabels(j,label):
fwLabels=[]
for m in j["measurements"] :
if ("label" in m.keys() ) and (m["label"]==label ) and ("forwardWalkingSteps" in m.keys() ):
for step in m["forwardWalkingSteps"]:
fwLabels.append(label + "_fw" + str(step))
return fwLabels
def average(data,labels=None,hues=None,minIndex=None):
if minIndex is not None:
data=data[data.index >= minIndex]
if labels is None:
labels=list(data.columns)
if hues is None:
hues = list( set(data.columns) -set(labels) )
averagedData={label : [] for label in labels }
averagedData.update({hue : [] for hue in hues } )
averagedData.update( { "delta" + label : [] for label in toVec(labels) })
if hues == []:
groups= { None : data }
groups=groups.items()
else:
groups = data.groupby(hues)
for hue_values,df in groups:
for label in toVec(labels):
x=np.array(df[label])
averagedData[label].append(np.mean(x) )
neff=az.ess(x)
averagedData["delta" + label].append( np.sqrt(np.var(x) /neff ) )
for name,value in zip(toVec(hues),toVec(hue_values) ):
averagedData[name].append(value)
return pd.DataFrame(averagedData)
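# Usage sketch for average() (column names here are assumptions for illustration):
#   data = pd.DataFrame({"energy": samples, "N": 10})   # one row per Monte Carlo step
#   average(data, labels=["energy"], hues=["N"])
# returns one row per hue value with columns "energy" and "deltaenergy", where the
# error estimate divides the sample variance by the effective sample size from
# arviz (az.ess) to account for autocorrelation between steps.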
def createHueLabel(hueNames,hueValues):
hueNames=toVec(hueNames)
hueValues=toVec(hueValues)
labels= [ str(name) + "=" + str(value) for name,value in zip(hueNames,hueValues) ]
return ", ".join(labels)
def assemblePlot(func):
def assemble(data,hues=None,table=False,nCols=2,width=10.,height=6.,x=None,y=None,delta=None,showLegend=True,*args,**kwds):
fig=plt.figure()
if hues is None:
ax=fig.add_subplot(111)
for x1,y1,delta1 in itertools.zip_longest(toVec(x),toVec(y),toVec(delta)):
func(data,x=x1 ,y=y1 ,delta=delta1,ax=ax,label=y1,*args,**kwds)
if showLegend:
ax.legend()
fig.set_size_inches(width, height)
else:
if not table :
ax=fig.add_subplot(111)
for hue,df in data.groupby(hues):
for x1,y1,delta1 in itertools.zip_longest(toVec(x),toVec(y),toVec(delta)):
func(df,x=x1,y=y1,delta=delta1,label=y1 + ";"+createHueLabel(hues,hue),ax=ax,*args,**kwds)
if showLegend:
ax.legend()
fig.set_size_inches(width, height )
else:
groups=data.groupby(hues)
Nplots=len(groups)
nRows=ceil(Nplots/nCols)
i=1
for hue,df in data.groupby(hues):
ax=fig.add_subplot(nRows,nCols,i)
for x1,y1,delta1 in itertools.zip_longest(toVec(x),toVec(y),toVec(delta)):
func(df,x=x1,y=y1,delta=delta1,label=y1 + ";" +createHueLabel(hues,hue),ax=ax,*args,**kwds)
i+=1
if (showLegend):
ax.legend()
fig.set_size_inches(width, height/2. * len(groups) )
fig.tight_layout()
return assemble
@assemblePlot
def plotVector(data,x,y,delta=None,label=None,ax=None,errorbar=False,*args,**kwds):
if delta is not None and (not errorbar):
ax.fill_between(data[x],data[y]-data[delta],data[y]+data[delta],alpha=0.5)
if errorbar is not True:
ax.plot(data[x],data[y],label=label,*args,**kwds)
else:
ax.errorbar(data[x],data[y],data[delta],label=label,*args,**kwds)
ax.set_xlabel(x)
ax.set_ylabel(y)
@assemblePlot
def plotScalar(data,y,x=None,label=None,ax=None,delta=None,alpha=0.5,trace=False,alpha_trace=1):
if x is None:
x1=np.arange(0,len(data[y]))
else:
x1=np.array(data[x])
if delta is None:
p=ax.plot(x1,np.array(data[y]),label=label,marker="o",linestyle="dashed",alpha=alpha)
else:
p=ax.errorbar(x1,np.array(data[y]),yerr=np.array(data[delta]),label=label,marker="o",linestyle="dashed",alpha=alpha)
if trace and (delta is None):
movingAverage=data[y].expanding().mean()
color=p[0].get_color()
ax.plot(x1,np.array(movingAverage),linestyle="solid",alpha=alpha_trace,color=color)
def compare(data,ax=None):
columns=list(data.columns)
labels = [label for label in columns if ( (re.match("(?!delta).*",label) is not None) and ( ("delta"+label) in columns ) ) ]
if ax is None:
fig=plt.figure()
ax=fig.add_subplot(111)
y=[ float(data[label]) for label in labels]
deltay=[ float(data["delta"+label]) for label in labels]
ax.errorbar(labels,y,yerr=deltay,marker="o",linestyle="None")
def gatherByLabel(baseDir,label,jSonInput,getHues=None,maxRows=None,minIndex=0):
measurements=getByLabel(jSonInput["measurements"],label)
if len(measurements)!=0 and ("recordSteps" in measurements[0]):
fwLabels=getForwardWalkingLabels(jSonInput,label)
datas=[]
for fwLabel in fwLabels:
data=gatherByLabel(baseDir,fwLabel,jSonInput,getHues=getHues,maxRows=maxRows,minIndex=minIndex)
data=data.rename(columns={fwLabel : label})
            fwSteps = int(re.match(r".*_fw(\d+)", fwLabel).group(1))
fwTime=jSonInput["correlationSteps"]*fwSteps*jSonInput["timeStep"]
data["fwTime"]=float(fwTime)
datas.append(data)
return pd.concat(datas)
filename=os.path.join(baseDir , label + ".dat")
data=pd.read_csv(filename,sep=" ")
if (maxRows is not None) and (len(data) > maxRows) :
data.reset_index(drop=True)
k=len(data)//maxRows
data=data[data.index% k == 0]
if getHues is not None:
hues=getHues(jSonInput)
for name,value in hues.items():
data[name]=value
data=data[data.index >= minIndex]
return data
def gather(dirname,label,hues=None,maxRows=None,minIndex=0,max_level=1):
datas=[]
json_file="input.json"
for subdir, dirs, files in tools.walk(dirname,max_level=max_level):
if json_file in files:
try:
with open(os.path.join(subdir,json_file)) as f:
j = json.load(f)
data=gatherByLabel(subdir,label,jSonInput=j,getHues=hues,maxRows=maxRows,minIndex=minIndex)
datas.append(data)
except FileNotFoundError as e:
print ("Warning: data not availible in " + subdir)
print (str(e))
if datas != []:
data=pd.concat(datas)
#data=data.reset_index(drop=True)
return data
def merge(datas,hues=None,how="outer"):
data=datas[0]
for i in range(1,len(datas) ):
data=pd.merge(data,datas[i],left_index=True,right_index=True,on=hues,how=how)
return data
def getOptimizationRange(data,x,label,hues,delta=None):
data=data.dropna()
optRanges= {hueName : [] for hueName in toVec(hues)}
optRanges.update({x + "_min" : [],x + "_max": []})
for hueValues,df in data.groupby(hues):
ys=np.array(df[label])
xs=np.array(df[x])
i=np.argmin(ys)
miny=ys[i]
minx=xs[i]
if delta is None:
deltays = np.zeros(len(ys))
else:
deltays=df[delta]
nonCompatibleXs=np.array( [x for x,y,deltay in zip(xs,ys,deltays) if abs(y - miny)> deltay ])
x_min= np.min(xs)
x_max = np.max(xs)
if len(nonCompatibleXs) > 0:
left_xs = nonCompatibleXs[nonCompatibleXs < minx]
right_xs = nonCompatibleXs[nonCompatibleXs > minx]
if len(left_xs) > 0:
x_min = np.max(left_xs)
if len(right_xs) > 0 :
x_max = np.min(right_xs)
optRanges[x + "_min"].append( x_min )
optRanges[x + "_max"].append( x_max )
#optRanges[x ].append( minx )
for hueValue,hueName in zip(toVec(hueValues),toVec(hues)):
optRanges[hueName].append(hueValue)
return pd.DataFrame(optRanges)
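# Worked example for getOptimizationRange (made-up points): with x = [1, 2, 3, 4],
# y = [5.0, 3.0, 3.1, 6.0] and errors of 0.2 for a single hue value, the minimum is
# at x = 2; x = 3 is compatible with it within its error bar, while x = 1 and x = 4
# are not, so the returned bracket is x_min = 1 and x_max = 4 (the nearest
# incompatible points on each side of the minimum).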
def expandOptimizationRanges(data,label,n):
data=data.reset_index(drop=True)
data=data.dropna()
xs=[]
i=0
for x_min,x_max in zip(data[label + "_min"],data[label + "_max"] ):
x=np.linspace(x_min,x_max,num=n)
x=pd.DataFrame({label : x})
x.index=x.index*0 + i
xs.append(x)
i+=1
if len(xs) > 0:
xs= pd.concat(xs)
        new_parameters_table = pd.merge(data, xs, left_index=True, right_index=True)
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
plt.rcParams['font.size'] = 6
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
graphs_path = root_path+'/graphs/'
results_path = root_path+'/results_analysis/results/'
print("root path:{}".format(root_path))
sys.path.append(root_path)
from tools.results_reader import read_two_stage, read_pure_esvr,read_pure_arma
h_arma = read_pure_arma("Huaxian")
x_arma = read_pure_arma("Xianyang")
z_arma = read_pure_arma("Zhangjiashan")
h_svr_1 = pd.read_csv(root_path+'/Huaxian/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_3 = pd.read_csv(root_path+'/Huaxian/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_5 = pd.read_csv(root_path+'/Huaxian/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_7 = pd.read_csv(root_path+'/Huaxian/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_1 = pd.read_csv(root_path+'/Xianyang/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_3 = pd.read_csv(root_path+'/Xianyang/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_5 = pd.read_csv(root_path+'/Xianyang/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_7 = pd.read_csv(root_path+'/Xianyang/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
h_lstm_1 = pd.read_csv(root_path+'/Huaxian/projects/lstm/1_ahead/optimal/opt_pred.csv')
h_lstm_3 = pd.read_csv(root_path+'/Huaxian/projects/lstm/3_ahead/optimal/opt_pred.csv')
h_lstm_5 = pd.read_csv(root_path+'/Huaxian/projects/lstm/5_ahead/optimal/opt_pred.csv')
h_lstm_7 = pd.read_csv(root_path+'/Huaxian/projects/lstm/7_ahead/optimal/opt_pred.csv')
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import sklearn as sk
import matplotlib.pyplot as plt
import gc
train = pd.read_csv("train.csv",parse_dates=["activation_date"])
test = pd.read_csv("test.csv", parse_dates=["activation_date"])
import numpy as np
import pandas as pd
from sklearn.model_selection import GroupShuffleSplit as sklearnGroupShuffleSplit
class Split():
def __init__(self, dataset=None):
self.dataset = dataset
def UnSplit(self):
"""Unsplit the dataset by setting all values of the split column to null."""
self.dataset.df["split"] = np.nan
def GroupShuffleSplit(df_main, train_pct=.6, test_pct=.25, val_pct=.25, group_col = 'img_filename', random_state=None):
"""
This function uses the GroupShuffleSplit command from sklearn. It can split into 3 groups (train,
test, and val) by applying the command twice.
"""
        gss = sklearnGroupShuffleSplit(n_splits=1, train_size=train_pct, random_state=random_state)
train_indexes, test_indexes = next(gss.split(X=df_main, y=df_main[group_col], groups=df_main.index.values))
df_main.loc[train_indexes,'split'] = "train"
df_main.loc[test_indexes,'split'] = "test"
if val_pct:
df_train = df_main.loc[df_main['split'] == 'train']
df_test = df_main.loc[df_main['split'] == 'test']
df_test = df_test.reset_index()
second_split_pct = float(test_pct/(test_pct+val_pct))
            gss2 = sklearnGroupShuffleSplit(n_splits=1, train_size=second_split_pct, random_state=random_state)
test_indexes_2, val_indexes_2 = next(gss2.split(X=df_test, y=df_test[group_col], groups=df_test.index.values))
df_test.loc[test_indexes_2,'split'] = "test"
df_test.loc[val_indexes_2,'split'] = "val"
return df_train.append(df_test)
else:
return df_main
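    # Usage sketch (illustrative; the toy frame below is an assumption, not project data):
    #   df_toy = pd.DataFrame({
    #       "img_filename": ["a.jpg", "a.jpg", "b.jpg", "c.jpg", "d.jpg", "e.jpg"],
    #       "cat_name": ["dog", "cat", "dog", "cat", "dog", "cat"],
    #   })
    #   out = Split.GroupShuffleSplit(df_toy, train_pct=.6, test_pct=.2, val_pct=.2)
    #   out["split"].value_counts()  # rows sharing an img_filename land in the same split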
#Written with the help of https://stackoverflow.com/questions/56872664/complex-dataset-split-stratifiedgroupshufflesplit
def StratifiedGroupShuffleSplit(df_main, train_pct=.7, test_pct=.3, val_pct=.0, weight=0.01,
group_col = 'img_filename', cat_col = 'cat_name', batch_size=1):
"""
This function will 'split" the dataframe by setting the split collumn equal to
train, test, or val. When a split dataset is exported the annotations will be split into
seperate groups so that can be used used in model training, testing, and validation.
"""
df_main = df_main.reindex(np.random.permutation(df_main.index)) # shuffle dataset
# create empty train, val and test datasets
df_train = pd.DataFrame()
df_val = pd.DataFrame()
df_test = pd.DataFrame()
subject_grouped_df_main = df_main.groupby([group_col], sort=False, as_index=False)
category_grouped_df_main = df_main.groupby(cat_col).count()[[group_col]]/len(df_main)*100
#Check inputs
assert (0 <= weight <= 1), "Weight must be between 0 and 1"
total_splits = round((train_pct) + float(test_pct) + float(val_pct),1)
assert (total_splits == 1), "Sum of train_pct, test_pct, and val_pct must equal 1."
assert (batch_size >= 1 and batch_size <= subject_grouped_df_main.ngroups / 10 ), \
"Batch must be greater than 1 and less than 1/10 count of groups"
def calc_mse_loss(df):
grouped_df = df.groupby(cat_col).count()[[group_col]]/len(df)*100
df_temp = category_grouped_df_main.join(grouped_df, on = cat_col, how = 'left', lsuffix = '_main')
df_temp.fillna(0, inplace=True)
            df_temp['diff'] = (df_temp[group_col + '_main'] - df_temp[group_col])**2
mse_loss = np.mean(df_temp['diff'])
return mse_loss
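        # Worked example of the loss above (made-up percentages): if the full dataset is
        # 60% "dog" / 40% "cat" and a candidate split is 50% / 50%, the squared gaps are
        # (60 - 50)**2 and (40 - 50)**2, so calc_mse_loss returns (100 + 100) / 2 = 100;
        # smaller values mean the split's class mix tracks the overall distribution better.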
i = 0 #counter for all items in dataset
b = 0 #counter for the batches
batch_df = df_main[0:0]
for _, group in subject_grouped_df_main:
if (i < 3):
if (i == 0):
df_train = df_train.append(pd.DataFrame(group), ignore_index=True)
i += 1
continue
elif (i == 1):
                    df_val = df_val.append(pd.DataFrame(group), ignore_index=True)
                    i += 1
                    continue
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import warnings
import requests
import pandas as pd
import re
class RegionMatcher:
"""
Ironing out disrepances between RosPotrebNadzor labels and iso_alpha3 codes.
"""
def get_simplified_region(self, x):
x = x.lower()
x = (
x.replace("край", "")
.replace("область", "")
.replace("республика", "")
.replace(" ао", "")
.replace("г.", "")
)
return x.split()[0]
def get_matching_regions(self, soup, tag="li"):
matches = [
            re.match(r"(\d+\.\s){0,1}(.*)\s-\s(\d+)", x.text) for x in soup.find_all(tag)
]
matches = [
[x.group(2).lower(), int(x.group(3))] for x in matches if x is not None
]
return matches
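    # Example of the list-item text the regex above expects (sample strings are assumed,
    # for illustration): "1. Москва - 2000" parses to ["москва", 2000] and
    # "Московская область - 381" to ["московская область", 381]; the leading
    # "<number>. " prefix is optional.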
def collect_region_update(self, table_soup, region_df):
matches = self.get_matching_regions(table_soup, "li")
if len(matches) == 0:
matches = self.get_matching_regions(table_soup, "p")
if len(matches) == 0:
matches = self.get_matching_regions(table_soup, "div")
if len(matches) == 0:
            raise ValueError('Rospotrebnadzor parser is not working '
                             'due to an unexpected page formatting change.')
# to simplified format
matches = [(self.get_simplified_region(x[0]), x[1]) for x in matches]
# extracting iso codes
iso_codes = region_df.set_index("name")["iso_code"].to_dict()
iso_codes = {self.get_simplified_region(x): iso_codes[x] for x in iso_codes}
matched_codes = [(iso_codes[x[0]], x[1]) for x in matches]
# finding the last date
date = table_soup.find("p", {"class": "date"})
date = datetime.strptime(date.text[:-3], "%d.%m.%Y").strftime("%Y-%m-%d")
update_df = pd.DataFrame(matched_codes)
update_df.columns = ["region", "confirmed"]
update_df["date"] = date
return update_df
class ReportDownloader:
def __init__(self, cfg):
self.cfg = cfg
def get_latest_info(self):
rospage_response = requests.get(self.cfg["rospotreb_page"] + "about/info/news/")
main_page_content = rospage_response.content.decode("Windows-1251")
soup = BeautifulSoup(main_page_content, "html.parser")
link = (
self.cfg["rospotreb_page"]
+ soup.find(
"a",
text=" О подтвержденных случаях новой коронавирусной инфекции COVID-2019 в России",
)["href"]
)
last_report_response = requests.get(link)
report = last_report_response.content.decode("Windows-1251")
soup = BeautifulSoup(report, "html.parser")
div = soup.find("div", {"class": "news-detail"})
return div
def download_report(self):
confirmed_cases = pd.read_csv(self.cfg["timeseries_page"])
last_update = self.get_latest_info()
return confirmed_cases, last_update
class RussianRegionsParser:
"""
Getting up to date data about confirmed COVID-19 cases in Russia.
"""
def __init__(self, cfg):
main_cfg = cfg["rospotreb"]
aux_cfg = cfg["auxiliary"]
self.downloader = ReportDownloader(main_cfg)
self.regions_fname = aux_cfg["regions"]
self.matcher = RegionMatcher()
def fix_date(self, df):
df["date"] = pd.to_datetime(df.date).dt.strftime("%Y-%m-%d")
return df
def convert_series_format(self, original_series_df, regions_df):
"""
Converting original files to a submission-based format.
From regions as columns and dates as rows to the opposite.
"""
regions_df = regions_df.set_index("csse_province_state")
new_cols = ["date"] + list(original_series_df["Province_State"])
date_series = (
original_series_df[original_series_df.columns[11:]]
.transpose()
.reset_index()
)
date_series.columns = new_cols
date_series = date_series.melt(
id_vars=["date"], var_name="region_name", value_name="confirmed"
)
date_series["region"] = date_series["region_name"].apply(
lambda x: regions_df.loc[x, "iso_code"]
)
return self.fix_date(date_series.set_index("region"))
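    # Shape change performed above (toy illustration): the source frame has one row per
    # region and one column per date (date columns start at position 11), e.g.
    # [..., Province_State, 2020-03-01, 2020-03-02, ...]; after the transpose and melt it
    # has one row per (date, region) pair with a single "confirmed" column, indexed by
    # the region's iso_code.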
def merge_update(self, original, update):
"""
RosPotrebNadzor updates are measured in changes by day.
We need to add them to the originals to make resulting update cumulative.
"""
date = datetime.strptime(update["date"][0], "%Y-%m-%d") - timedelta(days=1)
date = date.strftime("%Y-%m-%d")
update["region_name"] = update["region"].apply(
lambda x: original.loc[x, "region_name"][0]
)
update.set_index("region", inplace=True)
original_prev = original.query(f'date == "{date}"')
if original_prev.shape[0] == 0:
warnings.warn(
"Original timeseries source lags two days behind latest rospotrebnadzor update. Returning original."
)
return original
update["confirmed"] = original_prev["confirmed"] + update["confirmed"]
# fill missing values
for region_code in original.index.unique():
if region_code not in update.index:
update.loc[region_code] = [
original_prev.loc[region_code, "confirmed"],
update["date"][0],
original_prev.loc[region_code, "region_name"],
]
return original.append(update).sort_values(by=["region", "date"])
def load_data(self):
        regions_df = pd.read_csv(self.regions_fname)
# Copyright 2019 WISE-PaaS/AFS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import json
import afs2datasource.utils as utils
from afs2datasource.helper import Helper
from pymongo import MongoClient
import pandas as pd
ISODATE_PREFIX = 'afs-isotime-'
import dateutil.parser
def ISODateDecoder(dic):
for key, value in dic.items():
if not ISODATE_PREFIX in value:
continue
value = value.split(ISODATE_PREFIX)[-1].strip()
try:
value = dateutil.parser.isoparse(value)
finally:
dic[key] = value
return dic
class MongoHelper(Helper):
def __init__(self, dataDir):
self._connection = None
data = utils.get_data_from_dataDir(dataDir)
self.username, self.password, self.host, self.port, self.database = utils.get_credential_from_dataDir(data)
self._db = self.database
self._collection = data.get('collection', '')
async def connect(self):
if self._connection is None:
uri = 'mongodb://{}:{}@{}:{}/{}'.format(self.username, self.password, self.host, self.port, self.database)
self._connection = MongoClient(uri)
self._connection.server_info()
def disconnect(self):
if self._connection:
self._connection.close()
self._connection = None
async def execute_query(self, querySql):
if not self._collection:
raise AttributeError('No collection in data')
data = list(self._connection[self._db][self._collection].find(querySql, {'_id': 0}))
        data = pd.DataFrame(data=data)
        return data
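    # Usage sketch (illustrative; dataDir and the query document below are assumptions):
    #   helper = MongoHelper(dataDir)
    #   await helper.connect()
    #   df = await helper.execute_query({'temperature': {'$gt': 25.0}})
    #   helper.disconnect()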
# =============================================================================
# Pelote Network to Tabular Unit Tests
# =============================================================================
import networkx as nx
import pandas as pd
from pytest import raises
from pelote.exceptions import MissingPandasException
from pelote.shim import missing_pandas
from pelote.graph_to_tabular import (
graph_to_nodes_dataframe,
graph_to_edges_dataframe,
graph_to_dataframes,
)
def get_basic_range_graph(n: int = 3) -> nx.Graph:
g = nx.Graph()
g.add_nodes_from(range(n))
for i, node in enumerate(g):
g.nodes[node]["value"] = i
return g
class TestToNodesDataframe(object):
def test_errors(self):
with raises(MissingPandasException), missing_pandas():
graph_to_nodes_dataframe(nx.Graph())
with raises(TypeError):
graph_to_nodes_dataframe(None)
def test_default_behavior(self):
g = get_basic_range_graph()
df = graph_to_nodes_dataframe(g)
expected = pd.DataFrame(data={"key": list(range(3)), "value": list(range(3))})
assert df.equals(expected)
def test_node_key_col(self):
g = get_basic_range_graph()
df = graph_to_nodes_dataframe(g, node_key_col="node")
expected = pd.DataFrame(data={"node": list(range(3)), "value": list(range(3))})
assert df.equals(expected)
def test_node_key_as_index(self):
g = get_basic_range_graph()
df = graph_to_nodes_dataframe(g, node_key_col=None)
expected = pd.DataFrame(index=list(range(3)), data={"value": list(range(3))})
assert df.equals(expected)
class TestToEdgesDataframe(object):
def test_errors(self):
with raises(MissingPandasException), missing_pandas():
graph_to_edges_dataframe(nx.Graph())
with raises(TypeError):
graph_to_edges_dataframe(None)
def test_default_behavior(self):
g = nx.Graph()
g.add_edge(0, 1, weight=2.0)
df = graph_to_edges_dataframe(g)
expected = pd.DataFrame(data={"source": [0], "target": [1], "weight": [2.0]})
assert df.equals(expected)
def test_source_target_cols(self):
g = nx.Graph()
g.add_edge(0, 1, weight=2.0)
df = graph_to_edges_dataframe(
g, edge_source_col="Source", edge_target_col="Target"
)
expected = pd.DataFrame(data={"Source": [0], "Target": [1], "weight": [2.0]})
assert df.equals(expected)
def test_source_target_data(self):
g = nx.DiGraph()
g.add_node(1, name="John", age=34)
g.add_node(2, name="Lisa", age=47)
g.add_edge(1, 2)
# Source data
df = graph_to_edges_dataframe(g, source_node_data=("age",))
expected = pd.DataFrame(data={"source": [1], "target": [2], "age": [34]})
assert df.equals(expected)
# Target data
df = graph_to_edges_dataframe(g, target_node_data=("age",))
        expected = pd.DataFrame(data={"source": [1], "target": [2], "age": [47]})
        assert df.equals(expected)
"""
Author: <NAME>
Modified: <NAME>
"""
import os
import warnings
import numpy as np
import pandas as pd
import scipy.stats
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from statsmodels.tools.sm_exceptions import EstimationWarning
from statsmodels.tsa.holtwinters import (ExponentialSmoothing,
SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS)
base, _ = os.path.split(os.path.abspath(__file__))
housing_data = pd.read_csv(os.path.join(base, 'results', 'housing-data.csv'))
housing_data = housing_data.set_index('DATE')
housing_data = housing_data.asfreq('MS')
SEASONALS = ('add', 'mul', None)
TRENDS = ('add', 'mul', None)
def _simple_dbl_exp_smoother(x, alpha, beta, l0, b0, nforecast=0):
"""
Simple, slow, direct implementation of double exp smoothing for testing
"""
n = x.shape[0]
lvals = np.zeros(n)
b = np.zeros(n)
xhat = np.zeros(n)
f = np.zeros(nforecast)
lvals[0] = l0
b[0] = b0
# Special case the 0 observations since index -1 is not available
xhat[0] = l0 + b0
lvals[0] = alpha * x[0] + (1 - alpha) * (l0 + b0)
b[0] = beta * (lvals[0] - l0) + (1 - beta) * b0
for t in range(1, n):
# Obs in index t is the time t forecast for t + 1
lvals[t] = alpha * x[t] + (1 - alpha) * (lvals[t - 1] + b[t - 1])
b[t] = beta * (lvals[t] - lvals[t - 1]) + (1 - beta) * b[t - 1]
xhat[1:] = lvals[0:-1] + b[0:-1]
f[:] = lvals[-1] + np.arange(1, nforecast + 1) * b[-1]
err = x - xhat
return lvals, b, f, err, xhat
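# Recursions implemented by _simple_dbl_exp_smoother above (standard Holt double
# exponential smoothing):
#   level:    l_t = alpha * x_t + (1 - alpha) * (l_{t-1} + b_{t-1})
#   trend:    b_t = beta * (l_t - l_{t-1}) + (1 - beta) * b_{t-1}
#   fitted:   xhat_{t+1} = l_t + b_t, and the h-step-ahead forecast is f_h = l_n + h * b_n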
class TestHoltWinters(object):
@classmethod
def setup_class(cls):
# Changed for backwards compatibility with pandas
# oildata_oil_json = '{"851990400000":446.6565229,"883526400000":454.4733065,"915062400000":455.662974,"946598400000":423.6322388,"978220800000":456.2713279,"1009756800000":440.5880501,"1041292800000":425.3325201,"1072828800000":485.1494479,"1104451200000":506.0481621,"1135987200000":526.7919833,"1167523200000":514.268889,"1199059200000":494.2110193}'
# oildata_oil = pd.read_json(oildata_oil_json, typ='Series').sort_index()
data = [446.65652290000003, 454.47330649999998, 455.66297400000002,
423.63223879999998, 456.27132790000002, 440.58805009999998,
425.33252010000001, 485.14944789999998, 506.04816210000001,
526.79198329999997, 514.26888899999994, 494.21101929999998]
index = ['1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
'1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
'2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00',
'2005-12-31 00:00:00', '2006-12-31 00:00:00', '2007-12-31 00:00:00']
oildata_oil = pd.Series(data, index)
oildata_oil.index = pd.DatetimeIndex(oildata_oil.index,
freq=pd.infer_freq(oildata_oil.index))
cls.oildata_oil = oildata_oil
# air_ausair_json = '{"662601600000":17.5534,"694137600000":21.8601,"725760000000":23.8866,"757296000000":26.9293,"788832000000":26.8885,"820368000000":28.8314,"851990400000":30.0751,"883526400000":30.9535,"915062400000":30.1857,"946598400000":31.5797,"978220800000":32.577569,"1009756800000":33.477398,"1041292800000":39.021581,"1072828800000":41.386432,"1104451200000":41.596552}'
# air_ausair = pd.read_json(air_ausair_json, typ='Series').sort_index()
data = [17.5534, 21.860099999999999, 23.886600000000001,
26.929300000000001, 26.888500000000001, 28.831399999999999,
30.075099999999999, 30.953499999999998, 30.185700000000001,
31.579699999999999, 32.577568999999997, 33.477398000000001,
39.021580999999998, 41.386431999999999, 41.596552000000003]
index = ['1990-12-31 00:00:00', '1991-12-31 00:00:00', '1992-12-31 00:00:00',
'1993-12-31 00:00:00', '1994-12-31 00:00:00', '1995-12-31 00:00:00',
'1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
'1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
'2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00']
air_ausair = pd.Series(data, index)
air_ausair.index = pd.DatetimeIndex(air_ausair.index,
freq=pd.infer_freq(air_ausair.index))
cls.air_ausair = air_ausair
# livestock2_livestock_json = '{"31449600000":263.917747,"62985600000":268.307222,"94608000000":260.662556,"126144000000":266.639419,"157680000000":277.515778,"189216000000":283.834045,"220838400000":290.309028,"252374400000":292.474198,"283910400000":300.830694,"315446400000":309.286657,"347068800000":318.331081,"378604800000":329.37239,"410140800000":338.883998,"441676800000":339.244126,"473299200000":328.600632,"504835200000":314.255385,"536371200000":314.459695,"567907200000":321.413779,"599529600000":329.789292,"631065600000":346.385165,"662601600000":352.297882,"694137600000":348.370515,"725760000000":417.562922,"757296000000":417.12357,"788832000000":417.749459,"820368000000":412.233904,"851990400000":411.946817,"883526400000":394.697075,"915062400000":401.49927,"946598400000":408.270468,"978220800000":414.2428}'
# livestock2_livestock = pd.read_json(livestock2_livestock_json, typ='Series').sort_index()
data = [263.91774700000002, 268.30722200000002, 260.662556,
266.63941899999998, 277.51577800000001, 283.834045,
290.30902800000001, 292.474198, 300.83069399999999,
309.28665699999999, 318.33108099999998, 329.37239,
338.88399800000002, 339.24412599999999, 328.60063200000002,
314.25538499999999, 314.45969500000001, 321.41377899999998,
329.78929199999999, 346.38516499999997, 352.29788200000002,
348.37051500000001, 417.56292200000001, 417.12356999999997,
417.749459, 412.233904, 411.94681700000001, 394.69707499999998,
401.49927000000002, 408.27046799999999, 414.24279999999999]
index = ['1970-12-31 00:00:00', '1971-12-31 00:00:00', '1972-12-31 00:00:00',
'1973-12-31 00:00:00', '1974-12-31 00:00:00', '1975-12-31 00:00:00',
'1976-12-31 00:00:00', '1977-12-31 00:00:00', '1978-12-31 00:00:00',
'1979-12-31 00:00:00', '1980-12-31 00:00:00', '1981-12-31 00:00:00',
'1982-12-31 00:00:00', '1983-12-31 00:00:00', '1984-12-31 00:00:00',
'1985-12-31 00:00:00', '1986-12-31 00:00:00', '1987-12-31 00:00:00',
'1988-12-31 00:00:00', '1989-12-31 00:00:00', '1990-12-31 00:00:00',
'1991-12-31 00:00:00', '1992-12-31 00:00:00', '1993-12-31 00:00:00',
'1994-12-31 00:00:00', '1995-12-31 00:00:00', '1996-12-31 00:00:00',
'1997-12-31 00:00:00', '1998-12-31 00:00:00', '1999-12-31 00:00:00',
'2000-12-31 00:00:00']
livestock2_livestock = pd.Series(data, index)
livestock2_livestock.index = pd.DatetimeIndex(
livestock2_livestock.index,
freq=pd.infer_freq(livestock2_livestock.index))
cls.livestock2_livestock = livestock2_livestock
# aust_json = '{"1104537600000":41.727458,"1112313600000":24.04185,"1120176000000":32.328103,"1128124800000":37.328708,"1136073600000":46.213153,"1143849600000":29.346326,"1151712000000":36.48291,"1159660800000":42.977719,"1167609600000":48.901525,"1175385600000":31.180221,"1183248000000":37.717881,"1191196800000":40.420211,"1199145600000":51.206863,"1207008000000":31.887228,"1214870400000":40.978263,"1222819200000":43.772491,"1230768000000":55.558567,"1238544000000":33.850915,"1246406400000":42.076383,"1254355200000":45.642292,"1262304000000":59.76678,"1270080000000":35.191877,"1277942400000":44.319737,"1285891200000":47.913736}'
# aust = pd.read_json(aust_json, typ='Series').sort_index()
data = [41.727457999999999, 24.04185, 32.328102999999999,
37.328707999999999, 46.213152999999998, 29.346326000000001,
36.482909999999997, 42.977719, 48.901524999999999,
31.180221, 37.717880999999998, 40.420211000000002,
51.206862999999998, 31.887228, 40.978262999999998,
43.772491000000002, 55.558566999999996, 33.850915000000001,
42.076383, 45.642291999999998, 59.766779999999997,
35.191876999999998, 44.319737000000003, 47.913736]
index = ['2005-03-01 00:00:00', '2005-06-01 00:00:00', '2005-09-01 00:00:00',
'2005-12-01 00:00:00', '2006-03-01 00:00:00', '2006-06-01 00:00:00',
'2006-09-01 00:00:00', '2006-12-01 00:00:00', '2007-03-01 00:00:00',
'2007-06-01 00:00:00', '2007-09-01 00:00:00', '2007-12-01 00:00:00',
'2008-03-01 00:00:00', '2008-06-01 00:00:00', '2008-09-01 00:00:00',
'2008-12-01 00:00:00', '2009-03-01 00:00:00', '2009-06-01 00:00:00',
'2009-09-01 00:00:00', '2009-12-01 00:00:00', '2010-03-01 00:00:00',
'2010-06-01 00:00:00', '2010-09-01 00:00:00', '2010-12-01 00:00:00']
aust = pd.Series(data, index)
aust.index = pd.DatetimeIndex(aust.index,
freq=pd.infer_freq(aust.index))
cls.aust = aust
def test_predict(self):
fit1 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
seasonal='mul').fit()
fit2 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
seasonal='mul').fit()
# fit3 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
# seasonal='mul').fit(remove_bias=True, use_basinhopping=True)
assert_almost_equal(fit1.predict('2011-03-01 00:00:00',
'2011-12-01 00:00:00'),
[61.3083, 37.3730, 46.9652, 51.5578], 3)
assert_almost_equal(fit2.predict(end='2011-12-01 00:00:00'),
[61.3083, 37.3730, 46.9652, 51.5578], 3)
# assert_almost_equal(fit3.predict('2010-10-01 00:00:00', '2010-10-01 00:00:00'), [49.087], 3)
def test_ndarray(self):
fit1 = ExponentialSmoothing(self.aust.values, seasonal_periods=4,
trend='add', seasonal='mul').fit()
assert_almost_equal(fit1.forecast(4),
[61.3083, 37.3730, 46.9652, 51.5578], 3)
# FIXME: this is passing 2019-05-22 on some platforms; what has changed?
@pytest.mark.xfail(reason='Optimizer does not converge', strict=False)
def test_forecast(self):
fit1 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
seasonal='add').fit()
assert_almost_equal(fit1.forecast(steps=4),
[60.9542, 36.8505, 46.1628, 50.1272], 3)
def test_simple_exp_smoothing(self):
fit1 = SimpleExpSmoothing(self.oildata_oil).fit(0.2, optimized=False)
fit2 = SimpleExpSmoothing(self.oildata_oil).fit(0.6, optimized=False)
fit3 = SimpleExpSmoothing(self.oildata_oil).fit()
assert_almost_equal(fit1.forecast(1), [484.802468], 4)
assert_almost_equal(fit1.level,
[446.65652290, 448.21987962, 449.7084985,
444.49324656, 446.84886283, 445.59670028,
441.54386424, 450.26498098, 461.4216172,
474.49569042, 482.45033014, 484.80246797], 4)
assert_almost_equal(fit2.forecast(1), [501.837461], 4)
assert_almost_equal(fit3.forecast(1), [496.493543], 4)
assert_almost_equal(fit3.params['smoothing_level'], 0.891998, 4)
# has to be 3 for old python2.7 scipy versions
assert_almost_equal(fit3.params['initial_level'], 447.478440, 3)
def test_holt(self):
fit1 = Holt(self.air_ausair).fit(smoothing_level=0.8,
smoothing_slope=0.2, optimized=False)
fit2 = Holt(self.air_ausair, exponential=True).fit(
smoothing_level=0.8, smoothing_slope=0.2,
optimized=False)
fit3 = Holt(self.air_ausair, damped=True).fit(smoothing_level=0.8,
smoothing_slope=0.2)
assert_almost_equal(fit1.forecast(5), [43.76, 45.59, 47.43, 49.27, 51.10], 2)
assert_almost_equal(fit1.slope,
[3.617628, 3.59006512, 3.33438212, 3.23657639, 2.69263502,
2.46388914, 2.2229097, 1.95959226, 1.47054601, 1.3604894,
1.28045881, 1.20355193, 1.88267152, 2.09564416, 1.83655482], 4)
assert_almost_equal(fit1.fittedfcast,
[21.8601, 22.032368, 25.48461872, 27.54058587,
30.28813356, 30.26106173, 31.58122149, 32.599234,
33.24223906, 32.26755382, 33.07776017, 33.95806605,
34.77708354, 40.05535303, 43.21586036, 43.75696849], 4)
assert_almost_equal(fit2.forecast(5),
[44.60, 47.24, 50.04, 53.01, 56.15], 2)
assert_almost_equal(fit3.forecast(5),
[42.85, 43.81, 44.66, 45.41, 46.06], 2)
@pytest.mark.smoke
def test_holt_damp_fit(self):
# Smoke test for parameter estimation
fit1 = SimpleExpSmoothing(self.livestock2_livestock).fit()
mod4 = Holt(self.livestock2_livestock, damped=True)
fit4 = mod4.fit(damping_slope=0.98)
mod5 = Holt(self.livestock2_livestock, exponential=True, damped=True)
fit5 = mod5.fit()
# We accept the below values as we getting a better SSE than text book
assert_almost_equal(fit1.params['smoothing_level'], 1.00, 2)
assert_almost_equal(fit1.params['smoothing_slope'], np.NaN, 2)
assert_almost_equal(fit1.params['damping_slope'], np.NaN, 2)
assert_almost_equal(fit1.params['initial_level'], 263.92, 2)
assert_almost_equal(fit1.params['initial_slope'], np.NaN, 2)
assert_almost_equal(fit1.sse, 6761.35, 2) # 6080.26
assert_almost_equal(fit4.params['smoothing_level'], 0.98, 2)
assert_almost_equal(fit4.params['smoothing_slope'], 0.00, 2)
assert_almost_equal(fit4.params['damping_slope'], 0.98, 2)
assert_almost_equal(fit4.params['initial_level'], 257.36, 2)
assert_almost_equal(fit4.params['initial_slope'], 6.64, 2)
assert_almost_equal(fit4.sse, 6036.56, 2) # 6080.26
assert_almost_equal(fit5.params['smoothing_level'], 0.97, 2)
assert_almost_equal(fit5.params['smoothing_slope'], 0.00, 2)
assert_almost_equal(fit5.params['damping_slope'], 0.98, 2)
assert_almost_equal(fit5.params['initial_level'], 258.95, 2)
assert_almost_equal(fit5.params['initial_slope'], 1.04, 2)
assert_almost_equal(fit5.sse, 6082.00, 2) # 6100.11
def test_holt_damp_R(self):
# Test the damping parameters against the R forecast packages `ets`
# library(ets)
# livestock2_livestock <- c(...)
# res <- ets(livestock2_livestock, model='AAN', damped=TRUE, phi=0.98)
mod = Holt(self.livestock2_livestock, damped=True)
params = {
'smoothing_level': 0.97402626,
'smoothing_slope': 0.00010006,
'damping_slope': 0.98,
'initial_level': 252.59039965,
'initial_slope': 6.90265918}
fit = mod.fit(optimized=False, **params)
# Check that we captured the parameters correctly
for key in params.keys():
assert_allclose(fit.params[key], params[key])
# Summary output
# print(res$mse)
assert_allclose(fit.sse / mod.nobs, 195.4397924865488, atol=1e-3)
# print(res$aicc)
# TODO: this fails - different AICC definition?
# assert_allclose(fit.aicc, 282.386426659408, atol=1e-3)
# print(res$bic)
# TODO: this fails - different BIC definition?
# assert_allclose(fit.bic, 287.1563626818338)
# print(res$states[,'l'])
# note: this array includes the initial level
desired = [
252.5903996514365, 263.7992355246843, 268.3623324350207,
261.0312983437606, 266.6590942700923, 277.3958197247272,
283.8256217863908, 290.2962560621914, 292.5701438129583,
300.7655919939834, 309.2118057241649, 318.2377698496536,
329.2238709362550, 338.7709778307978, 339.3669793596703,
329.0127022356033, 314.7684267018998, 314.5948077575944,
321.3612035017972, 329.6924360833211, 346.0712138652086,
352.2534120008911, 348.5862874190927, 415.8839400693967,
417.2018843196238, 417.8435306633725, 412.4857261252961,
412.0647865321129, 395.2500605270393, 401.4367438266322,
408.1907701386275, 414.1814574903921]
assert_allclose(np.r_[fit.params['initial_level'], fit.level], desired)
# print(res$states[,'b'])
# note: this array includes the initial slope
desired = [
6.902659175332394, 6.765062519124909, 6.629548973536494,
6.495537532917715, 6.365550989616566, 6.238702070454378,
6.113960476763530, 5.991730467006233, 5.871526257315264,
5.754346516684953, 5.639547926790058, 5.527116419415724,
5.417146212898857, 5.309238662451385, 5.202580636191761,
5.096941655567694, 4.993026494493987, 4.892645486210410,
4.794995106664251, 4.699468310763351, 4.606688340205792,
4.514725879754355, 4.423600168391240, 4.341595902295941,
4.254462303550087, 4.169010676686062, 4.084660399498803,
4.002512751871354, 3.920332298146730, 3.842166514133902,
3.765630194200260, 3.690553892582855]
# TODO: not sure why the precision is so low here...
assert_allclose(np.r_[fit.params['initial_slope'], fit.slope], desired,
atol=1e-3)
# print(res$fitted)
desired = [
259.3550056432622, 270.4289967934267, 274.8592904290865,
267.3969251260200, 272.8973342399166, 283.5097477537724,
289.8173030536191, 296.1681519198575, 298.3242395451272,
306.4048515803347, 314.7385626924191, 323.6543439406810,
334.5326742248959, 343.9740317200002, 344.4655083831382,
334.0077050580596, 319.6615926665040, 319.3896003340806,
326.0602987063282, 334.2979150278692, 350.5857684386102,
356.6778433630504, 352.9214155841161, 420.1387040536467,
421.3712573771029, 421.9291611265248, 416.4886933168049,
415.9872490289468, 399.0919861792231, 405.2020670104834,
411.8810877289437]
assert_allclose(fit.fittedvalues, desired, atol=1e-3)
# print(forecast(res)$mean)
desired = [
417.7982003051233, 421.3426082635598, 424.8161280628277,
428.2201774661102, 431.5561458813270, 434.8253949282395,
438.0292589942138, 441.1690457788685, 444.2460368278302,
447.2614880558126]
assert_allclose(fit.forecast(10), desired, atol=1e-4)
def test_hw_seasonal(self):
mod = ExponentialSmoothing(self.aust, seasonal_periods=4,
trend='additive', seasonal='additive')
fit1 = mod.fit(use_boxcox=True)
fit2 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
seasonal='mul').fit(use_boxcox=True)
assert_almost_equal(fit1.forecast(8),
[61.34, 37.24, 46.84, 51.01, 64.47, 39.78, 49.64, 53.90],
2)
assert_almost_equal(fit2.forecast(8),
[60.97, 36.99, 46.71, 51.48, 64.46, 39.02, 49.29, 54.32],
2)
ExponentialSmoothing(self.aust, seasonal_periods=4, trend='mul',
seasonal='add').fit(use_boxcox='log')
ExponentialSmoothing(self.aust,
seasonal_periods=4,
trend='multiplicative',
seasonal='multiplicative').fit(use_boxcox='log')
# Skip since estimator is unstable
# assert_almost_equal(fit5.forecast(1), [60.60], 2)
# assert_almost_equal(fit6.forecast(1), [61.47], 2)
# FIXME: this is passing 2019-05-22; what has changed?
# @pytest.mark.xfail(reason='Optimizer does not converge')
def test_hw_seasonal_buggy(self):
fit3 = ExponentialSmoothing(self.aust, seasonal_periods=4,
seasonal='add').fit(use_boxcox=True)
assert_almost_equal(fit3.forecast(8),
[59.91, 35.71, 44.64, 47.62, 59.91, 35.71, 44.64, 47.62],
2)
fit4 = ExponentialSmoothing(self.aust, seasonal_periods=4,
seasonal='mul').fit(use_boxcox=True)
assert_almost_equal(fit4.forecast(8),
[60.71, 35.70, 44.63, 47.55, 60.71, 35.70, 44.63, 47.55],
2)
@pytest.mark.parametrize('trend_seasonal', (('mul', None), (None, 'mul'), ('mul', 'mul')))
def test_negative_multipliative(trend_seasonal):
trend, seasonal = trend_seasonal
y = -np.ones(100)
with pytest.raises(ValueError):
ExponentialSmoothing(y, trend=trend, seasonal=seasonal, seasonal_periods=10)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_dampen_no_trend(seasonal):
y = -np.ones(100)
with pytest.raises(TypeError):
ExponentialSmoothing(housing_data, trend=False, seasonal=seasonal, damped=True,
seasonal_periods=10)
@pytest.mark.parametrize('seasonal', ('add', 'mul'))
def test_invalid_seasonal(seasonal):
y = pd.Series(-np.ones(100),index=pd.date_range('2000-1-1', periods=100, freq='MS'))
with pytest.raises(ValueError):
ExponentialSmoothing(y, seasonal=seasonal, seasonal_periods=1)
def test_2d_data():
with pytest.raises(ValueError):
ExponentialSmoothing(pd.concat([housing_data, housing_data], 1)).fit()
def test_infer_freq():
hd2 = housing_data.copy()
hd2.index = list(hd2.index)
with warnings.catch_warnings(record=True) as w:
mod = ExponentialSmoothing(hd2, trend='add', seasonal='add')
assert len(w) == 1
assert 'ValueWarning' in str(w[0])
assert mod.seasonal_periods == 12
@pytest.mark.parametrize('trend', TRENDS)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_start_params(trend, seasonal):
mod = ExponentialSmoothing(housing_data, trend='add', seasonal='add')
res = mod.fit()
res2 = mod.fit(start_params=res.mle_retvals.x)
assert res2.sse <= res.sse
def test_no_params_to_optimize():
mod = ExponentialSmoothing(housing_data)
with pytest.warns(EstimationWarning):
mod.fit(smoothing_level=0.5, initial_level=housing_data.iloc[0])
def test_invalid_start_param_length():
mod = ExponentialSmoothing(housing_data)
with pytest.raises(ValueError):
mod.fit(start_params=np.array([0.5]))
def test_basin_hopping(reset_randomstate):
mod = ExponentialSmoothing(housing_data, trend='add')
res = mod.fit()
res2 = mod.fit(use_basinhopping=True)
# Basin hopping occasionally prduces a slightly larger objective
tol = 1e-5
assert res2.sse <= res.sse + tol
def test_debiased():
mod = ExponentialSmoothing(housing_data, trend='add')
res = mod.fit()
res2 = mod.fit(remove_bias=True)
assert np.any(res.fittedvalues != res2.fittedvalues)
@pytest.mark.smoke
@pytest.mark.parametrize('trend', TRENDS)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_float_boxcox(trend, seasonal):
res = ExponentialSmoothing(housing_data, trend=trend, seasonal=seasonal).fit(use_boxcox=0.5)
assert_allclose(res.params['use_boxcox'], 0.5)
@pytest.mark.parametrize('trend', TRENDS)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_equivalence_cython_python(trend, seasonal):
mod = ExponentialSmoothing(housing_data, trend=trend, seasonal=seasonal)
with pytest.warns(None):
# Overflow in mul-mul case fixed
res = mod.fit()
res.summary() # Smoke test
params = res.params
nobs = housing_data.shape[0]
y = np.squeeze(np.asarray(housing_data))
m = 12 if seasonal else 0
lvals = np.zeros(nobs)
b = np.zeros(nobs)
s = np.zeros(nobs + m - 1)
p = np.zeros(6 + m)
max_seen = np.finfo(np.double).max
alpha = params['smoothing_level']
beta = params['smoothing_slope']
gamma = params['smoothing_seasonal']
phi = params['damping_slope']
phi = 1.0 if np.isnan(phi) else phi
l0 = params['initial_level']
b0 = params['initial_slope']
p[:6] = alpha, beta, gamma, l0, b0, phi
if seasonal:
p[6:] = params['initial_seasons']
xi = np.ones_like(p).astype(np.uint8)
py_func = PY_SMOOTHERS[(seasonal, trend)]
cy_func = SMOOTHERS[(seasonal, trend)]
p_copy = p.copy()
sse_cy = cy_func(p, xi, p_copy, y, lvals, b, s, m, nobs, max_seen)
sse_py = py_func(p, xi, p_copy, y, lvals, b, s, m, nobs, max_seen)
assert_allclose(sse_py, sse_cy)
def test_direct_holt_add():
mod = SimpleExpSmoothing(housing_data)
res = mod.fit()
x = np.squeeze(np.asarray(mod.endog))
alpha = res.params['smoothing_level']
l, b, f, err, xhat = _simple_dbl_exp_smoother(x, alpha, beta=0.0,
l0=res.params['initial_level'], b0=0.0,
nforecast=5)
assert_allclose(l, res.level)
assert_allclose(f, res.level.iloc[-1] * np.ones(5))
assert_allclose(f, res.forecast(5))
mod = ExponentialSmoothing(housing_data, trend='add')
res = mod.fit()
x = np.squeeze(np.asarray(mod.endog))
alpha = res.params['smoothing_level']
beta = res.params['smoothing_slope']
l, b, f, err, xhat = _simple_dbl_exp_smoother(x, alpha, beta=beta,
l0=res.params['initial_level'],
b0=res.params['initial_slope'], nforecast=5)
assert_allclose(xhat, res.fittedvalues)
assert_allclose(l + b, res.level + res.slope)
assert_allclose(l, res.level)
assert_allclose(b, res.slope)
assert_allclose(f, res.level.iloc[-1] + res.slope.iloc[-1] * np.array([1, 2, 3, 4, 5]))
assert_allclose(f, res.forecast(5))
def test_integer_array(reset_randomstate):
rs = np.random.RandomState(12345)
e = 10*rs.standard_normal((1000,2))
y_star = np.cumsum(e[:,0])
y = y_star + e[:,1]
y = y.astype(np.long)
res = ExponentialSmoothing(y,trend='add').fit()
assert res.params['smoothing_level'] != 0.0
def test_damping_slope_zero():
endog = np.arange(10)
mod = ExponentialSmoothing(endog, trend='add', damped=True)
res1 = mod.fit(smoothing_level=1, smoothing_slope=0.0, damping_slope=1e-20)
pred1 = res1.predict(start=0)
assert_allclose(pred1, np.r_[0., np.arange(9)], atol=1e-10)
res2 = mod.fit(smoothing_level=1, smoothing_slope=0.0, damping_slope=0)
pred2 = res2.predict(start=0)
assert_allclose(pred2, np.r_[0., np.arange(9)], atol=1e-10)
assert_allclose(pred1, pred2, atol=1e-10)
@pytest.fixture
def austourists():
# austourists dataset from fpp2 package
# https://cran.r-project.org/web/packages/fpp2/index.html
data = [30.05251, 19.14850, 25.31769, 27.59144, 32.07646,
23.48796, 28.47594, 35.12375, 36.83848, 25.00702,
30.72223, 28.69376, 36.64099, 23.82461, 29.31168,
31.77031, 35.17788, 19.77524, 29.60175, 34.53884,
41.27360, 26.65586, 28.27986, 35.19115, 42.20566,
24.64917, 32.66734, 37.25735, 45.24246, 29.35048,
36.34421, 41.78208, 49.27660, 31.27540, 37.85063,
38.83704, 51.23690, 31.83855, 41.32342, 42.79900,
55.70836, 33.40714, 42.31664, 45.15712, 59.57608,
34.83733, 44.84168, 46.97125, 60.01903, 38.37118,
46.97586, 50.73380, 61.64687, 39.29957, 52.67121,
54.33232, 66.83436, 40.87119, 51.82854, 57.49191,
65.25147, 43.06121, 54.76076, 59.83447, 73.25703,
47.69662, 61.09777, 66.05576,]
index = pd.date_range("1999-03-01", "2015-12-01", freq="3MS")
    return pd.Series(data, index)
# coding: utf-8
import os
import re
import numpy as np
import pandas as pd
import ujson as json
patient_ids = []
for filename in os.listdir('./raw'):
# the patient data in PhysioNet contains 6-digits
match = re.search('\d{6}', filename)
if match:
id_ = match.group()
patient_ids.append(id_)
out = pd.read_csv('./raw/Outcomes-a.txt').set_index('RecordID')['In-hospital_death']
# we select 35 attributes which contains enough non-values
attributes = ['DiasABP', 'HR', 'Na', 'Lactate', 'NIDiasABP', 'PaO2', 'WBC', 'pH', 'Albumin', 'ALT', 'Glucose', 'SaO2',
'Temp', 'AST', 'Bilirubin', 'HCO3', 'BUN', 'RespRate', 'Mg', 'HCT', 'SysABP', 'FiO2', 'K', 'GCS',
'Cholesterol', 'NISysABP', 'TroponinT', 'MAP', 'TroponinI', 'PaCO2', 'Platelets', 'Urine', 'NIMAP',
'Creatinine', 'ALP']
# mean and std of 35 attributes
mean = np.array([59.540976152469405, 86.72320413227443, 139.06972964987443, 2.8797765291788986, 58.13833409690321,
147.4835678885565, 12.670222585415166, 7.490957887101613, 2.922874149659863, 394.8899400819931,
141.4867570064675, 96.66380228136883, 37.07362841054398, 505.5576196473552, 2.906465787821709,
23.118951553526724, 27.413004968675743, 19.64795551193981, 2.0277491155660416, 30.692432164676188,
119.60137167841977, 0.5404785381886381, 4.135790642787733, 11.407767149315339, 156.51746031746032,
119.15012244292181, 1.2004983498349853, 80.20321011673151, 7.127188940092161, 40.39875518672199,
191.05877024038804, 116.1171573535279, 77.08923183026529, 1.5052390166989214, 116.77122488658458])
std = np.array(
[13.01436781437145, 17.789923096504985, 5.185595006246348, 2.5287518090506755, 15.06074282896952, 85.96290370390257,
7.649058756791069, 8.384743923130074, 0.6515057685658769, 1201.033856726966, 67.62249645388543, 3.294112002091972,
1.5604879744921516, 1515.362517984297, 5.902070316876287, 4.707600932877377, 23.403743427107095, 5.50914416318306,
0.4220051299992514, 5.002058959758486, 23.730556355204214, 0.18634432509312762, 0.706337033602292,
3.967579823394297, 45.99491531484596, 21.97610723063014, 2.716532297586456, 16.232515568438338, 9.754483687298688,
9.062327978713556, 106.50939503021543, 170.65318497610315, 14.856134327604906, 1.6369529387005546,
133.96778334724377])
fs = open('./json/json', 'w')
def to_time_bin(x):
h, m = map(int, x.split(':'))
return h
def parse_data(x):
x = x.set_index('Parameter').to_dict()['Value']
values = []
for attr in attributes:
if attr in x:
values.append(x[attr])
else:
values.append(np.nan)
return values
def parse_delta(masks, dir_):
if dir_ == 'backward':
masks = masks[::-1]
deltas = []
for h in range(48):
if h == 0:
deltas.append(np.ones(35))
else:
deltas.append(np.ones(35) + (1 - masks[h]) * deltas[-1])
return np.array(deltas)
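# Worked example of parse_delta for a single attribute (illustrative mask values):
# masks [1, 0, 0, 1] over four hourly steps yield deltas [1, 2, 3, 1]; the delta is 1
# whenever the value is observed and otherwise grows by 1 per missing hour, giving a
# time-since-last-observation feature for the downstream imputation model.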
def parse_rec(values, masks, evals, eval_masks, dir_):
deltas = parse_delta(masks, dir_)
# only used in GRU-D
    forwards = pd.DataFrame(values)
# -*- coding: utf-8 -*-
"""
v17.csv (final submission) ... averaging model of v9s, v13 and v16
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
import subprocess
import importlib
import math
from pathlib import Path
import json
import re
import warnings
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
import shapely.ops
import shapely.geometry
import skimage.transform
import rasterio.features
MODEL_NAME = 'v17'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
STRIDE_SZ = 197
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train"
# Parameters
MIN_POLYGON_AREA = 30
# Input files (required for validation)
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
# ---------------------------------------------------------
# Image list, Image container and mask container
V5_IMAGE_DIR = "/data/working/images/{}".format('v5')
FMT_VALTEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
# Model files
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
FN_SOLUTION_CSV = "/data/{}.csv".format(MODEL_NAME)
# Logger
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
def get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
min_poly_area=int(best_row['min_area_th']),
)
return param
def area_id_to_prefix(area_id):
"""
    Return the AOI prefix (e.g. 'AOI_2_Vegas') for the given area_id.
"""
area_dict = {
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
lines = [line for line in stdout_data.decode('utf8').split('\n')[-10:]]
# Expected lines:
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
        overall_fscore = float(re.findall(r"([\d\.]+)", lines[0])[0])
        tp = int(re.findall(r"(\d+)", lines[3])[0])
        fp = int(re.findall(r"(\d+)", lines[4])[0])
        fn = int(re.findall(r"(\d+)", lines[5])[0])
        precision = float(re.findall(r"([\d\.]+)", lines[6])[0])
        recall = float(re.findall(r"([\d\.]+)", lines[7])[0])
        fscore = float(re.findall(r"([\d\.]+)", lines[8])[0])
else:
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def extract_y_pred(mod, area_id):
prefix = area_id_to_prefix(area_id)
fn = mod.FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
return y_pred
def _internal_test_predict_best_param(area_id,
rescale_pred_list=[],
slice_pred_list=[]):
prefix = area_id_to_prefix(area_id)
# Load test imagelist
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
pred_values_array = np.zeros((len(df_test), 650, 650))
for idx, image_id in enumerate(df_test.index.tolist()):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
for slice_pred in slice_pred_list:
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
slice_pred[slice_idx][0])
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
for rescale_pred in rescale_pred_list:
y_pred_idx = skimage.transform.resize(
rescale_pred[idx][0], (650, 650))
pred_values += y_pred_idx
pred_count += 1
# Normalize
pred_values = pred_values / pred_count
pred_values_array[idx, :, :] = pred_values
return pred_values_array
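# Tiling arithmetic used above: the nine 256x256 slice predictions are laid out on a
# 3x3 grid with offsets x0, y0 in {0, 197, 394} (STRIDE_SZ = 197), and 394 + 256 = 650
# exactly covers the ORIGINAL_SIZE tile; overlapping pixels, plus any rescaled
# full-tile predictions, are averaged through the pred_count normalizer.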
def _internal_validate_predict_best_param(area_id,
save_pred=True,
enable_tqdm=False,
rescale_pred_list=[],
slice_pred_list=[]):
prefix = area_id_to_prefix(area_id)
# Load valtest imagelist
fn_valtest = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_valtest = pd.read_csv(fn_valtest, index_col='ImageId')
pred_values_array = np.zeros((len(df_valtest), 650, 650))
for idx, image_id in enumerate(df_valtest.index.tolist()):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
for slice_pred in slice_pred_list:
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
slice_pred[slice_idx][0])
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
for rescale_pred in rescale_pred_list:
y_pred_idx = skimage.transform.resize(
rescale_pred[idx][0], (650, 650))
pred_values += y_pred_idx
pred_count += 1
# Normalize
pred_values = pred_values / pred_count
pred_values_array[idx, :, :] = pred_values
return pred_values_array
def _internal_pred_to_poly_file_test(area_id,
y_pred,
min_th=MIN_POLYGON_AREA):
"""
Write out test poly
"""
prefix = area_id_to_prefix(area_id)
# Load test imagelist
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
# Make parent directory
fn_out = FMT_TESTPOLY_PATH.format(prefix)
if not Path(fn_out).parent.exists():
Path(fn_out).parent.mkdir(parents=True)
# Ensemble individual models and write out output files
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
for idx, image_id in enumerate(df_test.index.tolist()):
df_poly = mask_to_poly(y_pred[idx], min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
def _internal_pred_to_poly_file(area_id,
y_pred,
min_th=MIN_POLYGON_AREA):
"""
Write out valtest poly and truepoly
"""
prefix = area_id_to_prefix(area_id)
# Load valtest imagelist
fn_valtest = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_valtest = | pd.read_csv(fn_valtest, index_col='ImageId') | pandas.read_csv |
import numpy as np
from numpy import nan
import pytest
from pandas._libs import groupby, lib, reduction
from pandas.core.dtypes.common import ensure_int64
from pandas import Index, isna
from pandas.core.groupby.ops import generate_bins_generic
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
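# Note: rows labeled -1 are treated as out-of-group by SeriesGrouper and are excluded
# from the aggregated results and group counts below.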
def test_series_grouper():
from pandas import Series
obj = Series(np.random.randn(10))
dummy = obj[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
def test_series_bin_grouper():
from pandas import Series
obj = Series(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
class TestBinGroupers:
def setup_method(self, method):
self.obj = np.random.randn(10, 1)
self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
self.bins = np.array([3, 6], dtype=np.int64)
def test_generate_bins(self):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6, 9], dtype=np.int64)
for func in [lib.generate_bins_dt64, generate_bins_generic]:
bins = func(values, binner, closed="left")
assert (bins == np.array([2, 5, 6])).all()
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6, 6])).all()
for func in [lib.generate_bins_dt64, generate_bins_generic]:
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6], dtype=np.int64)
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6])).all()
msg = "Invalid length for values or for binner"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [], "right")
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values[:0], binner, "right")
msg = "Values falls before first bin"
with pytest.raises(ValueError, match=msg):
| generate_bins_generic(values, [4], "right") | pandas.core.groupby.ops.generate_bins_generic |
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib as mpl
from sklearn.model_selection import TimeSeriesSplit
import matplotlib.pyplot as plt
from numpy import nan
cv_splits = 3 # time series cross validator
'''
Load dataset
'''
def load_data(path):
raw_data = pd.read_csv(path,infer_datetime_format=True)
return raw_data
'''
Split data into training and validation sets
'''
def split_data(training, perc=10):
train_raw = np.arange(0, int(len(training)*(100-perc)/100)) # contains the first 90% of the data
validation_raw = np.arange(int(len(training)*(100-perc)/100+1), len(training)) # contains the last 10%
return train_raw, validation_raw
'''
Convert data to univariate, using a single feature (incident_date) as input
'''
def to_univariate(df):
df_uni = df.drop(columns=['cause_of_incident', 'city_name', 'description', 'cause_of_incident', 'from_road', 'to_road',
'affected_roads', 'incident_category_desc','magnitude_of_delay_desc', 'length_in_meters',
'delay_in_seconds', 'latitude','longitude'])
return df_uni
'''
Convert data to multiariate, using multiple features (incident, length_in_meters, latitude, longitude) as input
'''
def to_multivariate(df):
df_uni = df.drop(columns=['cause_of_incident', 'city_name', 'description', 'cause_of_incident', 'from_road', 'to_road',
'affected_roads', 'incident_category_desc','magnitude_of_delay_desc','delay_in_seconds', ])
return df_uni
'''
Prepare data to have the number of daily incidents
'''
def to_daily(method, df):
if method == 'univariate':
df_uni = to_univariate(df)
        df_uni['incident_date'] = df_uni['incident_date'].str[:10] # keep only the first 10 characters (the YYYY-MM-DD date part)
df_uni['Incidents'] = pd.DataFrame([1 for x in range(len(df_uni.index))]) # create a column with 1 to sum the incidents per day
df_uni = df_uni.set_index('incident_date') # set the column incident_date to index
df_uni.index = | pd.to_datetime(df_uni.index) | pandas.to_datetime |
#import serial
import keras
import pandas as pd
#import serial.tools.list_ports
import os
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
#from com_serial import *
#from filter import *
from model import *
from sklearn.metrics import confusion_matrix,classification_report
rawdata = []
pipi = []
alis = []
wkt = []
count = 0
header_list = ['Waktu','Pipi','Alis']
data = []
X = []
y = []
#akuisisi()
#write()
#filtering()
#extract_feature('Data_filter')
#model = create_model()
model = keras.models.load_model('model_lstm2')
maindirs = 'Feature_extract'
dirs = os.listdir(maindirs)
emosi = ['kaget','marah','santai','senang']  # emotion labels (Indonesian): startled, angry, relaxed, happy
df = pd.read_csv(maindirs+"/"+"tes_extracted.csv")
#df = pd.read_csv(maindirs+"/"+"tes_extracted4.csv")
d_t = df.drop('EMOSI',axis=1)
label = | pd.get_dummies(df['EMOSI']) | pandas.get_dummies |
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
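    # Sum `key` over a trailing window of `length` quarters ending at quarter offset `seed`
    # (returns None if any quarter in the window is missing).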
def get_sum_quarters(self,df,key,seed,length):
values=[]
        #BIG BUG, this was originally -length-1, which was always truncating the array and producing nans.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
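    # Net debt = short-term debt + long-term debt + capital lease obligations
    # minus cash-like items (cash, restricted cash, fed funds sold,
    # interest-bearing deposits at other banks, time deposits placed).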
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
        m=pd.Series([1,1,1,-1,-1,-1,-1,-1]) #one multiplier per item in s above; cash-like items are subtracted
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
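    # Earnings yield = trailing EBIT / enterprise value.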
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
        SNOA = (Operating Assets - Operating Liabilities) / Total Assets
        where
        OA = total assets - cash and equivalents
        OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common equity
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
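    # Note: as written this returns (revenue - cost of revenue) / cost of revenue,
    # i.e. a markup over cost rather than the textbook gross margin over revenue.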
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
#length1=how far back to go, how many quarters to get 20 quarters
#length2=for each quarter, how far back to go 4 quarters
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any() or grossmargins.std()==0:
return None
marginstability=grossmargins.mean()/grossmargins.std()
if pd.isnull(marginstability):
return None
return float(marginstability)
def get_cacl(self,df,seed=-1):
a=self.get_value(df,'totalcurrentassets',seed)
l=self.get_value(df,'totalcurrentliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_tatl(self,df,seed=-1):
a=self.get_value(df,'totalassets',seed)
l=self.get_value(df,'totalliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_longterm_cacl(self,df,seed=-1,length=20):
ltcacls=[]
for i in range(seed,seed-length,-1):
ltcacls.append(self.get_cacl(df,i))
ltcacls=pd.Series(ltcacls)
if pd.isnull(ltcacls).any():
return None
return stats.gmean(1+ltcacls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_longterm_tatl(self,df,seed=-1,length=20):
lttatls=[]
for i in range(seed,seed-length,-1):
lttatls.append(self.get_tatl(df,i))
lttatls=pd.Series(lttatls)
if pd.isnull(lttatls).any():
return None
return stats.gmean(1+lttatls)-1 #not totally sure we need the 1+, and the -1 11/9/17
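    # Capital expenditure over the window: the sign-flipped sum of the purchase and sale
    # of plant, property and equipment cash-flow items.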
def get_capex(self,df,seed=-1,length=4):
purchaseofplantpropertyandequipment=self.get_sum_quarters(df,'purchaseofplantpropertyandequipment',seed,length)
saleofplantpropertyandequipment=self.get_sum_quarters(df,'saleofplantpropertyandequipment',seed,length)
s=pd.Series([purchaseofplantpropertyandequipment,saleofplantpropertyandequipment])
if pd.isnull(s).all():
return None
m=pd.Series([-1,-1])
capex=(s*m).sum()
if capex is None:
return None
return float(capex)
def get_freecashflow(self,df,seed=-1):
netcashfromoperatingactivities=self.get_value(df,'netcashfromoperatingactivities',seed)
capex=self.get_capex(df,seed,length=1)
s=pd.Series([netcashfromoperatingactivities,capex])
if pd.isnull(s).all():
return None
m=pd.Series([1,-1])
fcf=(s*m).sum()
return float(fcf)
#add a length2 paramater so we take the sums of cash flows
def get_cashflowonassets(self,df,seed=-1,length1=20,length2=4):
cfoas=[]
for i in range(seed,seed-length1,-1):
start_assets=self.get_value(df,'totalassets',i-length2)
end_assets=self.get_value(df,'totalassets',i)
fcfs=[]
for k in range(i,i-length2,-1):
fcf=self.get_freecashflow(df,k)
fcfs.append(fcf)
if pd.isnull(fcfs).any():
return None
total_fcf=pd.Series(fcfs).sum()
avg_assets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([total_fcf,avg_assets]).any() or avg_assets==0:
return None
else:
cfoas.append(total_fcf/avg_assets)
if pd.isnull(cfoas).any():
return None
else:
if pd.isnull(stats.gmean(1+pd.Series(cfoas))-1):
return None
else:
return stats.gmean(1+pd.Series(cfoas))-1 #we want to punish variability because the higher number the better
def get_roa(self,df,seed=-1,length=4):
netincome=self.get_sum_quarters(df,'netincome',seed,length)
start_assets=self.get_value(df,'totalassets',seed-length)
end_assets=self.get_value(df,'totalassets',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([netincome,totalassets]).any() or totalassets==0:
return None
roa=netincome/totalassets
return float(roa)
def get_roc(self,df,seed=-1,length=4):
ebit=self.get_ebit(df,seed,length)
dividends=self.get_sum_quarters(df,'paymentofdividends',seed,length)
start_debt=self.get_netdebt(df,seed-length)
end_debt=self.get_netdebt(df,seed)
netdebt=pd.Series([start_debt,end_debt]).mean()
start_equity=self.get_value(df,'totalequity',seed-length)
end_equity=self.get_value(df,'totalequity',seed)
totalequity=pd.Series([start_equity,end_equity]).mean()
num=pd.Series([ebit,dividends]).sum()
den=pd.Series([netdebt,totalequity]).sum()
if pd.isnull([num,den]).any() or den==0:
return None
else:
roc=(float(num/den))
return float(roc)
def get_longtermroa(self,df,seed=-1,length1=20,length2=4):
roas=[]
for i in range(seed,seed-length1,-1):
roas.append(self.get_roa(df,i,length2))
if pd.isnull(roas).any():
return None
longtermroagmean=stats.gmean(1+pd.Series(roas))-1
if pd.isnull(longtermroagmean):
return None
return float(longtermroagmean)
def get_longtermroc(self,df,seed=-1,length1=20,length2=4):
rocs=[]
for i in range(seed,seed-length1,-1):
rocs.append(self.get_roc(df,i,length2))
rocs=pd.Series(rocs)
if pd.isnull(rocs).any():
return None
roc=stats.gmean(1+rocs)-1
if pd.isnull(roc):
return None
return float(roc)
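    # Trailing cumulative return over the lookback period (default ~11 months),
    # computed as the product of (1 + daily pct change) minus 1.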
def get_momentum(self,df,period=relativedelta(months=11)):
df=df[pd.to_datetime(df['date'])>=pd.to_datetime(df['date'].max())-period]
df=df['adj_close'].astype('float')
pctchange=df.pct_change()
pctchange=pctchange.dropna()
pctchange=1+pctchange
pctchange=pctchange.tolist()
gain=np.prod(pctchange)
return float(gain-1)
def get_fip(self,df,period=relativedelta(years=1)):
orig_df=df.copy()
df=df[pd.to_datetime(df['date'])>=pd.to_datetime(df['date'].max())-period]
df=df['adj_close'].astype('float')
pctchange=df.pct_change()
pctchange=pctchange.dropna()
if len(pctchange)==0:
return None
updays=(pctchange>0).sum()
downdays=(pctchange<0).sum()
fip=float(downdays)/float(len(pctchange))-float(updays)/float(len(pctchange))
if self.get_momentum(orig_df)<0:
fip=-1*fip
return fip #the lower the better
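    # Average of a balance-sheet item at the start and end of the window;
    # returns None if either endpoint is missing or zero.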
def get_balance_sheet_mean_value(self,df,tag,seed=-1,length=1):
start=self.get_value(df,tag,seed-length)
end=self.get_value(df,tag,seed)
if pd.isnull(pd.Series([start,end])).any() or start==0 or end==0:
return None
average=pd.Series([start,end]).mean()
if pd.isnull(average):
return None
else:
return float(average)
def get_dsri(self,df,seed1=-1,seed2=-5,length=4):
#seed1 and 2 are the quarters we are comparing between
#dsri=(ttmsdfcompany.iloc[-1]['accountsreceivable']/ttmsdfcompany.iloc[-1]['totalrevenue'])/(ttmsdfcompany.iloc[-5]['accountsreceivable']/ttmsdfcompany.iloc[-5]['totalrevenue'])
#accountsreceivable1=self.get_value(cik,'balance_sheet','accountsreceivable',seed1)
#accountsreceivable2=self.get_value(cik,'balance_sheet','accountsreceivable',seed2)
accountsreceivable1=self.get_balance_sheet_mean_value(df, 'accountsreceivable', seed1,length)
accountsreceivable2=self.get_balance_sheet_mean_value(df, 'accountsreceivable', seed2,length)
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([accountsreceivable1,accountsreceivable2,totalrevenue1,totalrevenue2]).any() or totalrevenue1==0 or totalrevenue2==0:
return None
num=accountsreceivable1/totalrevenue1
den=accountsreceivable2/totalrevenue2
if den==0:
return None
dsri=num/den
return float(dsri)
def get_gmi(self,df,seed1=-1,seed2=-5,length=4):
#gmi=((ttmsdfcompany.iloc[-5]['totalrevenue']-ttmsdfcompany.iloc[-5]['totalcostofrevenue'])/ttmsdfcompany.iloc[-5]['totalrevenue'])/((ttmsdfcompany.iloc[-1]['totalrevenue']-ttmsdfcompany.iloc[-1]['totalcostofrevenue'])/ttmsdfcompany.iloc[-1]['totalrevenue'])
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
totalcostofrevenue1=self.get_sum_quarters(df,'totalcostofrevenue',seed1,length)
totalcostofrevenue2=self.get_sum_quarters(df,'totalcostofrevenue',seed2,length)
if pd.isnull([totalrevenue1,totalrevenue2,totalcostofrevenue1,totalcostofrevenue2]).any():
return None
if totalrevenue2==0 or totalrevenue1==0:
return None
num=(totalrevenue2-totalcostofrevenue2)/totalrevenue2
den=(totalrevenue1-totalcostofrevenue1)/totalrevenue1
gmi=num/den
if den==0:
return None
return float(gmi)
def get_aqi(self,df,seed1=-1,seed2=-5):
#https://www.oldschoolvalue.com/blog/investment-tools/beneish-earnings-manipulation-m-score/
#otherlta1=companydf.iloc[-1]['totalassets']-(companydf.iloc[-1]['totalcurrentassets']+companydf.iloc[-1]['netppe'])
#otherlta2=companydf.iloc[-5]['totalassets']-(companydf.iloc[-5]['totalcurrentassets']+companydf.iloc[-5]['netppe'])
# aqi=(otherlta1/companydf.iloc[-1]['totalassets'])/(otherlta2/companydf.iloc[-5]['totalassets'])
netppe1=self.get_value(df,'netppe',seed1)
netppe2=self.get_value(df,'netppe',seed2)
totalassets1=self.get_value(df,'totalassets',seed1)
totalassets2=self.get_value(df,'totalassets',seed2)
totalcurrentassets1=self.get_value(df,'totalcurrentassets',seed1)
totalcurrentassets2=self.get_value(df,'totalcurrentassets',seed2)
if pd.isnull([netppe1,netppe2,totalassets1,totalassets2,totalcurrentassets1,totalcurrentassets2]).any():
return None
a=totalassets1-totalcurrentassets1-netppe1
b=totalassets2-totalcurrentassets2-netppe2
if totalassets1==0 or totalassets2==0:
return None
num=a/totalassets1
den=b/totalassets2
if den==0:
return None
aqi=num/den
return float(aqi)
def get_sgi(self,df,seed1=-1,seed2=-5,length=4):
#sgi=ttmsdfcompany.iloc[-1]['totalrevenue']/ttmsdfcompany.iloc[-5]['totalrevenue']
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([totalrevenue1,totalrevenue2]).any():
return None
if totalrevenue2==0:
return None
sgi=totalrevenue1/totalrevenue2
return float(sgi)
def get_depi(self,df,seed1=-1,seed2=-5,length=4):
#depit=ttmsdfcompany.iloc[-1]['depreciationexpense']/(ttmsdfcompany.iloc[-1]['depreciationexpense']+ttmsdfcompany.iloc[-1]['netppe'])
#depit1=ttmsdfcompany.iloc[-5]['depreciationexpense']/(ttmsdfcompany.iloc[-5]['depreciationexpense']+ttmsdfcompany.iloc[-5]['netppe'])
#depi=depit1/depit
depreciationexpense1=self.get_sum_quarters(df,'depreciationexpense',seed1,length)
depreciationexpense2=self.get_sum_quarters(df,'depreciationexpense',seed2,length)
netppe1=self.get_balance_sheet_mean_value(df, 'netppe', seed1,length)
netppe2=self.get_balance_sheet_mean_value(df, 'netppe', seed2,length)
if pd.isnull([depreciationexpense1,depreciationexpense2,netppe1,netppe2]).any():
return None
num=depreciationexpense2/(depreciationexpense2+netppe2)
den=depreciationexpense1/(depreciationexpense1+netppe1)
if den==0:
return None
depi=num/den
return float(depi)
def get_sgai(self,df,seed1=-1,seed2=-5,length=4):
#sgait=ttmsdfcompany.iloc[-1]['sgaexpense']/ttmsdfcompany.iloc[-1]['totalrevenue']
#sgait1=ttmsdfcompany.iloc[-5]['sgaexpense']/ttmsdfcompany.iloc[-5]['totalrevenue']
#sgai=sgait/sgait1
sgaexpense1=self.get_sum_quarters(df,'sgaexpense',seed1,length)
sgaexpense2=self.get_sum_quarters(df,'sgaexpense',seed2,length)
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if | pd.isnull([sgaexpense1,sgaexpense2,totalrevenue1,totalrevenue2]) | pandas.isnull |
"""
sess_load_util.py
This module contains functions for loading data from files generated by the
Allen Institute OpenScope experiments for the Credit Assignment Project.
Authors: <NAME>
Date: August, 2018
Note: this code uses python 3.7.
"""
import copy
import logging
from pathlib import Path
import cv2
import h5py
import json
import numpy as np
import pandas as pd
import pynwb
from util import file_util, gen_util, logger_util
from sess_util import sess_file_util, sess_gen_util, sess_sync_util
logger = logging.getLogger(__name__)
TAB = " "
NWB_FILTER_KS = 5
######################################
def get_sessid_from_mouse_df(mouse_n=1, sess_n=1, runtype="prod",
mouse_df="mouse_df.csv"):
"""
get_sessid_from_mouse_df(sessid)
Returns session ID, based on the mouse number, session number, and runtype,
based on the mouse dataframe.
Optional args:
- mouse_n (int) : mouse number
default: 1
- sess_n (int) : session number
default: 1
        - runtype (str)  : type of data
                           default: "prod"
- mouse_df (Path): path name of dataframe containing information on each
session. Dataframe should have the following columns:
mouse_n, sess_n, runtype
default: "mouse_df.csv"
Returns:
- sessid (int): session ID
"""
if isinstance(mouse_df, (str, Path)):
mouse_df = file_util.loadfile(mouse_df)
df_line = gen_util.get_df_vals(
mouse_df, ["mouse_n", "sess_n", "runtype"],
[int(mouse_n), int(sess_n), runtype],
single=True
)
sessid = int(df_line["sessid"].tolist()[0])
return sessid
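# Example usage (hypothetical values; assumes a mouse_df.csv with the columns listed above):
#   sessid = get_sessid_from_mouse_df(mouse_n=1, sess_n=2, runtype="prod", mouse_df="mouse_df.csv")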
######################################
def load_info_from_mouse_df(sessid, mouse_df="mouse_df.csv"):
"""
load_info_from_mouse_df(sessid)
Returns dictionary containing information from the mouse dataframe.
Required args:
- sessid (int): session ID
Optional args:
- mouse_df (Path): path name of dataframe containing information on each
session. Dataframe should have the following columns:
sessid, mouse_n, depth, plane, line, sess_n,
pass_fail, all_files, any_files, notes
default: "mouse_df.csv"
Returns:
- df_dict (dict): dictionary with following keys:
- all_files (bool) : if True, all files have been acquired for
the session
- any_files (bool) : if True, some files have been acquired for
the session
- dandi_id (str) : Dandi session ID
- date (str) : session date (i.e., yyyymmdd)
- depth (int) : recording depth
- plane (str) : recording plane ("soma" or "dend")
- line (str) : mouse line (e.g., "L5-Rbp4")
- mouse_n (int) : mouse number (e.g., 1)
- mouseid (int) : mouse ID (6 digits)
- notes (str) : notes from the dataframe on the session
- pass_fail (str) : whether session passed "P" or failed "F"
quality control
- runtype (str) : "prod" (production) or "pilot" data
- sess_n (int) : overall session number (e.g., 1)
- stim_seed (int) : random seed used to generated stimulus
"""
if isinstance(mouse_df, (str, Path)):
mouse_df = file_util.loadfile(mouse_df)
df_line = gen_util.get_df_vals(mouse_df, "sessid", sessid, single=True)
df_dict = {
"mouse_n" : int(df_line["mouse_n"].tolist()[0]),
"dandi_id" : df_line["dandi_session_id"].tolist()[0],
"date" : int(df_line["date"].tolist()[0]),
"depth" : df_line["depth"].tolist()[0],
"plane" : df_line["plane"].tolist()[0],
"line" : df_line["line"].tolist()[0],
"mouseid" : int(df_line["mouseid"].tolist()[0]),
"runtype" : df_line["runtype"].tolist()[0],
"sess_n" : int(df_line["sess_n"].tolist()[0]),
"stim_seed" : int(df_line["stim_seed"].tolist()[0]),
"pass_fail" : df_line["pass_fail"].tolist()[0],
"all_files" : bool(int(df_line["all_files"].tolist()[0])),
"any_files" : bool(int(df_line["any_files"].tolist()[0])),
"notes" : df_line["notes"].tolist()[0],
}
return df_dict
#############################################
def get_mouseid_sessid_nwb(nwb_files):
"""
get_mouseid_sessid_nwb(nwb_files)
Returns the mouse ID and session ID retrieve from NWB files. If several
files are passed, they are expected to be for the same session
(with identical mouse ID and session IDs).
Required args:
- nwb_files (Path or list): path(s) to the NWB file(s) for a single
session
Returns:
- mouseid (str): mouse ID (Allen)
- sessid (str) : session ID (Allen)
"""
nwb_files = gen_util.list_if_not(nwb_files)
mouseid, sessid = None, None
for nwb_file in nwb_files:
with pynwb.NWBHDF5IO(str(nwb_file), "r") as f:
nwbfile_in = f.read()
new_mouseid = nwbfile_in.subject.subject_id
if mouseid is None:
mouseid = new_mouseid
elif mouseid != new_mouseid:
nwb_filenames = [str(filename) for filename in nwb_files]
raise RuntimeError(
"Mouse IDs for different NWB files for the same session "
f"do not match: {', '.join(nwb_filenames)}."
)
new_sessid = nwbfile_in.identifier
if sessid is None:
sessid = new_sessid
elif sessid != new_sessid:
nwb_filenames = [str(filename) for filename in nwb_files]
raise RuntimeError(
"Session IDs for different NWB files for the same session "
f"do not match: {', '.join(nwb_filenames)}."
)
return mouseid, sessid
#############################################
def load_small_stim_pkl(stim_pkl, runtype="prod"):
"""
load_small_stim_pkl(stim_pkl)
Loads a smaller stimulus dictionary from the stimulus pickle file in which
"posbyframe" for visual flow stimuli is not included.
If it does not exist, small stimulus dictionary is created and saved as a
pickle with "_small" appended to name.
Reduces the pickle size about 10 fold.
Required args:
- stim_pkl (Path): full path name for the full stimulus pickle file
Optional args:
- runtype (str): runtype ("prod" or "pilot")
"""
stim_pkl = Path(stim_pkl)
stim_pkl_no_ext = Path(stim_pkl.parent, stim_pkl.stem)
small_stim_pkl_name = Path(f"{stim_pkl_no_ext}_small.pkl")
if small_stim_pkl_name.is_file():
return file_util.loadfile(small_stim_pkl_name)
else:
logger.info("Creating smaller stimulus pickle.", extra={"spacing": TAB})
stim_dict = file_util.loadfile(stim_pkl)
if runtype == "pilot":
stim_par_key = "stimParams"
elif runtype == "prod":
stim_par_key = "stim_params"
else:
gen_util.accepted_values_error(
"runtype", runtype, ["prod", "pilot"])
for i in range(len(stim_dict["stimuli"])):
stim_keys = stim_dict["stimuli"][i][stim_par_key].keys()
stim_par = stim_dict["stimuli"][i][stim_par_key]
if runtype == "pilot" and "posByFrame" in stim_keys:
_ = stim_par.pop("posByFrame")
elif runtype == "prod" and "square_params" in stim_keys:
_ = stim_par["session_params"].pop("posbyframe")
file_util.saveinfo(stim_dict, small_stim_pkl_name)
return stim_dict
#############################################
def load_stim_df_info(stim_pkl, stim_sync_h5, time_sync_h5, align_pkl, sessid,
runtype="prod"):
"""
load_stim_df_info(stim_pkl, stim_sync_h5, time_sync_h5, align_pkl, sessid)
Creates the alignment dataframe (stim_df) and saves it as a pickle
in the session directory, if it does not already exist. Returns dataframe,
alignment arrays, and frame rate.
Required args:
- stim_pkl (Path) : full path name of the experiment stim pickle
file
- stim_sync_h5 (Path): full path name of the experiment sync hdf5 file
- time_sync_h5 (Path): full path name of the time synchronization hdf5
file
- align_pkl (Path) : full path name of the output pickle file to
create
- sessid (int) : session ID, needed the check whether this
session needs to be treated differently
(e.g., for alignment bugs)
Optional args:
- runtype (str): runtype ("prod" or "pilot")
default: "prod"
Returns:
- stim_df (pd DataFrame): stimlus alignment dataframe with columns:
"stimtype", "unexp", "stim_seg", "gabfr",
"gab_ori", "gabk", "visflow_dir",
"visflow_size", "start_twop_fr",
"end_twop_fr", "num_twop_fr"
- stimtype_order (list) : stimulus type order
- stim2twopfr (1D array): 2p frame numbers for each stimulus frame,
as well as the flanking
blank screen frames
- twop_fps (num) : mean 2p frames per second
- twop_fr_stim (int) : number of 2p frames recorded while stim
was playing
"""
align_pkl = Path(align_pkl)
sessdir = align_pkl.parent
# create stim_df if doesn't exist
if not align_pkl.is_file():
logger.info(f"Stimulus alignment pickle not found in {sessdir}, and "
"will be created.", extra={"spacing": TAB})
sess_sync_util.get_stim_frames(
stim_pkl, stim_sync_h5, time_sync_h5, align_pkl, sessid, runtype,
)
align = file_util.loadfile(align_pkl)
stim_df = align["stim_df"]
stim_df = stim_df.rename(
columns={"GABORFRAME": "gabfr",
"surp": "unexp", # rename surprise to unexpected
"stimType": "stimtype",
"stimSeg": "stim_seg",
"start_frame": "start_twop_fr",
"end_frame": "end_twop_fr",
"num_frames": "num_twop_fr"})
# rename bricks -> visflow
stim_df["stimtype"] = stim_df["stimtype"].replace({"b": "v"})
stim_df = modify_visflow_segs(stim_df, runtype)
stim_df = stim_df.sort_values("start_twop_fr").reset_index(drop=True)
# note: STIMULI ARE NOT ORDERED IN THE PICKLE
stimtype_map = {
"g": "gabors",
"v": "visflow"
}
stimtype_order = stim_df["stimtype"].map(stimtype_map).unique()
stimtype_order = list(
filter(lambda s: s in stimtype_map.values(), stimtype_order))
# split stimPar1 and stimPar2 into all stimulus parameters
stim_df["gab_ori"] = stim_df["stimPar1"]
stim_df["gabk"] = stim_df["stimPar2"]
stim_df["visflow_size"] = stim_df["stimPar1"]
stim_df["visflow_dir"] = stim_df["stimPar2"]
stim_df = stim_df.drop(columns=["stimPar1", "stimPar2"])
for col in stim_df.columns:
if "gab" in col:
stim_df.loc[stim_df["stimtype"] != "g", col] = -1
if "visflow" in col:
stim_df.loc[stim_df["stimtype"] != "v", col] = -1
# expand on direction info
for direc in ["right", "left"]:
stim_df.loc[(stim_df["visflow_dir"] == direc), "visflow_dir"] = \
sess_gen_util.get_visflow_screen_mouse_direc(direc)
stim2twopfr = align["stim_align"].astype("int")
twop_fps = sess_sync_util.get_frame_rate(stim_sync_h5)[0]
twop_fr_stim = int(max(align["stim_align"]))
return stim_df, stimtype_order, stim2twopfr, twop_fps, twop_fr_stim
#############################################
def load_max_projection_nwb(sess_files):
"""
load_max_projection_nwb(sess_files)
Returns maximum projection image of downsampled z-stack as an array, from
NWB files.
Required args:
- sess_files (Path): full path names of the session files
Returns:
- max_proj (2D array): maximum projection image across downsampled
z-stack (hei x wei), with pixel intensity
in 0 (incl) to 256 (excl) range
("uint8" datatype).
"""
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), "r") as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module("ophys")
main_field = "PlaneImages"
data_field = "max_projection"
try:
max_proj = ophys_module.get_data_interface(
main_field).get_image(data_field)[()].astype("uint8")
except KeyError as err:
raise KeyError(
"Could not find a maximum projection plane image "
f"for {ophys_file} due to: {err}"
)
return max_proj
#############################################
def load_max_projection(max_proj_png):
"""
load_max_projection(max_proj_png)
Returns maximum projection image of downsampled z-stack as an array.
Required args:
- max_proj_png (Path): full path names of the maximum projection png
Returns:
- max_proj (2D array): maximum projection image across downsampled
z-stack (hei x wei), with pixel intensity
in 0 (incl) to 256 (excl) range
("uint8" datatype).
"""
if not Path(max_proj_png).is_file():
raise OSError(f"{max_proj_png} does not exist.")
import imageio
max_proj = imageio.imread(max_proj_png).astype("uint8")
return max_proj
#############################################
def get_registration_transform_params(nway_match_path, sessid, targ_sess_idx=0):
"""
get_registration_transform_params(nway_match_path, sessid)
Returns cv2.warpPerspective registration transform parameters used to
register session planes to one another, saved in the n-way match files.
(cv2.warpPerspective should be used with flags cv2.INTER_LINEAR and
cv2.WARP_INVERSE_MAP)
Required args:
- nway_match_path (Path): full path name of the n-way registration path
(should be a local path in a directory that
other session registrations are also stored)
- sessid (int) : session ID
Optional args:
- targ_sess_idx (int): session that the registration transform should
be targetted to
default: 0
Returns:
- transform_params (3D array): registration transformation parameters
for the session (None if the session was
the registration target)
"""
if not Path(nway_match_path).is_file():
raise OSError(f"{nway_match_path} does not exist.")
with open(nway_match_path, "r") as f:
nway_metadata = pd.DataFrame().from_dict(json.load(f)["metadata"])
if len(nway_metadata) != 1:
raise NotImplementedError(
"Metadata dataframe expected to only have one line."
)
nway_row = nway_metadata.loc[nway_metadata.index[0]]
if sessid not in nway_row["sess_ids"]:
raise RuntimeError(
"sessid not found in the n-way match metadata dataframe."
)
sess_idx = nway_row["sess_ids"].index(sessid)
sess_n = nway_row["sess_ns"][sess_idx]
n_reg_sess = len(nway_row["sess_ids"])
if targ_sess_idx >= n_reg_sess:
raise ValueError(
f"targ_sess_idx is {targ_sess_idx}, but only {n_reg_sess} "
"sessions were registered to one another.")
targ_sess_id = nway_row["sess_ids"][targ_sess_idx]
targ_sess_n = nway_row["sess_ns"][targ_sess_idx]
if targ_sess_id == sessid:
return None # no transform needed
# get transform from the target session's nway file
if str(sessid) not in str(nway_match_path):
raise ValueError(
"Expected the n-way_match_path to contain the session ID."
)
targ_nway_match_path = Path(
str(nway_match_path).replace(str(sessid), str(targ_sess_id))
)
if not Path(targ_nway_match_path).is_file():
raise RuntimeError(f"Expected to find {targ_nway_match_path} to "
"retrieve registration transform, but file does not exist.")
with open(targ_nway_match_path, "r") as f:
target_nway_metadata = pd.DataFrame().from_dict(json.load(f)["metadata"])
column_name = f"sess_{sess_n}_to_sess_{targ_sess_n}_transformation_matrix"
if column_name not in target_nway_metadata.columns:
raise RuntimeError(
f"Expected to find {column_name} column in the metadata "
"dataframe for the target session."
)
    if len(target_nway_metadata) != 1:
raise NotImplementedError(
"Target session metadata dataframe expected to only have one line."
)
target_nway_row = target_nway_metadata.loc[target_nway_metadata.index[0]]
transform_params = np.asarray(target_nway_row[column_name])
return transform_params
#############################################
def apply_registration_transform(nway_match_path, sessid, image,
targ_sess_idx=0):
"""
apply_registration_transform(nway_match_path, sessid, image)
Returns an image transformed using registration transform parameters, saved
in the n-way match files.
Required args:
- nway_match_path (Path): full path name of the n-way registration path
(should be a local path in a directory that
other session registrations are also stored)
- sessid (int) : session ID
- image (2 or 3D array) : image to transform, with dimensions
(item x) hei x wid. Only certain datatypes
are supported by the OpenCV function used,
e.g. float or uint8.
Optional args:
- targ_sess_idx (int) : session that the registration transform
should be targetted to
default: 0
Returns:
- registered_image (2 or 3D array) : transformed image, with dimensions
(item x) hei x wid.
"""
registration_transform_params = get_registration_transform_params(
nway_match_path, sessid, targ_sess_idx=targ_sess_idx
)
if registration_transform_params is None:
registered_image = image
else:
len_image_shape = len(image.shape)
if len_image_shape == 2:
image = np.asarray([image])
elif len_image_shape != 3:
raise ValueError("image must be a 2D or 3D array.")
image = np.asarray(image)
if registration_transform_params.shape != (3, 3):
raise RuntimeError(
"registration_transform_params retrieved is expected to have "
"shape (3, 3), but found shape "
f"{registration_transform_params.shape}."
)
try:
registered_image = np.asarray(
[cv2.warpPerspective(
sub,
registration_transform_params,
dsize=sub.shape,
flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP
)
for sub in image]
)
except cv2.error as err:
# try to capture and clarify obscure datatype errors raised by
# OpenCV
if "ifunc" in str(err) or "argument 'src'" in str(err):
raise RuntimeError(
"The following error was raised by OpenCV during image "
f"warping: {err}May be due to the use of an unsupported "
"datatype. Supported datatypes include uint8, int16, "
"uint16, float32, and float64."
)
else:
raise err
if len_image_shape == 2:
registered_image = registered_image[0]
return registered_image
#############################################
def get_tracking_perm_example_df(nway_match_path, sessid=None,
idx_after_rem_bad=False):
"""
get_tracking_perm_example_df(nway_match_path)
Returns dataframe with tracking permutation example data. (Only a few mice
have this data included in their nway-match files.)
Required args:
- nway_match_path (Path): full path name of the n-way registration path
(should be a local path in a directory that
other session registrations are also stored)
Optional args:
- sessid (int) : session ID, for error message if file does
not contain the tracking permutation
example key
default: None
- idx_after_rem_bad (bool): if True, the ROI indices (not IDs, however)
are shifted to as if bad ROIs did not exist
(bad ROIs computed for dF/F only)
default: False
Returns:
- nway_tracking_ex_df (pd.DataFrame): dataframe listing ROI tracking
matches that were yielded using different session permutations,
with columns:
['match_level'] (str):
whether this permutation produces the 'most' or 'fewest'
matches, or whether the row reflects the 'union'
['n_total'] (int):
total number of ROIs for the match level
if 'match_level' is 'most' or 'fewest' (NaN if 'union')
['sess_order'] (list):
session number order for this permutation
['dff_local_missing_roi_idx'] (list):
indices of ROIs that are included in the final tracked ROIs for
the session, but were not identified with this permutation
['dff_local_extra_roi_idx'] (list):
indices of ROIs that are not included in the final tracked ROIs
for the session, but were identified with this permutation
['sess{}_missing_roi_id'] (list):
ROI IDs/names corresponding to 'dff_local_missing_roi_idx'
['sess{}_extra_roi_id'] (list):
ROI IDs/names corresponding to 'dff_local_extra_roi_idx'
"""
if not Path(nway_match_path).is_file():
raise OSError(f"{nway_match_path} does not exist.")
with open(nway_match_path, "r") as f:
nway_dict = json.load(f)
match_key = "match_perm_examples"
if match_key not in nway_dict.keys():
sess_str = "" if sessid is None else f" for session {sessid}"
raise RuntimeError(f"nway-match file{sess_str} does not contain "
f"example tracking permutation data under {match_key}."
)
nway_tracking_ex_df = pd.DataFrame().from_dict(nway_dict[match_key])
# check that missing ROI indices are all tracked, and extra ROI indices are
# all untracked for the session
rois_df = pd.DataFrame().from_dict(nway_dict["rois"])
for col in nway_tracking_ex_df.columns:
if "roi_idx" not in col:
continue
targ_vals = rois_df["dff-ordered_roi_index"].tolist()
for row_idx in nway_tracking_ex_df.index:
if nway_tracking_ex_df.loc[row_idx, "match_level"] == "union":
continue
roi_idxs = nway_tracking_ex_df.loc[row_idx, col]
for n in roi_idxs:
if n in targ_vals and "extra" in col:
raise RuntimeError(
"Some ROIs identified as 'extra' are in fact tracked."
)
elif n not in targ_vals and "missing" in col:
raise RuntimeError(
"Some ROIs identified as 'missing' are not in fact "
"tracked."
)
# shift ROI indices to as if bad ROIs did not exist
if idx_after_rem_bad:
bad_rois_df = | pd.DataFrame() | pandas.DataFrame |
from config_chbp_eeg import bids_root, deriv_root, N_JOBS
import pandas as pd
from joblib import Parallel, delayed
import mne
import coffeine
from config_chbp_eeg import bids_root
subjects_list = list(bids_root.glob('*'))
subjects_df = | pd.read_csv(bids_root / "participants.tsv", sep='\t') | pandas.read_csv |
import os
import json
import pandas as pd
import datetime
import numpy as np
import itertools
from pprint import pprint
from tqdm import tqdm as pbar
from textblob import TextBlob
pd.plotting.register_matplotlib_converters()
#Input: N/A
#Return: A list of strings containing all chat names
def get_chats_names():
chat_names = []
for file in os.listdir(os.getcwd() + '/json'):
chat_names.append(file.split('.')[0])
return chat_names
#Input: String input
#Return: JSON of chatname
def get_json(name):
with open('json/' + name + '.json', 'r') as json_file:
return json.load(json_file)
#Input: A list of JSON objects holding all the messages of a specific chat
#Return: A list holding JSON of sentiment analysis per message
def get_chat_sentiment(messages):
analyzed_messages = [{"sentiment": message['sentiment'], "date": message['date']} for message in messages ]
return analyzed_messages
#Input: A list of JSON holding sentiment analysis
#Return: A JSON of lists of the average sentiment over a weekly basis (Used to be data frame, might change it back)
#Note: Changing the time is dependent on the resample library, in which the basis are 'D'ays, 'W'eeks, 'M'onths, etc.
def sentiment_over_time(messages, time='W'):
df = pd.io.json.json_normalize(messages)
df['date'] = pd.to_datetime(df['date'])
df = df[df.sentiment != 0]
df = df.resample(time, on='date').mean()
df = df.dropna()
# df = df.loc[datetime.date(year=2019,month=3,day=1):datetime.date(year=2019,month=5,day=5)] #Personal Use, ignore this line
dates = df.index.tolist()
sentiments = df['sentiment'].tolist()
return {
#These are lists
"date": dates,
"sentiment": sentiments
}
#Input: N/A
#Return: JSON of all messages amongst all conversations
def all_conversations():
chats = get_chats_names()
convs = [get_json(chat)['messages'] for chat in chats]
all_convs = list(itertools.chain.from_iterable(convs))
return all_convs
#Input: A JSON of a conversation
#Return: A JSON of the conversation, divided by sender
def split_conversation(messages):
split_messages = {}
for message in messages:
if message['sender'] in split_messages:
split_messages[message['sender']].append(message)
else:
            split_messages[message['sender']] = [message]  # seed the list with this first message so it is not dropped
return [{"name": conversation[0], "messages": conversation[1]} for conversation in split_messages.items()]
#Input: A JSON of a conversation
#Return: A list of JSONs of the sentiments split by person over time
def split_sentiment(messages):
conversations = split_conversation(messages)
sentiments = []
for conversation in conversations:
if len(conversation['messages']) == 0: #Person never sent messages
continue
sentiment = sentiment_over_time(get_chat_sentiment(conversation['messages']))
sentiment['name'] = conversation['name']
sentiments.append(sentiment)
return sentiments
#Input: A JSON of a conversation
#Return: A JSON of a list of Dates and Message Counts.
#This could use reworking because I literally just count the rows, no restructuring.
def message_count_over_time(messages, time='W'):
df = pd.io.json.json_normalize(messages)
df['date'] = pd.to_datetime(df['date'])
df = df.resample(time, on='date')['text'].agg('count')
# df = df.loc[datetime.date(year=2019,month=3,day=25):datetime.date(year=2019,month=5,day=5)] #Personal Use, ignore this line
dates = df.index.tolist()
message_count = df.values.tolist()
return {
#These are lists
"date": dates,
"message_count": message_count
}
#Input: A JSON of a conversation
#Return: Returns the sentiment sorted by time instead of date
def count_by_time(messages):
df = | pd.io.json.json_normalize(messages) | pandas.io.json.json_normalize |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from decouple import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def pandas_df(path_to_jsons):
json_files = [pos_json for pos_json in os.listdir(path_to_jsons) if pos_json.endswith('.json')]
json_file = path_to_jsons + json_files[0]
data = | pd.read_json(json_file, lines=True) | pandas.read_json |
from utils.support_functions import calculate_rate_exact_day, calculate_rate_exact_day_cop, \
calculate_rate_exact_day_cop_inversed
from decimal import Decimal
import os
import pandas as pd
from settings import CURRENCIES, calc_categories
from statement_parser.preproccessing import get_category
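# Convert a (year, month, value, from_currency) row to `to_currency` using the exchange rate for
# that month/year; COP conversions go through the approximate COP-specific helpers.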
def conversion(x, to_currency):
year = x[0]
month = x[1]
value = x[2]
from_currency = x[3]
# in case it is the same currency
if from_currency == to_currency:
return Decimal(str(round(value, 4)))
    # in case we have COP (not supported with daily rates)
if from_currency == 'COP': # we use an approximated way:
rate = calculate_rate_exact_day_cop(to_currency)
    elif to_currency == 'COP': # we use an approximated way:
rate = calculate_rate_exact_day_cop_inversed(from_currency)
else:
rate = calculate_rate_exact_day(from_currency, month, year, to_currency)
if value == "":
return value
else:
#print(year,month,value, rate, from_currency, to_currency)
new_with = Decimal(str(round(rate, 4))) * Decimal(str(round(value, 4)))
return new_with
def MANUAL_parser(file_path, directories):
CATEGORIES, CATEGORIES_DEPOSIT, CATEGORIES_WITHDRAWAL = calc_categories(directories)
# process data
registry_accounts = [] # here we collect the name of the accounts we have read
registry_currency_per_account = [] # here we collect the currency of every account
registry_type_per_account = [] # here we collect the type of every account
registry = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import networkx as nx
from src.utils.utils_features import NetworkFeatureComputation
from src.data.financial_network import (
IndustryNetworkCreation,
IndustryNetworkCreationEORA,
)
from src.data.migration_network import MigrationNetworkCreation, EstimatedMigrationNetwork
from src.data.panel_data_etl import PanelDataETL
from src.utils.utils_s3 import read_s3_graphml, write_s3_graphml
def network_from_adjacency(adjacency_matrix,
node_index,
path,
tol_gfi=0.01,
tol_favor=0.0001):
df_adj = | pd.DataFrame(adjacency_matrix, index=node_index, columns=node_index) | pandas.DataFrame |
# National holidays : getHoliDeInfo
# Public holidays : getRestDeInfo
import requests
from bs4 import BeautifulSoup
import csv
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Build the holiday CSV file from the data.go.kr public-holiday API
def make_holiday_csv(years_lst=[2018, 2019, 2020, 2021], filename='C:/Users/km_mz/Desktop/dacon/daconcup/Data/holiday.csv'):
data_lst = []
for year in years_lst:
url = "http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService/getRestDeInfo"
key = '<KEY>'
query = f"?solYear={year}&ServiceKey={key}&_type=json&numOfRows={100}"
endpoint = url + query
res = requests.get(endpoint)
data = json.loads(res.text)['response']['body']['items']['item']
data_lst += data
with open(filename, 'w', newline='') as f:
cols = ['dateKind', 'dateName', 'isHoliday', 'locdate', 'seq']
writer = csv.DictWriter(f, fieldnames=cols)
writer.writeheader()
for row in data_lst:
writer.writerow(row)
print("Done making holiday.csv...")
def make_holiday_df(years_lst=[2018, 2019, 2020, 2021]):
data_lst = []
for year in years_lst:
url = "http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService/getRestDeInfo"
key = '<KEY>MTRowquqx%2BvsNwUWykYKqhmpX6jHhbDsIjhgYaq5UARFaZLTkQ7o75H%2BC%2FCfOHKMaLcA%3D%3D'
query = f"?solYear={year}&ServiceKey={key}&_type=json&numOfRows={100}"
endpoint = url + query
res = requests.get(endpoint)
data = json.loads(res.text)['response']['body']['items']['item']
data_lst += data
cols = ['dateKind', 'dateName', 'isHoliday', 'date', 'seq']
df = pd.DataFrame(data_lst, columns = cols)
return df
# Add an isHoliday column to a dataframe with a 'date' column (1 for weekends or official holidays, 0 otherwise)
def add_isHoliday_column(df_having_date):
holiday = make_holiday_df()
holiday.columns = ['dateKind', 'dateName', 'isHoliday', 'date', 'seq']
holiday['date'] = pd.to_datetime(holiday['date'])
df = pd.merge(df_having_date, holiday[['date', 'isHoliday']], on='date', how='left')
df['isHoliday'] = df.apply(lambda x: 1 if (x['date'].dayofweek == 5)|(x['date'].dayofweek == 6) | (x['isHoliday'] == 'Y') else 0, axis=1)
return df
# Aggregate the train data into daily totals
def train_prep(df):
df['DateTime'] = pd.to_datetime(df['DateTime'])
df['date'] = df.DateTime.dt.date
df = df.groupby('date').sum().reset_index()
return df
# Convert the data into a daily count dataframe
def info_prep(df, col='count'):
# date 변수 추출
df['c_time'] = pd.to_datetime(df['c_time'])
df['date'] = df['c_time'].dt.date
# missing value 제거
df = df.dropna(how='all') # 모든 row가 missing value 일 때
df = df.groupby('date')['date'].count().to_frame(name=col).reset_index()
return df
# Heatmap of counts by date (year-month rows, day-of-month columns)
def check_date(df, title=''):
"""
input:
        > df consisting of two columns: 'date' and the desired count
"""
import seaborn as sns
df['date'] = pd.to_datetime(df['date'])
df['year_month'] = df.apply(lambda x: x['date'].strftime('%Y') + '-' + x['date'].strftime('%m'), axis=1)
df['day'] = df['date'].dt.strftime('%d')
df_pivot = | pd.pivot_table(df, index='year_month', columns='day', values=df.columns[1]) | pandas.pivot_table |
from mpl_toolkits import mplot3d
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from plotnine import *
import copy, math
dist = 10
def find_min_discm_each_hyperparam(df):
x = df.sort_values(by=['Discm_percent', 'Points-Removed']).groupby("Model-count", as_index=False).first()
assert len(x) == 240
return x
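# Map a hyperparameter combination (data permutation, hidden-layer sizes, batch size) to its index
# in the 240-model grid (20 permutations x 3 h1 sizes x 2 h2 sizes x 2 batch sizes).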
def process_rows(row, batches):
# global batches
model_count = 0
for perm in range(20):
for h1units in [16, 24, 32]:
for h2units in [8, 12]:
for batch in batches: # different batch sizes for this dataset
if perm == row['Dataperm'] and h1units == row['H1Units'] and h2units == row['H2Units'] and batch == row['Batch']:
return model_count
else:
model_count += 1
def process_dfs(name, batches, df):
# import ipdb; ipdb.set_trace()
if 'Model-count' in df.columns:
df['Model-count2'] = df.apply(process_rows, axis=1, args=((batches,)))
assert (df['Model-count'] == df['Model-count2']).all()
df.drop(columns=['Model-count2'], inplace=True)
else:
df['Model-count'] = df.apply(process_rows, axis=1, args=((batches,)))
assert len(df['Model-count'].unique()) == 240 and df['Model-count'].max() == 239 and df['Model-count'].min() == 0
df = df.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
# df = df[['Model-count','Discm_percent','Test_acc']]
df = df[['Model-count','Discm_percent','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df['diff'] = abs(df['Class0_Pos'] - df['Class1_Pos']) * 100
df['Test_acc'] = df['Test_acc'].apply(lambda x: x * 100)
df['Techniques'] = name
if len(name.split()) > 1:
words = name.split()
letters = [word[0] for word in words]
x = "".join(letters)
df['Baseline'] = x
else:
df['Baseline'] = name[:2]
return df
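# Build one combined dataframe per dataset holding discrimination %, test accuracy and the
# positive-classification-rate gap for our technique and every baseline (used for plots and LaTeX rows).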
def boxplots_datasets(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count','Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos'])*100
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x*100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}.csv")
df_adver['Model-count'] = df_adver['Dataperm']*12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff']*100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
df_nosensitive['Discm_percent'] = 0.0
df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
df_nosensitive['Techniques'] = "Sens. Removed"
df_nosensitive['Baseline'] = "SR"
# df_nosensitive = process_dfs("Sensitive Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv"))
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our])
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", '0.0' ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, 'min-discm.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, 'max-test-accuracy.tex', dataset)
if test_accuracy_for_min_discm:
# for sensitive there is always 0 discrimination.
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, 'test_accuracy_for_min_discm.tex', dataset)
if median_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
"0.0",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, 'discm_for_max_accuracy.tex', dataset)
return df_main
def boxplots_datasets_dist(dataset, plot):
df1 = pd.read_csv(f"{dataset}/results_{dataset}_method1_dist{dist}.csv")
batches = sorted(list(df1.Batch.unique())) # sorting is important
assert(len(batches) == 2)
df_our = find_min_discm_each_hyperparam(df1)
df_our = df_our[['Model-count', 'Discm_percent', 'Test_acc', 'Class0_Pos', 'Class1_Pos']]
df_our['diff'] = abs(df_our['Class0_Pos'] - df_our['Class1_Pos']) * 100 # Statistical parity diff
df_our['Test_acc'] = df_our['Test_acc'].apply(lambda x: x * 100)
df_our['Techniques'] = "Our Technique"
df_our['Baseline'] = "Our"
# Massaging
df_massaging = process_dfs("MAssaging", batches, pd.read_csv(f"{dataset}/massaging/results_massaged_{dataset}_dist{dist}.csv"))
# Preferential Sampling
df_ps = process_dfs("Prefer. Sampling", batches, pd.read_csv(f"{dataset}/preferential_sampling/results_resampling_{dataset}_dist{dist}.csv"))
# Learning Fair representations
df_lfr = process_dfs("Learning Fair Repr.", batches, pd.read_csv(f"{dataset}/learning_fair_representations/results_lfr_{dataset}_dist{dist}.csv"))
# Disparate Impact Removed
df_DIR = process_dfs("Disp. Impact Rem", batches, pd.read_csv(f"{dataset}/disparate_impact_removed/results_disparate_removed_{dataset}_dist{dist}.csv"))
# Adversarial Sampling
df_adver = pd.read_csv(f"{dataset}/adversarial_debiasing/results_adversarial_debiased_{dataset}_dist{dist}.csv")
df_adver['Model-count'] = df_adver['Dataperm'] * 12
df_adver = df_adver.sort_values("Discm_percent").groupby("Model-count", as_index=False).first() # must be sorted in order of model count for comparison across baselines
df_adver = df_adver[['Model-count','Discm_percent','Test_acc','diff']]
df_adver['diff'] = df_adver['diff'] * 100
df_adver['Test_acc'] = df_adver['Test_acc'].apply(lambda x: x*100)
df_adver['Techniques'] = "Adversa. debias"
df_adver['Baseline'] = "AD"
# # Sensitive Attribute removed, therefore no discrimination
# df_nosensitive = pd.read_csv(f"{dataset}/results_{dataset}_nosensitive.csv")
df_nosensitive = process_dfs("Sens. Removed", batches, pd.read_csv(f"{dataset}/results_{dataset}_nosensitive_dist{dist}.csv"))
# df_nosensitive = df_nosensitive[['Model-count','Test_acc', 'Class0_Pos', 'Class1_Pos']]
# df_nosensitive['diff'] = abs(df_nosensitive['Class0_Pos'] - df_nosensitive['Class1_Pos'])*100
# df_nosensitive['Discm_percent'] = 0.0
# df_nosensitive['Test_acc'] = df_nosensitive['Test_acc'].apply(lambda x: x*100)
# df_nosensitive['Techniques'] = "Sens. Removed"
# df_nosensitive['Baseline'] = "SR"
# No technique used
df_noremoval = process_dfs("FULL", batches, pd.read_csv(f"{dataset}/results_{dataset}_noremoval_dist{dist}.csv"))
df_main = pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our], sort=True)
try:
assert(len(df_main) == 7*240 + 20)
except:
import ipdb; ipdb.set_trace()
if dataset == "compas-score":
dataset = "Recidivism-score"
elif dataset == "compas-ground":
dataset = "Recidivism-ground"
# df_main['Dataset'] = dataset.capitalize()
if dataset == "adult":
sizeofPSI = 4522200
id_ = "D1"
elif dataset == "adult_race":
sizeofPSI = 4313100
id_ = "D2"
elif dataset == "german":
sizeofPSI = 100000
id_ = "D3"
elif dataset == "student":
sizeofPSI = 64900
id_ = "D4"
elif dataset == "Recidivism-ground":
sizeofPSI = 615000
id_ = "D5"
elif dataset == "Recidivism-score":
sizeofPSI = 615000
id_ = "D6"
elif dataset == "default":
sizeofPSI = 3000000
id_ = "D7"
elif dataset == "salary":
sizeofPSI = 5200
id_ = "D8"
else:
raise NotImplementedError
df_main['Dataset'] = id_
precision = 1
if plot == 0:
min_discm = True
test_accuracy_for_min_discm = True
max_accuracy = True
discm_for_max_accuracy = True
median_discm = False
mean_accuracy = False
median_accuracy = False
if min_discm:
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].min():.{precision}e}", f"{df_nosensitive['Discm_percent'].min():.{precision}e}" ,f"{df_DIR['Discm_percent'].min():.{precision}e}", f"{df_ps['Discm_percent'].min():.{precision}e}", f"{df_massaging['Discm_percent'].min():.{precision}e}", f"{df_lfr['Discm_percent'].min():.{precision}e}", f"{df_adver['Discm_percent'].min():.{precision}e}", f"{df_our['Discm_percent'].min():.{precision}e}"])
print_to_tex(x, f'min-discm_dist{dist}.tex', dataset)
if max_accuracy:
y = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].max():.{precision}e}", f"{df_nosensitive['Test_acc'].max():.{precision}e}", f"{df_DIR['Test_acc'].max():.{precision}e}", f"{df_ps['Test_acc'].max():.{precision}e}", f"{df_massaging['Test_acc'].max():.{precision}e}", f"{df_lfr['Test_acc'].max():.{precision}e}", f"{df_adver['Test_acc'].max():.{precision}e}", f"{df_our['Test_acc'].max():.{precision}e}"])
print_to_tex(y, f'max-test-accuracy_dist{dist}.tex', dataset)
if test_accuracy_for_min_discm:
z = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Discm_percent'] == df_noremoval['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Discm_percent'] == df_nosensitive['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_DIR.loc[df_DIR['Discm_percent'] == df_DIR['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_ps.loc[df_ps['Discm_percent'] == df_ps['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_massaging.loc[df_massaging['Discm_percent'] == df_massaging['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_lfr.loc[df_lfr['Discm_percent'] == df_lfr['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_adver.loc[df_adver['Discm_percent'] == df_adver['Discm_percent'].min()]['Test_acc'].max():.{precision}e}",
f"{df_our.loc[df_our['Discm_percent'] == df_our['Discm_percent'].min()]['Test_acc'].max():.{precision}e}"])
print_to_tex(z, f'test_accuracy_for_min_discm_dist{dist}.tex', dataset)
if median_discm:
raise NotImplementedError
x = ' & '.join([f"{id_}", f"{df_noremoval['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(0.0) ,f"{df_DIR['Discm_percent'].median():.{precision}e}", f"{df_ps['Discm_percent'].median():.{precision}e}", f"{df_massaging['Discm_percent'].median():.{precision}e}", f"{df_lfr['Discm_percent'].median():.{precision}e}", f"{df_adver['Discm_percent'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Discm_percent'].median():.{precision}e}")])
print_to_tex(x, 'median-discm.tex', dataset)
if mean_accuracy:
raise NotImplementedError
a = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].mean():.{precision}e}", f"{df_nosensitive['Test_acc'].mean():.{precision}e}", f"{df_DIR['Test_acc'].mean():.{precision}e}", f"{df_ps['Test_acc'].mean():.{precision}e}", f"{df_massaging['Test_acc'].mean():.{precision}e}", f"{df_lfr['Test_acc'].mean():.{precision}e}", f"{df_adver['Test_acc'].mean():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].mean():.{precision}e}")])
print_to_tex(a, 'mean-test-accuracy.tex', dataset)
if median_accuracy:
raise NotImplementedError
b = ' & '.join([f"{id_}", f"{df_noremoval['Test_acc'].median():.{precision}e}", f"{df_nosensitive['Test_acc'].median():.{precision}e}", f"{df_DIR['Test_acc'].median():.{precision}e}", f"{df_ps['Test_acc'].median():.{precision}e}", f"{df_massaging['Test_acc'].median():.{precision}e}", f"{df_lfr['Test_acc'].median():.{precision}e}", f"{df_adver['Test_acc'].median():.{precision}e}", "\\textbf{%s}"%(f"{df_our['Test_acc'].median():.{precision}e}")])
print_to_tex(b, 'median-test-accuracy.tex', dataset)
if discm_for_max_accuracy:
k = ' & '.join([f"{id_}", f"{df_noremoval.loc[df_noremoval['Test_acc'] == df_noremoval['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_nosensitive.loc[df_nosensitive['Test_acc'] == df_nosensitive['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_DIR.loc[df_DIR['Test_acc'] == df_DIR['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_ps.loc[df_ps['Test_acc'] == df_ps['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_massaging.loc[df_massaging['Test_acc'] == df_massaging['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_lfr.loc[df_lfr['Test_acc'] == df_lfr['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_adver.loc[df_adver['Test_acc'] == df_adver['Test_acc'].max()]['Discm_percent'].min():.{precision}e}",
f"{df_our.loc[df_our['Test_acc'] == df_our['Test_acc'].max()]['Discm_percent'].min():.{precision}e}"])
print_to_tex(k, f'discm_for_max_accuracy_dist{dist}.tex', dataset)
return df_main
def print_to_tex(string, file, dataset, mode=None):
    if mode is None:
if dataset == "adult":
mode = "w"
else:
mode = "a"
# with open(f"../../neurips_fairness_paper/tables/{file}", mode) as f:
with open(f"tables/{file}", mode) as f:
if dataset == "salary":
string += " \\\ \midrule"
else:
string += " \\\\ "
print(string, file=f)
# print(dataset)
# print("Min discm: ", df_DIR['Discm_percent'].min())
# print("Min discm: ", df_ps['Discm_percent'].min())
# print("Min discm: ", df_massaging['Discm_percent'].min())
# print("Min discm: ", df_lfr['Discm_percent'].min())
# print("Min discm: ", df_adver['Discm_percent'].min())
# print("Min discm: ", df_our['Discm_percent'].min())
def main(plot):
df_main = None
benchmarks = ["adult", "adult_race", "german", "student", "compas-ground", "compas-score", "default", "salary"]
for dataset in benchmarks:
# df_onedataset = boxplots_datasets(dataset, plot)
df_onedataset = boxplots_datasets_dist(dataset, plot)
if not df_main is None:
df_main = pd.concat([df_main, df_onedataset])
else:
df_main = copy.deepcopy(df_onedataset)
print(f"Done {dataset}")
if plot == 0:
return
labels = ['FU', 'SR', 'DIR', 'PS', 'MA', 'LFR', 'AD', 'Our']
tech_cat = pd.Categorical(df_main['Baseline'], categories=labels)
df_main = df_main.assign(Technique_x = tech_cat)
dataset_order = ["D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8"]
data_cat = pd.Categorical(df_main['Dataset'], categories=dataset_order)
df_main = df_main.assign(Dataset_x = data_cat)
# x = (ggplot(aes(x='Technique_x', y='Discm_percent', color='Techniques'), data=df_main) +\
# geom_boxplot() +\
# facet_wrap(['Dataset'], scales = 'free', nrow=2, labeller='label_both', shrink=False) + \
# ylab("Remaining Individual Discrimination") + \
# xlab("Discrimination reducing techniques") + \
# # ylim(0, 20) + \
# # ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
# theme(axis_text_x = element_text(size=6), dpi=151) + \
# theme_seaborn()
# )
# This is responsible for the legend - remove color='Techniques'
x = (ggplot(aes(x='Technique_x', y='Discm_percent'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Remaining Individual Discrimination") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 20) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
x = x.draw()
x.set_figwidth(20)
x.set_figheight(12)
for ax in range(len(benchmarks)):
low_limit = -0.05
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Discm_percent'].max()
if df_main[df_main['Dataset'] == f'D{ax+1}']['Discm_percent'].max() > 20:
top_limit = 20
if top_limit > 13: # These hacks are for aligning the 0 at the bottom of the plots.
low_limit = -0.3
x.axes[ax].set_ylim(low_limit, top_limit)
# x.tight_layout() # This didn't work
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
x.savefig(f"boxplots/boxplot_discm_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
# x.save(f"boxplot_discm_freeaxis_matplotlib.png", height=8, width=18)
# x.save(f"boxplot_discm_freeaxis_withoutfull.png", height=12, width=15)
# x.save(f"boxplot_discm_fixedaxis.png", height=5, width=12)
y = (ggplot(aes(x='Technique_x', y='Test_acc'), data=df_main) +\
geom_boxplot() +\
facet_wrap(['Dataset_x'], scales = 'free', nrow=2, labeller='label_value', shrink=True) + \
ylab("Test Accuracy") + \
xlab("Discrimination reducing techniques") + \
# ylim(0, 100) + \
# ggtitle("Box plot showing remaining discrimination for each technique in each dataset") +\
theme(axis_text_x = element_text(size=6), dpi=151) + \
theme_seaborn()
)
# y.save(f"boxplot_accuracy_freeaxis.png", height=8, width=18)
y = y.draw()
y.set_figwidth(20)
y.set_figheight(12)
for ax in range(len(benchmarks)):
bot_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].min()
top_limit = df_main[df_main['Dataset'] == f'D{ax+1}']['Test_acc'].max()
y.axes[ax].set_ylim(bot_limit - 1, top_limit + 2)
# y.tight_layout()
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.eps", format='eps', bbox_inches='tight')
y.savefig(f"boxplots/boxplot_accuracy_freeaxis_matplotlib_dist{dist}.png", bbox_inches='tight')
def real_accuracy_tables(debiased):
dataset = "compas-score"
if debiased:
deb = "debiased"
else:
deb = "full"
df1 = | pd.read_csv(f"{dataset}/results_{dataset}_method1.csv") | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_process_duplicates_image_level.ipynb (unless otherwise specified).
__all__ = ['create_vocab', 'convert_category_lists_to_probability_vectors', 'get_test_csvs', 'get_train_csv',
'get_image_level_csvs']
# Cell
from fastcore.all import *
from .find_duplicates import set_index_and_sort
import numpy as np
import pandas as pd
from PIL import Image
# Cell
def create_vocab(category_names_df):
categories = L(category_names_df.category_id.unique().tolist())
categories.sort()
return categories.val2idx()
# Cell
def _category_list_to_probability_vector_helper(category_list, vocab):
"""Converts list of categories to probability vector."""
probs = np.zeros(len(vocab))
for category in category_list:
probs[vocab[category]] += 1/len(category_list)
return probs
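# For example (hypothetical vocab), with vocab {"cat": 0, "dog": 1} and
# category_list ["cat", "cat", "dog"], the helper returns [2/3, 1/3].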
# Cell
def convert_category_lists_to_probability_vectors(hash2categories: dict, vocab: dict):
"""Converts hash2categories category lists to probability vectors."""
for hash_, category_list in hash2categories.items():
hash2categories[hash_] = _category_list_to_probability_vector_helper(category_list, vocab)
# Cell
def get_test_csvs(test_hashes_df, hash2categories: dict):
"""Returns test images mapped to probability vector and test images to predict on."""
duplicated_imgs_idx = test_hashes_df.index.isin(hash2categories)
duplicated_test_images_df, unique_test_images_df = test_hashes_df[duplicated_imgs_idx], test_hashes_df[~duplicated_imgs_idx]
labeled_test = [
(name, hash2categories[hash_])
for hash_, name
in zip(duplicated_test_images_df.index, duplicated_test_images_df.image_name)
]
to_predict = unique_test_images_df.image_name.to_list()
labeled_test_df = | pd.DataFrame(labeled_test, columns=["image_name", "probability"]) | pandas.DataFrame |
import time
import sys
import datetime
import shutil
import os
import py7zr
from ftplib import FTP
import numpy as np
import pandas as pd
from stock.globalvar import *
from stock.utils.symbol_util import symbol_to_exsymbol, is_symbol_kc
from stock.utils.calc_price import get_zt_price
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
def get_filepath(today):
today_str = today.strftime("%Y%m%d")
filename = "{}.7z".format(today_str)
filepath = "/tmp/{}".format(filename)
return filepath
def download_kaipan(today):
ftp = FTP('172.16.31.10')
result = ftp.login(user='szq_stkauction_tick', passwd='<PASSWORD>')
print(result)
today_str = today.strftime("%Y%m%d")
filename = "{}.7z".format(today_str)
filepath = get_filepath(today)
f = open(filepath, 'wb')
cmd = 'RETR {}'.format(filename)
ftp.retrbinary(cmd, f.write)
ftp.quit()
print("downloaded", filename)
def extract_archive(today):
filepath = get_filepath(today)
with py7zr.SevenZipFile(filepath, mode='r') as z:
z.extractall(path='/tmp')
today_str = today.strftime("%Y%m%d")
source_dir = '/tmp/{}'.format(today_str)
target_dir = KAIPAN_DIR['stock']
for filename in os.listdir(target_dir):
filepath = os.path.join(target_dir, filename)
os.remove(filepath)
filenames = os.listdir(source_dir)
for filename in filenames:
source_file = os.path.join(source_dir, filename)
target_file = os.path.join(target_dir, filename)
if os.path.isfile(target_file):
continue
shutil.move(source_file, target_dir)
print("extracted all")
def filter_stocks(today):
filenames = os.listdir(KAIPAN_DIR['stock'])
critical_time = datetime.datetime(year=today.year, month=today.month, day=today.day,
hour=9, minute=20, second=0)
dtype = {
'Stkcd': str,
'TradingDay': str,
'TimeStamp': str,
'PreClosePrice': np.float64,
'Volume': np.int64,
'Amount': np.float64,
'BidPrice1': np.float64,
'BidQty1': np.int64,
'BidQty2': np.int64,
'AskPrice1': np.float64,
'AskQty1': np.int64,
'AskQty2': np.int64,
'LastPrice': np.float64,
}
columns = dtype.keys()
df_res = | pd.DataFrame(columns=["max_matched", "max_unmatched", "zt_seconds", "open_incr"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
def traceZig(bars):
# size = len(bars.index)
last_bar = bars.iloc[0]
go_up = True
go_down = True
zigzags = []
# print(bars)
for t, bar in bars[1:].iterrows():
        # Is the low below last_bar's low? True: keep going down | False: last_bar is a zig point
if go_down:
if bar.l > last_bar.l:
zigzags.append(last_bar)
go_up = True
go_down = False
        # Is the high above last_bar's high?
if go_up:
if bar.h < last_bar.h:
zigzags.append(last_bar)
go_down = True
go_up = False
        # Redefine last_bar
last_bar = bar
# print (zigzags)
return | pd.DataFrame(zigzags) | pandas.DataFrame |
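# A minimal usage sketch (hypothetical bars; the function expects 'h' and 'l'
# columns as used above, and assumes the return above completes to a DataFrame).
if __name__ == '__main__':
    _bars = pd.DataFrame({
        'h': [10.5, 10.8, 10.6, 11.2, 11.0],
        'l': [10.0, 10.3, 10.1, 10.7, 10.4],
    })
    print(traceZig(_bars))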
import typing as T
from pathlib import Path
import defopt
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from sklearn.calibration import calibration_curve
def plot_calibration_curves(dset, ax=None):
if ax is None:
_, ax = plt.subplots()
ax.plot([0, 1], [0, 1], "--k")
for key, group in dset.groupby("fold"):
prob_true, prob_pred = calibration_curve(group["injuryCrash"], group["y_prob"])
ax.plot(prob_true, prob_pred, "-o", label=key)
ax.grid(True)
ax.set_xlabel("True probabilities")
ax.set_ylabel("Predicted probabilities")
ax.legend()
return ax
def plot_roc_curves(dset, ax=None):
if ax is None:
_, ax = plt.subplots()
ax.plot([0, 1], [0, 1], "k--")
for key, group in dset.groupby("fold"):
fpr, tpr, _ = metrics.roc_curve(group["injuryCrash"], group["y_prob"])
ax.plot(fpr, tpr, label=key)
ax.grid(True)
ax.set_xlabel("False positive rate")
ax.set_ylabel("True positive rate")
ax.legend()
return ax
def plot_precision_recall(dset, ax=None):
if ax is None:
_, ax = plt.subplots()
for key, group in dset.groupby("fold"):
precision, recall, _ = metrics.precision_recall_curve(
group["injuryCrash"], group["y_prob"]
)
ax.plot(recall, precision, label=key)
ax.grid(True)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.legend()
return ax
def plot_curves(dset, axes=None):
if axes is None:
        _, axes = plt.subplots(1, 3)
plot_roc_curves(dset, ax=axes[0])
axes[0].set_title("ROC curve")
plot_precision_recall(dset, ax=axes[1])
axes[1].set_title("Precision-Recall curve")
plot_calibration_curves(dset, ax=axes[2])
axes[2].set_title("Calibration curve")
return axes
METRICS_FCN = {
"accuracy": metrics.accuracy_score,
"F1": metrics.f1_score,
"precision": metrics.precision_score,
"recall": metrics.recall_score,
}
def _score_method(dset):
def score_fold(x):
scores = {
key: metric_fcn(x["injuryCrash"], x["y_pred"])
for key, metric_fcn in METRICS_FCN.items()
}
scores["neg_log_loss"] = metrics.log_loss(x["injuryCrash"], x["y_prob"])
scores["roc_auc"] = metrics.roc_auc_score(x["injuryCrash"], x["y_prob"])
scores["brier_score"] = metrics.brier_score_loss(x["injuryCrash"], x["y_prob"])
return | pd.Series(scores) | pandas.Series |
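# A minimal usage sketch (synthetic data; the 'fold', 'injuryCrash' and 'y_prob'
# column names follow the functions above).
if __name__ == "__main__":
    import numpy as np

    _rng = np.random.default_rng(0)
    _dset = pd.DataFrame(
        {
            "fold": np.repeat([0, 1], 50),
            "injuryCrash": _rng.integers(0, 2, 100),
            "y_prob": _rng.random(100),
        }
    )
    _, _axes = plt.subplots(1, 3, figsize=(15, 4))
    plot_curves(_dset, axes=_axes)
    plt.show()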
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from parameterized import parameterized
import sys
import numpy as np
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
import pandas as pd
from pandas.testing import assert_frame_equal
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.domain import US_EQUITIES
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
import pytest
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = pd.Timestamp("2015-06-01", tz="UTC")
TEST_CALENDAR_STOP = pd.Timestamp("2015-06-30", tz="UTC")
TEST_QUERY_START = pd.Timestamp("2015-06-10", tz="UTC")
TEST_QUERY_STOP = pd.Timestamp("2015-06-19", tz="UTC")
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = pd.DataFrame(
[
# 1) The equity's trades start and end before query.
{"start_date": "2015-06-01", "end_date": "2015-06-05"},
# 2) The equity's trades start and end after query.
{"start_date": "2015-06-22", "end_date": "2015-06-30"},
# 3) The equity's data covers all dates in range.
{"start_date": "2015-06-02", "end_date": "2015-06-30"},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{"start_date": "2015-06-01", "end_date": "2015-06-15"},
# 5) The equity's trades start and end during the query.
{"start_date": "2015-06-12", "end_date": "2015-06-18"},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{"start_date": "2015-06-15", "end_date": "2015-06-25"},
],
index=np.arange(1, 7),
columns=["start_date", "end_date"],
).astype(np.datetime64)
EQUITY_INFO["symbol"] = [chr(ord("A") + n) for n in range(len(EQUITY_INFO))]
EQUITY_INFO["exchange"] = "TEST"
TEST_QUERY_SIDS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
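#
# Example: in SPLITS below, ratio 3.112 decodes as equity 3 (1s place), a split
# (0.1s place = 1), on the 12th (last two decimal places), matching its
# 2015-06-12 effective date.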
SPLITS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.103,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.110,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.112,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-21"),
"ratio": 6.121,
"sid": 6,
},
# Another action in query range, should have last_row of 1
{
"effective_date": str_to_seconds("2015-06-11"),
"ratio": 3.111,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.119,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
MERGERS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.203,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.210,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.212,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-25"),
"ratio": 6.225,
"sid": 6,
},
# Another action in query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 4.212,
"sid": 4,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.219,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
DIVIDENDS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-05-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-03", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-05", tz="UTC").to_datetime64(),
"amount": 90.0,
"sid": 1,
},
# First day of query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-10", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-17", tz="UTC").to_datetime64(),
"amount": 80.0,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-12", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-17", tz="UTC").to_datetime64(),
"amount": 70.0,
"sid": 3,
},
# After query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-25", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-28", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-30", tz="UTC").to_datetime64(),
"amount": 60.0,
"sid": 6,
},
# Another action in query range, should have last_row of 3
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-18", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-20", tz="UTC").to_datetime64(),
"amount": 50.0,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-19", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-22", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-30", tz="UTC").to_datetime64(),
"amount": 40.0,
"sid": 3,
},
],
columns=[
"declared_date",
"ex_date",
"record_date",
"pay_date",
"amount",
"sid",
],
)
DIVIDENDS_EXPECTED = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-01"),
"ratio": 0.1,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 0.20,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 0.30,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-25"),
"ratio": 0.40,
"sid": 6,
},
# Another action in query range, should have last_row of 3
{
"effective_date": str_to_seconds("2015-06-15"),
"ratio": 0.50,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 0.60,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
class USEquityPricingLoaderTestCase(WithAdjustmentReader, ZiplineTestCase):
START_DATE = TEST_CALENDAR_START
END_DATE = TEST_CALENDAR_STOP
asset_ids = 1, 2, 3
@classmethod
def make_equity_info(cls):
return EQUITY_INFO
@classmethod
def make_splits_data(cls):
return SPLITS
@classmethod
def make_mergers_data(cls):
return MERGERS
@classmethod
def make_dividends_data(cls):
return DIVIDENDS
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.calendar_days_between(cls.START_DATE, cls.END_DATE),
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
return make_bar_data(
EQUITY_INFO,
cls.equity_daily_bar_days,
)
@classmethod
def init_class_fixtures(cls):
super(USEquityPricingLoaderTestCase, cls).init_class_fixtures()
cls.sids = TEST_QUERY_SIDS
cls.asset_info = EQUITY_INFO
def test_input_sanity(self):
# Ensure that the input data doesn't contain adjustments during periods
# where the corresponding asset didn't exist.
for table in SPLITS, MERGERS:
for eff_date_secs, _, sid in table.itertuples(index=False):
eff_date = pd.Timestamp(eff_date_secs, unit="s")
asset_start, asset_end = EQUITY_INFO.loc[
sid, ["start_date", "end_date"]
]
assert eff_date >= asset_start
assert eff_date <= asset_end
@classmethod
def calendar_days_between(cls, start_date, end_date, shift=0):
slice_ = cls.equity_daily_bar_days.slice_indexer(start_date, end_date)
start = slice_.start + shift
stop = slice_.stop + shift
if start < 0:
raise KeyError(start_date, shift)
return cls.equity_daily_bar_days[start:stop]
def expected_adjustments(self, start_date, end_date, tables, adjustment_type):
price_adjustments = {}
volume_adjustments = {}
should_include_price_adjustments = (
adjustment_type == "all" or adjustment_type == "price"
)
should_include_volume_adjustments = (
adjustment_type == "all" or adjustment_type == "volume"
)
query_days = self.calendar_days_between(start_date, end_date)
start_loc = query_days.get_loc(start_date)
for table in tables:
for eff_date_secs, ratio, sid in table.itertuples(index=False):
eff_date = pd.Timestamp(eff_date_secs, unit="s", tz="UTC")
# Ignore adjustments outside the query bounds.
if not (start_date <= eff_date <= end_date):
continue
eff_date_loc = query_days.get_loc(eff_date)
delta = eff_date_loc - start_loc
# Pricing adjustments should be applied on the date
# corresponding to the effective date of the input data. They
# should affect all rows **before** the effective date.
if should_include_price_adjustments:
price_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=ratio,
)
)
# Volume is *inversely* affected by *splits only*.
if table is SPLITS and should_include_volume_adjustments:
volume_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=1.0 / ratio,
)
)
output = {}
if should_include_price_adjustments:
output["price_adjustments"] = price_adjustments
if should_include_volume_adjustments:
output["volume_adjustments"] = volume_adjustments
return output
@parameterized.expand(
[
([SPLITS, MERGERS, DIVIDENDS_EXPECTED], "all"),
([SPLITS, MERGERS, DIVIDENDS_EXPECTED], "price"),
([SPLITS, MERGERS, DIVIDENDS_EXPECTED], "volume"),
([SPLITS, MERGERS, None], "all"),
([SPLITS, MERGERS, None], "price"),
]
)
def test_load_adjustments(self, tables, adjustment_type):
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
)
adjustments = self.adjustment_reader.load_adjustments(
query_days,
self.sids,
should_include_splits=tables[0] is not None,
should_include_mergers=tables[1] is not None,
should_include_dividends=tables[2] is not None,
adjustment_type=adjustment_type,
)
expected_adjustments = self.expected_adjustments(
TEST_QUERY_START,
TEST_QUERY_STOP,
[table for table in tables if table is not None],
adjustment_type,
)
if adjustment_type == "all" or adjustment_type == "price":
expected_price_adjustments = expected_adjustments["price_adjustments"]
for key in expected_price_adjustments:
price_adjustment = adjustments["price"][key]
for j, adj in enumerate(price_adjustment):
expected = expected_price_adjustments[key][j]
assert adj.first_row == expected.first_row
assert adj.last_row == expected.last_row
assert adj.first_col == expected.first_col
assert adj.last_col == expected.last_col
assert_allclose(adj.value, expected.value)
if adjustment_type == "all" or adjustment_type == "volume":
expected_volume_adjustments = expected_adjustments["volume_adjustments"]
for key in expected_volume_adjustments:
volume_adjustment = adjustments["volume"][key]
for j, adj in enumerate(volume_adjustment):
expected = expected_volume_adjustments[key][j]
assert adj.first_row == expected.first_row
assert adj.last_row == expected.last_row
assert adj.first_col == expected.first_col
assert adj.last_col == expected.last_col
assert_allclose(adj.value, expected.value)
@parameterized.expand([(True,), (False,)])
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_load_adjustments_to_df(self, convert_dts):
reader = self.adjustment_reader
adjustment_dfs = reader.unpack_db_to_component_dfs(convert_dates=convert_dts)
name_and_raw = (
("splits", SPLITS),
("mergers", MERGERS),
("dividends", DIVIDENDS_EXPECTED),
)
def create_expected_table(df, name):
expected_df = df.copy()
if convert_dts:
for colname in reader._datetime_int_cols[name]:
expected_df[colname] = (
expected_df[colname]
.astype("datetime64[s]")
.dt.tz_localize("UTC")
)
return expected_df
def create_expected_div_table(df, name):
expected_df = df.copy()
for colname in reader._datetime_int_cols[name]:
if not convert_dts:
# todo: fix nanosecond hack
expected_df[colname] = (
expected_df[colname]
.astype("datetime64[s]")
.view(int)
.div(1000000000)
.astype(int)
)
else:
expected_df[colname] = (
expected_df[colname]
.astype("datetime64[s]")
.dt.tz_localize("UTC")
)
return expected_df
for action_name, raw_tbl in name_and_raw:
# todo: fix missing dividend value
if action_name == "dividends":
continue
exp = create_expected_table(raw_tbl, action_name)
| assert_frame_equal(adjustment_dfs[action_name], exp) | pandas.testing.assert_frame_equal |
# import all the required files i.e. numpy , pandas and math library
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
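# A minimal usage sketch (hypothetical prices; assumes a 'close' column as above):
#   prices = pd.DataFrame({'close': [10.0, 10.5, 10.2, 10.8, 11.0]})
#   prices['EMA_3'] = ema(prices, period=3)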
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility Here change and volatility are absolute
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
assert period % 2 == 0, print("FRAMA period must be even")
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: int = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# wma of change in wma where change in wma is 2 * (wma half period) - (wma full period)
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
er_ = er(data)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), data[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD = pd.Series(EMA_fast - EMA_slow,name='MACD')
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL'
)
DIFF = pd.Series(
MACD - MACD_signal,
name="diff MACD_MSIGNAL"
)
return pd.concat(
[DIFF, MACD, MACD_signal ],
axis=1
)
# [0] __ Moving Standard Deviation (MSD)
# Standard deviation of a given period for the column passed as arguement
def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series:
return pd.Series(data[column].rolling(period).std(), name="MSD")
# Momentum Breakout Bands (MOBO)
# TODO
def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8,
column: str = "close",) -> DataFrame:
BB = bbands(data, period=10, std_multiplier=0.8, column=column)
return BB
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
return pd.Series(data[column].diff(period),
name=f'{period}_MOM'
)
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period:int = 9) -> Series:
data["cv"] =(data["close"] * data["volume"])
return pd.Series(
(sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")),
name="MVWAP."
)
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
    s1 = df_['close']-(1.1*(df_['high']-df_['low'])/12)
s2 = df_['close']-(1.1*(df_['high']-df_['low'])/6)
s3 = df_['close']-(1.1*(df_['high']-df_['low'])/4)
s4 =df_['close']-(1.1*(df_['high']-df_['low'])/2)
r1 = df_['close']+(1.1*(df_['high']-df_['low'])/12)
r2 = df_['close']+(1.1*(df_['high']-df_['low'])/6)
r3 =df_['close']+(1.1*(df_['high']-df_['low'])/4)
r4 = df_['close']+(1.1*(df_['high']-df_['low'])/2)
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
| pd.Series(s2, name="s2") | pandas.Series |
import pandas as pd
import pyomo.environ as pe
import pyomo.gdp as pyogdp
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from itertools import product
class TheatreScheduler:
def __init__(self, case_file_path, session_file_path):
"""
Read case and session data into Pandas DataFrames
Args:
case_file_path (str): path to case data in CSV format
session_file_path (str): path to theatre session data in CSV format
"""
try:
self.df_cases = pd.read_csv(case_file_path)
except FileNotFoundError:
print("Case data not found.")
try:
self.df_sessions = pd.read_csv(session_file_path)
except FileNotFoundError:
print("Session data not found")
self.model = self.create_model()
def _generate_case_durations(self):
"""
Generate mapping of cases IDs to median case time for the procedure
Returns:
(dict): dictionary with CaseID as key and median case time (mins) for procedure as value
"""
return pd.Series(self.df_cases["Median Duration"].values, index=self.df_cases["CaseID"]).to_dict()
def _generate_session_durations(self):
"""
Generate mapping of all theatre sessions IDs to session duration in minutes
Returns:
(dict): dictionary with SessionID as key and session duration as value
"""
return pd.Series(self.df_sessions["Duration"].values, index=self.df_sessions["SessionID"]).to_dict()
def _generate_session_start_times(self):
"""
Generate mapping from SessionID to session start time
Returns:
(dict): dictionary with SessionID as key and start time in minutes since midnight as value
"""
        # Convert session start time from HH:MM:SS format into minutes elapsed since midnight
self.df_sessions.loc[:, "Start"] = pd.to_timedelta(self.df_sessions["Start"])
self.df_sessions.loc[:, "Start"] = self.df_sessions["Start"].dt.total_seconds() / 60
return pd.Series(self.df_sessions["Start"].values, index=self.df_sessions["SessionID"]).to_dict()
def _get_ordinal_case_deadlines(self):
"""
#TODO
Returns:
"""
self.df_cases.loc[:, "TargetDeadline"] = pd.to_datetime(self.df_cases["TargetDeadline"], format="%d/%m/%Y")
self.df_cases.loc[:, "TargetDeadline"] = self.df_cases["TargetDeadline"].apply(lambda date: date.toordinal())
return pd.Series(self.df_cases["TargetDeadline"].values, index=self.df_cases["CaseID"]).to_dict()
def _get_ordinal_session_dates(self):
"""
#TODO
Returns:
"""
self.df_sessions.loc[:, "Date"] = pd.to_datetime(self.df_sessions["Date"], format="%d/%m/%Y")
self.df_sessions.loc[:, "Date"] = self.df_sessions["Date"].apply(lambda date: date.toordinal())
return pd.Series(self.df_sessions["Date"].values, index=self.df_sessions["SessionID"]).to_dict()
def _generate_disjunctions(self):
"""
#TODO
Returns:
disjunctions (list): list of tuples containing disjunctions
"""
cases = self.df_cases["CaseID"].to_list()
sessions = self.df_sessions["SessionID"].to_list()
disjunctions = []
for (case1, case2, session) in product(cases, cases, sessions):
if (case1 != case2) and (case2, case1, session) not in disjunctions:
disjunctions.append((case1, case2, session))
return disjunctions
def create_model(self):
model = pe.ConcreteModel()
# Model Data
# List of case IDs in surgical waiting list
model.CASES = pe.Set(initialize=self.df_cases["CaseID"].tolist())
# List of sessions IDs
model.SESSIONS = pe.Set(initialize=self.df_sessions["SessionID"].tolist())
# List of tasks - all possible (caseID, sessionID) combination
model.TASKS = pe.Set(initialize=model.CASES * model.SESSIONS, dimen=2)
# The duration (median case time) for each operation
model.CASE_DURATION = pe.Param(model.CASES, initialize=self._generate_case_durations())
# The duration of each theatre session
model.SESSION_DURATION = pe.Param(model.SESSIONS, initialize=self._generate_session_durations())
# The start time of each theatre session
model.SESSION_START_TIME = pe.Param(model.SESSIONS, initialize=self._generate_session_start_times())
# The deadline of each case
model.CASE_DEADLINES = pe.Param(model.CASES, initialize=self._get_ordinal_case_deadlines())
# The date of each theatre session
model.SESSION_DATES = pe.Param(model.SESSIONS, initialize=self._get_ordinal_session_dates())
model.DISJUNCTIONS = pe.Set(initialize=self._generate_disjunctions(), dimen=3)
ub = 1440 # seconds in a day
model.M = pe.Param(initialize=1e3*ub) # big M
max_util = 0.85
num_cases = self.df_cases.shape[0]
# Decision Variables
model.SESSION_ASSIGNED = pe.Var(model.TASKS, domain=pe.Binary)
model.CASE_START_TIME = pe.Var(model.TASKS, bounds=(0, ub), within=pe.PositiveReals)
model.CASES_IN_SESSION = pe.Var(model.SESSIONS, bounds=(0, num_cases), within=pe.PositiveReals)
# Objective
def objective_function(model):
return pe.summation(model.CASES_IN_SESSION)
#return sum([model.SESSION_ASSIGNED[case, session] for case in model.CASES for session in model.SESSIONS])
model.OBJECTIVE = pe.Objective(rule=objective_function, sense=pe.maximize)
# Constraints
# Case start time must be after start time of assigned theatre session
def case_start_time(model, case, session):
return model.CASE_START_TIME[case, session] >= model.SESSION_START_TIME[session] - \
((1 - model.SESSION_ASSIGNED[(case, session)])*model.M)
model.CASE_START = pe.Constraint(model.TASKS, rule=case_start_time)
# Case end time must be before end time of assigned theatre session
def case_end_time(model, case, session):
return model.CASE_START_TIME[case, session] + model.CASE_DURATION[case] <= model.SESSION_START_TIME[session] + \
model.SESSION_DURATION[session]*max_util + ((1 - model.SESSION_ASSIGNED[(case, session)]) * model.M)
model.CASE_END_TIME = pe.Constraint(model.TASKS, rule=case_end_time)
# Cases can be assigned to a maximum of one session
def session_assignment(model, case):
return sum([model.SESSION_ASSIGNED[(case, session)] for session in model.SESSIONS]) <= 1
model.SESSION_ASSIGNMENT = pe.Constraint(model.CASES, rule=session_assignment)
def set_deadline_condition(model, case, session):
return model.SESSION_DATES[session] <= model.CASE_DEADLINES[case] + ((1 - model.SESSION_ASSIGNED[case, session])*model.M)
model.APPLY_DEADLINE = pe.Constraint(model.TASKS, rule=set_deadline_condition)
def no_case_overlap(model, case1, case2, session):
return [model.CASE_START_TIME[case1, session] + model.CASE_DURATION[case1] <= model.CASE_START_TIME[case2, session] + \
((2 - model.SESSION_ASSIGNED[case1, session] - model.SESSION_ASSIGNED[case2, session])*model.M),
model.CASE_START_TIME[case2, session] + model.CASE_DURATION[case2] <= model.CASE_START_TIME[case1, session] + \
((2 - model.SESSION_ASSIGNED[case1, session] - model.SESSION_ASSIGNED[case2, session])*model.M)]
model.DISJUNCTIONS_RULE = pyogdp.Disjunction(model.DISJUNCTIONS, rule=no_case_overlap)
def theatre_util(model, session):
return model.CASES_IN_SESSION[session] == \
sum([model.SESSION_ASSIGNED[case, session] for case in model.CASES])
model.THEATRE_UTIL = pe.Constraint(model.SESSIONS, rule=theatre_util)
pe.TransformationFactory("gdp.bigm").apply_to(model)
return model
def solve(self, solver_name, options=None, solver_path=None, local=True):
if solver_path is not None:
solver = pe.SolverFactory(solver_name, executable=solver_path)
else:
solver = pe.SolverFactory(solver_name)
# TODO remove - too similar to alstom
if options is not None:
for key, value in options.items():
solver.options[key] = value
if local:
solver_results = solver.solve(self.model, tee=True)
else:
solver_manager = pe.SolverManagerFactory("neos")
solver_results = solver_manager.solve(self.model, opt=solver)
results = [{"Case": case,
"Session": session,
"Session Date": self.model.SESSION_DATES[session],
"Case Deadline": self.model.CASE_DEADLINES[case],
"Days before deadline": self.model.CASE_DEADLINES[case] - self.model.SESSION_DATES[session],
"Start": self.model.CASE_START_TIME[case, session](),
"Assignment": self.model.SESSION_ASSIGNED[case, session]()}
for (case, session) in self.model.TASKS]
self.df_times = | pd.DataFrame(results) | pandas.DataFrame |
from googleapiclient.discovery import build
from datetime import datetime, timedelta
from pandas import DataFrame, Timedelta, to_timedelta
from structures import Structure
from networkdays import networkdays
from calendar import monthrange
class Timesheet:
def __init__(self, credentials, sheetid):
# The ID and range of a sample spreadsheet.
self.SAMPLE_SPREADSHEET_ID = sheetid
self.service = build('sheets', 'v4', credentials=credentials)
# Call the Sheets API
self.sheet = self.service.spreadsheets()
# Get the Sheet name
sheet_metadata = self.sheet.get(spreadsheetId=self.SAMPLE_SPREADSHEET_ID).execute()
sheetname = sheet_metadata['sheets'][0]['properties']['title']
# Get sheet version
self.SAMPLE_RANGE_NAME = sheetname + "!" + "B2:N"
# Google Credentials
self.credentials = credentials
self.values = list()
self.data = list()
self.total_hours = Timedelta("00:00:00")
# Metadata information of the sheet
self.person = ''
self.month = ''
self.year = ''
self.max_working_hours = | Timedelta("00:00:00") | pandas.Timedelta |
"""
Used examples from SO:
https://stackoverflow.com/questions/22780563/group-labels-in-matplotlib-barchart-using-pandas-multiindex
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from itertools import groupby
import numpy as np
def add_line(ax, xpos, ypos):
line = plt.Line2D([xpos, xpos], [ypos + .1, ypos],
transform=ax.transAxes, color='gray')
line.set_clip_on(False)
ax.add_line(line)
def label_len(my_index, level):
labels = my_index.get_level_values(level)
return [(k, sum(1 for i in g)) for k, g in groupby(labels)]
def label_group_bar_table(ax, df):
ypos = -.1
scale = 1./df.index.size
for level in range(df.index.nlevels)[::-1]:
pos = 0
for label, rpos in label_len(df.index, level):
lxpos = (pos + .5 * rpos)*scale
ax.text(lxpos, ypos, label, ha='center', transform=ax.transAxes)
add_line(ax, pos*scale, ypos)
pos += rpos
add_line(ax, pos*scale, ypos)
ypos -= .1
# set standard font size
matplotlib.rcParams.update({'font.size': 8})
# read data
raw_rmse_df = pd.read_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/rnn_lstm_comparison_results/"
"full_testset_rmse.csv")
raw_rmse_df.set_index(['Well', 'Forecast'], inplace=True)
raw_nse_df = pd.read_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/rnn_lstm_comparison_results/"
"full_testset_nse.csv")
raw_nse_df.set_index(['Well', 'Forecast'], inplace=True)
# set up multi-index dataframes
group = ('MMPS-043', 'MMPS-125', 'MMPS-129', 'MMPS-153', 'MMPS-155', 'MMPS-170', 'MMPS-175')
subgroup = ('t+1', 't+9', 't+18')
obs = ('RNN', 'LSTM')
index = pd.MultiIndex.from_tuples([('MMPS-043', 't+1'), ('MMPS-043', 't+9'), ('MMPS-043', 't+18'),
('MMPS-125', 't+1'), ('MMPS-125', 't+9'), ('MMPS-125', 't+18'),
('MMPS-129', 't+1'), ('MMPS-129', 't+9'), ('MMPS-129', 't+18'),
('MMPS-153', 't+1'), ('MMPS-153', 't+9'), ('MMPS-153', 't+18'),
('MMPS-155', 't+1'), ('MMPS-155', 't+9'), ('MMPS-155', 't+18'),
('MMPS-170', 't+1'), ('MMPS-170', 't+9'), ('MMPS-170', 't+18'),
('MMPS-175', 't+1'), ('MMPS-175', 't+9'), ('MMPS-175', 't+18')],
names=['group', 'subgroup'])
rmse_values = np.asarray(raw_rmse_df[["RNN", "LSTM"]])
nse_values = np.asarray(raw_nse_df[["RNN", "LSTM"]])
rmse_df = | pd.DataFrame(index=index) | pandas.DataFrame |
# In[2]:
"""
Basic Configurations
"""
configs = {}
configs['cdsign'] = '\\'
configs['path_root'] = 'C:\\VQA_Project\\'
configs['path_datasets'] = configs['path_root'] + 'Datasets' + configs['cdsign']
configs['datasets'] = {}
##################################################################### VQAv1 #########################################################################
configs['datasets']['VQAv1'] = {}
configs['datasets']['VQAv1']['Train'] = {}
configs['datasets']['VQAv1']['Train']['Questions'] = {}
configs['datasets']['VQAv1']['Train']['Questions']['File'] = 'OpenEnded_mscoco_train2014_questions.json'
configs['datasets']['VQAv1']['Train']['Questions']['Zip_File'] = 'Questions_Train_mscoco.zip'
configs['datasets']['VQAv1']['Train']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Train_mscoco.zip'
configs['datasets']['VQAv1']['Train']['Questions']['Path'] = configs['path_datasets'] + 'VQAv1' + configs['cdsign'] + 'Questions' + configs['cdsign'] + configs['datasets']['VQAv1']['Train']['Questions']['File']
configs['datasets']['VQAv1']['Train']['Annotations'] = {}
configs['datasets']['VQAv1']['Train']['Annotations']['File'] = 'mscoco_train2014_annotations.json'
configs['datasets']['VQAv1']['Train']['Annotations']['Zip_File'] = 'Annotations_Train_mscoco.zip'
configs['datasets']['VQAv1']['Train']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Train_mscoco.zip'
configs['datasets']['VQAv1']['Train']['Annotations']['Path'] = configs['path_datasets'] + 'VQAv1' + configs['cdsign'] + 'Annotations' + configs['cdsign'] + configs['datasets']['VQAv1']['Train']['Annotations']['File']
configs['datasets']['VQAv1']['Train']['Images'] = {}
configs['datasets']['VQAv1']['Train']['Images']['File'] = 'train2014'
configs['datasets']['VQAv1']['Train']['Images']['Zip_File'] = 'train2014.zip'
configs['datasets']['VQAv1']['Train']['Images']['Link'] = 'http://images.cocodataset.org/zips/train2014.zip'
configs['datasets']['VQAv1']['Train']['Images']['Path'] = configs['path_datasets'] + 'VQAv1' + configs['cdsign'] + 'Images' + configs['cdsign'] + configs['datasets']['VQAv1']['Train']['Images']['File'] + configs['cdsign']
configs['datasets']['VQAv1']['Val'] = {}
configs['datasets']['VQAv1']['Val']['Questions'] = {}
configs['datasets']['VQAv1']['Val']['Questions']['File'] = 'OpenEnded_mscoco_val2014_questions.json'
configs['datasets']['VQAv1']['Val']['Questions']['Zip_File'] = 'Questions_Val_mscoco.zip'
configs['datasets']['VQAv1']['Val']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Val_mscoco.zip'
configs['datasets']['VQAv1']['Val']['Questions']['Path'] = configs['path_datasets'] + 'VQAv1' + configs['cdsign'] + 'Questions' + configs['cdsign'] + configs['datasets']['VQAv1']['Val']['Questions']['File']
configs['datasets']['VQAv1']['Val']['Annotations'] = {}
configs['datasets']['VQAv1']['Val']['Annotations']['File'] = 'mscoco_val2014_annotations.json'
configs['datasets']['VQAv1']['Val']['Annotations']['Zip_File'] = 'Annotations_Val_mscoco.zip'
configs['datasets']['VQAv1']['Val']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Val_mscoco.zip'
configs['datasets']['VQAv1']['Val']['Annotations']['Path'] = configs['path_datasets'] + 'VQAv1' + configs['cdsign'] + 'Annotations' + configs['cdsign'] + configs['datasets']['VQAv1']['Val']['Annotations']['File']
configs['datasets']['VQAv1']['Val']['Images'] = {}
configs['datasets']['VQAv1']['Val']['Images']['File'] = 'val2014'
configs['datasets']['VQAv1']['Val']['Images']['Zip_File'] = 'val2014.zip'
configs['datasets']['VQAv1']['Val']['Images']['Link'] = 'http://images.cocodataset.org/zips/val2014.zip'
configs['datasets']['VQAv1']['Val']['Images']['Path'] = configs['path_datasets'] + 'VQAv1' + configs['cdsign'] + 'Images' + configs['cdsign'] + configs['datasets']['VQAv1']['Val']['Images']['File'] + configs['cdsign']
###################################################################################################################################################
##################################################################### VQAv2 #########################################################################
configs['datasets']['VQAv2'] = {}
configs['datasets']['VQAv2']['Train'] = {}
configs['datasets']['VQAv2']['Train']['Questions'] = {}
configs['datasets']['VQAv2']['Train']['Questions']['File'] = 'v2_OpenEnded_mscoco_train2014_questions.json'
configs['datasets']['VQAv2']['Train']['Questions']['Zip_File'] = 'v2_Questions_Train_mscoco.zip'
configs['datasets']['VQAv2']['Train']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Train_mscoco.zip'
configs['datasets']['VQAv2']['Train']['Questions']['Path'] = configs['path_datasets'] + 'VQAv2' + configs['cdsign'] + 'Questions' + configs['cdsign'] + configs['datasets']['VQAv2']['Train']['Questions']['File']
configs['datasets']['VQAv2']['Train']['Annotations'] = {}
configs['datasets']['VQAv2']['Train']['Annotations']['File'] = 'v2_mscoco_train2014_annotations.json'
configs['datasets']['VQAv2']['Train']['Annotations']['Zip_File'] = 'v2_Annotations_Train_mscoco.zip'
configs['datasets']['VQAv2']['Train']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Train_mscoco.zip'
configs['datasets']['VQAv2']['Train']['Annotations']['Path'] = configs['path_datasets'] + 'VQAv2' + configs['cdsign'] + 'Annotations' + configs['cdsign'] + configs['datasets']['VQAv2']['Train']['Annotations']['File']
configs['datasets']['VQAv2']['Train']['Images'] = configs['datasets']['VQAv1']['Train']['Images']
configs['datasets']['VQAv2']['Val'] = {}
configs['datasets']['VQAv2']['Val']['Questions'] = {}
configs['datasets']['VQAv2']['Val']['Questions']['File'] = 'v2_OpenEnded_mscoco_val2014_questions.json'
configs['datasets']['VQAv2']['Val']['Questions']['Zip_File'] = 'v2_Questions_Val_mscoco.zip'
configs['datasets']['VQAv2']['Val']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Val_mscoco.zip'
configs['datasets']['VQAv2']['Val']['Questions']['Path'] = configs['path_datasets'] + 'VQAv2' + configs['cdsign'] + 'Questions' + configs['cdsign'] + configs['datasets']['VQAv2']['Val']['Questions']['File']
configs['datasets']['VQAv2']['Val']['Annotations'] = {}
configs['datasets']['VQAv2']['Val']['Annotations']['File'] = 'v2_mscoco_val2014_annotations.json'
configs['datasets']['VQAv2']['Val']['Annotations']['Zip_File'] = 'v2_Annotations_Val_mscoco.zip'
configs['datasets']['VQAv2']['Val']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Val_mscoco.zip'
configs['datasets']['VQAv2']['Val']['Annotations']['Path'] = configs['path_datasets'] + 'VQAv2' + configs['cdsign'] + 'Annotations' + configs['cdsign'] + configs['datasets']['VQAv2']['Val']['Annotations']['File']
configs['datasets']['VQAv2']['Val']['Images'] = configs['datasets']['VQAv1']['Val']['Images']
###################################################################################################################################################
##################################################################### VGv1.0 #########################################################################
configs['datasets']['VGv1.0'] = {}
configs['datasets']['VGv1.0']['Images'] = {}
configs['datasets']['VGv1.0']['Images']['Part_1'] = {}
configs['datasets']['VGv1.0']['Images']['Part_1']['File'] = 'VG_100K'
configs['datasets']['VGv1.0']['Images']['Part_1']['Zip_File'] = 'images.zip'
configs['datasets']['VGv1.0']['Images']['Part_1']['Link'] = 'https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip'
configs['datasets']['VGv1.0']['Images']['Part_1']['Path'] = configs['path_datasets'] + 'VGv1.0' + configs['cdsign'] + 'Images' + configs['cdsign'] + configs['datasets']['VGv1.0']['Images']['Part_1']['File'] + configs['cdsign']
configs['datasets']['VGv1.0']['Images']['Part_2'] = {}
configs['datasets']['VGv1.0']['Images']['Part_2']['File'] = 'VG_100K_2'
configs['datasets']['VGv1.0']['Images']['Part_2']['Zip_File'] = 'images2.zip'
configs['datasets']['VGv1.0']['Images']['Part_2']['Link'] = 'https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip'
configs['datasets']['VGv1.0']['Images']['Part_2']['Path'] = configs['path_datasets'] + 'VGv1.0' + configs['cdsign'] + 'Images' + configs['cdsign'] + configs['datasets']['VGv1.0']['Images']['Part_2']['File'] + configs['cdsign']
configs['datasets']['VGv1.0']['Questions_Answers'] = {}
configs['datasets']['VGv1.0']['Questions_Answers']['File'] = 'question_answers.json'
configs['datasets']['VGv1.0']['Questions_Answers']['Zip_File'] = 'question_answers.json.zip'
configs['datasets']['VGv1.0']['Questions_Answers']['Link'] = 'https://visualgenome.org/static/data/dataset/question_answers.json.zip'
configs['datasets']['VGv1.0']['Questions_Answers']['Path'] = configs['path_datasets'] + 'VGv1.0' + configs['cdsign'] + 'Questions_Answers' + configs['cdsign'] + configs['datasets']['VGv1.0']['Questions_Answers']['File']
####################################################### VGv1.2##### ###############################################################################
configs['datasets']['VGv1.2'] = configs['datasets']['VGv1.0']
###################################################################################################################################################
configs['chosen_datasets_list'] = ['VQAv1'] #['VQAv1', 'VQAv2', 'VGv1.0', 'VGv1.2']
configs['chosen_datasets_str'] = '_'.join(configs['chosen_datasets_list'])
configs['image_model'] = 'YOLOv4-448-1024' # ['ResNet152-448-2048', 'YOLOv4-448-1024']
configs['random_seed'] = 100
configs['path_image_model'] = configs['path_root'] + 'Image_Models' + configs['cdsign'] + configs['image_model'] + configs['cdsign']
configs['path_images_features'] = configs['path_image_model'] + 'Images_Features' + configs['cdsign']
configs['path_histories'] = configs['path_root'] + 'Histories' + configs['cdsign'] + configs['chosen_datasets_str'] + '_' + configs['image_model'] + configs['cdsign']
configs['path_plots'] = configs['path_root'] + 'Plots' + configs['cdsign'] + configs['chosen_datasets_str'] + '_' + configs['image_model'] + configs['cdsign']
configs['path_test_images'] = configs['path_root'] + 'Test_Images' + configs['cdsign']
# In[3]:
def create_directories(configs):
for key, value in configs.items():
if 'path_' in key:
Path(value).mkdir(parents=True, exist_ok=True)
print('Created ', value)
return True
# In[4]:
def save_pickle_data(data, path):
try:
with open(path, 'wb') as handle:
pk.dump(data, handle, protocol=pk.HIGHEST_PROTOCOL)
return True
except:
return False
# In[5]:
def load_pickle_data(path):
if os.path.exists(path) == True:
with open(path, 'rb') as handle:
data = pk.load(handle)
return data
else:
return False
# In[6]:
def check_MSCOCO_data(configs, dataset):
directory = configs['path_datasets'] + dataset + configs['cdsign']
Path(directory).mkdir(parents=True, exist_ok=True)
train_val = list(configs['datasets'][dataset].keys())
for tv in train_val:
data_types = list(configs['datasets'][dataset][tv].keys())
for dt in data_types:
file_name = configs['datasets'][dataset][tv][dt]['File']
file_path = configs['datasets'][dataset][tv][dt]['Path']
# file_path = directory + dt + configs['cdsign'] + file_name
zip_file = configs['datasets'][dataset][tv][dt]['Zip_File']
file_link = configs['datasets'][dataset][tv][dt]['Link']
##### Just for Development. Remove it Later ######
if dt == 'Images':
Path(file_path).mkdir(parents=True, exist_ok=True)
##################################################
if not os.path.exists(file_path):
print('Donwloading {0}'.format(file_name))
os.system('cd {0} && wget {1} && unzip {2} && rm {2}'.format(directory, file_link, zip_file))
else:
print('Exists {0}'.format(file_name))
# In[7]:
def check_VG_data(configs, dataset):
directory = configs['path_datasets'] + dataset + configs['cdsign']
Path(directory).mkdir(parents=True, exist_ok=True)
data_types = list(configs['datasets'][dataset].keys())
for dt in data_types:
if dt == 'Images':
for p in configs['datasets'][dataset]['Images']:
file_name = configs['datasets'][dataset][dt][p]['File']
file_path = configs['datasets'][dataset][dt][p]['Path']
# file_path = directory + dt + configs['cdsign'] + file_name
zip_file = configs['datasets'][dataset][dt][p]['Zip_File']
file_link = configs['datasets'][dataset][dt][p]['Link']
##### Just for Development. Remove it Later ######
Path(file_path).mkdir(parents=True, exist_ok=True)
##################################################
if not os.path.exists(file_path):
print('Donwloading {0}'.format(file_name))
os.system('cd {0} && wget {1} && unzip {2} && rm {2}'.format(directory, file_link, zip_file))
else:
print('Exists {0}'.format(file_name))
else:
file_name = configs['datasets'][dataset][dt]['File']
file_path = configs['datasets'][dataset][dt]['Path']
# file_path = directory + dt + configs['cdsign'] + file_name
zip_file = configs['datasets'][dataset][dt]['Zip_File']
file_link = configs['datasets'][dataset][dt]['Link']
if not os.path.exists(file_path):
print('Donwloading {0}'.format(file_name))
os.system('cd {0} && wget {1} && unzip {2} && rm {2}'.format(directory, file_link, zip_file))
else:
print('Exists {0}'.format(file_name))
# In[8]:
def get_MSCOCO_data(configs, dataset):
results = []
for key, value in configs['datasets'][dataset].items():
path_questions = configs['path_datasets'] + dataset + configs['cdsign'] + 'Questions' + configs['cdsign'] + value['Questions']['File']
path_annotations = configs['path_datasets'] + dataset + configs['cdsign'] + 'Annotations' + configs['cdsign'] + value['Annotations']['File']
with open(file=path_questions, mode='r') as questions:
data_questions = json.load(questions)
with open(file=path_annotations, mode='r') as answers:
data_answers = json.load(answers)
data_size = len(data_answers['annotations'])
for i in range(data_size):
question = data_questions['questions'][i]
answer = data_answers['annotations'][i]
answer_type = answer['answer_type']
data = {}
data['image_id'] = question['image_id']
data['question_id'] = question['question_id']
data['question'] = question['question'].lower()
data['answer'] = answer['multiple_choice_answer'].lower()
data['answers'] = answer['answers']
data['question_type'] = answer['question_type'].lower()
results.append(data)
print('{0} Q/A: {1}'.format(dataset, len(results)))
return results
# In[9]:
def get_VG_data(configs, dataset):
results = []
qap = configs['path_datasets'] + dataset + configs['cdsign'] + 'Questions_Answers' + configs['cdsign'] + configs['datasets'][dataset]['Questions_Answers']['File']
with open(file=qap, mode='r') as qa:
data = json.load(qa)
for d in data:
for j in range(len(d['qas'])):
que_ans = d['qas'][j]
temp = {}
temp['image_id'] = que_ans['image_id']
temp['question_id'] = que_ans['qa_id']
temp['answer'] = que_ans['answer'][:-1].lower()
temp['question'] = que_ans['question'].lower()
results.append(temp)
print('{0} Q/A: {1}'.format(dataset, len(results)))
return results
# In[10]:
def get_original_data(configs):
data = []
for dataset in configs['chosen_datasets_list']:
print('Preparing {0} Dataset:'.format(dataset))
if 'VQA' in dataset:
check_MSCOCO_data(configs, dataset)
data = data + get_MSCOCO_data(configs, dataset)
elif 'VG' in dataset:
check_VG_data(configs, dataset)
data = data + get_VG_data(configs, dataset)
return data
# In[11]:
def plot_questions_types_frequency(freqs, fname='Questions_Types'):
temp = {}
temp['keys'] = list(freqs.keys())
temp['vals'] = list(freqs.values())
temp = | pd.DataFrame(temp) | pandas.DataFrame |
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal, assert_series_equal
from gs_quant.api.gs.data import GsDataApi
from gs_quant.context_base import ContextMeta
from gs_quant.errors import MqValueError
from gs_quant.markets import MarketDataCoordinate
from gs_quant.session import GsSession, Environment
from gs_quant.target.assets import FieldFilterMap
from gs_quant.target.coordinates import MDAPIDataQuery
from gs_quant.target.data import MarketDataVendor, DataSetEntity, DataQuery, DataSetFieldEntity
test_coordinates = (
MarketDataCoordinate(mkt_type='Prime', mkt_quoting_style='price', mkt_asset='335320934'),
MarketDataCoordinate(mkt_type='IR', mkt_asset='USD', mkt_class='Swap', mkt_point=('2Y',)),
)
test_str_coordinates = (
'Prime_335320934_.price',
'IR_USD_Swap_2Y'
)
test_defn_dict = {'id': 'EXAMPLE_FROM_SLANG',
'name': 'Example DataSet',
'description': 'This is a test.',
'shortDescription': '',
'vendor': 'Goldman Sachs',
'dataProduct': 'TEST',
'entitlements': {'query': ['internal'],
'view': ['internal', 'role:DataServiceView', 'role:DataServiceAdmin'],
'upload': ['internal'],
'admin': ['internal', 'role:DataServiceAdmin'],
'edit': ['internal', 'role:DataServiceAdmin']},
'parameters': {'methodology': '',
'coverage': '',
'notes': '',
'history': '',
'frequency': '',
'applyMarketDataEntitlements': False,
'uploadDataPolicy': 'DEFAULT_POLICY',
'logicalDb': 'STUDIO_DAILY',
'symbolStrategy': 'ARCTIC_LINK',
'immutable': False,
'includeInCatalog': False,
'coverageEnabled': True},
'dimensions': {'timeField': 'date',
'transactionTimeField': 'updateTime',
'symbolDimensions': ['assetId'],
'nonSymbolDimensions': [{'field': 'price', 'column': 'PRICE'}],
'measures': [{'field': 'updateTime', 'column': 'UPDATE_TIME'}],
'entityDimension': 'assetId'},
'defaults': {'startSeconds': 2592000.0},
'createdById': '9eb7226166a44236905cae2913cfbd3c',
'createdTime': '2018-07-24T00:32:25.77Z',
'lastUpdatedById': '4ad8ebb6480d49e6b2e9eea9210685cf',
'lastUpdatedTime': '2019-10-24T14:20:13.653Z'}
bond_data = [
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0139,
'time': pd.to_datetime('2019-01-20T01:03:00Z')
},
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0141,
'time': | pd.to_datetime('2019-01-20T01:08:00Z') | pandas.to_datetime |
import os
import pathlib
import pickle
import random
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from S2S_load_sensor_data import read_data_datefolder_hourfile
from S2S_settings import settings
FPS = settings["FPS"]
FRAME_INTERVAL = settings["FRAME_INTERVAL"]
sample_counts = settings["sample_counts"]
def load_start_time(start_time_file, vid):
"""
load start time
Args:
start_time_file: str
vid: str, video
Returns:
int, start time
"""
df_start_time = pd.read_csv(start_time_file).set_index("video_name")
if vid not in df_start_time.index:
print("Error: ", vid, " not in ", start_time_file)
exit()
start_time = df_start_time.loc[vid]["start_time"]
return int(start_time)
def reliability_df_to_consecutive_seconds(
df_sensor_rel, window_size_sec, stride_sec, threshold=sample_counts
):
"""
Convert from reliability df to consecutive seconds represented with start and end time.
Args:
df_sensor_rel: dataframe, sensor reliability
window_size_sec:, int, window_size
stride_sec: int, stride
threshold: float
Returns:
win_start_end: a list of all the possible [window_start, window_end] pairs.
"""
# use the threshold criterion to select 'good' seconds
rel_seconds = (
df_sensor_rel[df_sensor_rel["SampleCounts"] > threshold]
.sort_values(by="Time")["Time"]
.values
)
win_start_end = consecutive_seconds(rel_seconds, window_size_sec, stride_sec)
return win_start_end
def consecutive_seconds(rel_seconds, window_size_sec, stride_sec=1):
"""
Return a list of all the possible [window_start, window_end] pairs
containing consecutive seconds of length window_size_sec inside.
Args:
rel_seconds: a list of qualified seconds
window_size_sec: int
stride_sec: int
Returns:
win_start_end: a list of all the possible [window_start, window_end] pairs.
Test:
>>> rel_seconds = [2,3,4,5,6,7,9,10,11,12,16,17,18]; window_size_sec = 3; stride_sec = 1
>>> print(consecutive_seconds(rel_seconds, window_size_sec))
>>> [[2, 4], [3, 5], [4, 6], [5, 7], [9, 11], [10, 12], [16, 18]]
"""
win_start_end = []
for i in range(0, len(rel_seconds) - window_size_sec + 1, stride_sec):
if rel_seconds[i + window_size_sec - 1] - rel_seconds[i] == window_size_sec - 1:
win_start_end.append([rel_seconds[i], rel_seconds[i + window_size_sec - 1]])
return win_start_end
def load_vid_feat(vid_file, fps, start_time):
feat = np.load(vid_file)["feature"][0]
print("video feature shape:", feat.shape)
frame_len = 1000.0 / fps # duration of a frame in ms
frames = feat.shape[0] # number of frames
len_ms = frames * frame_len # duration of all frames in ms
timestamps_int = np.arange(
start_time,
start_time + len_ms,
frame_len
).astype(int)
l = min(len(timestamps_int), feat.shape[0])
timestamps_int = timestamps_int[:l]
feat = feat[:l, :]
df_flow = pd.DataFrame(
data=np.hstack((timestamps_int[:,None], feat)),
index=[i for i in range(feat.shape[0])],
columns=["time"]+['f'+str(i) for i in range(feat.shape[1])]
)
df_flow["second"] = (df_flow["time"] / 1000).astype(int)
df_flow = df_flow.reset_index()
return df_flow, len_ms
def load_sensors_cubic(
sensor_path, sub, device, sensors, sensor_col_headers, start_time, end_time, fps
):
"""
load sensor data with cubic spline resampling
Args:
sensor_path: str,
sub: str, subject
device: str
sensors: list, sensors
sensor_col_headers: list of sensor column headers
start_time: int
end_time: int
fps: float
Returns:
dataframe, sensor data
"""
df_list = []
for s, col in zip(sensors, sensor_col_headers):
df_sensor = read_data_datefolder_hourfile(
sensor_path, sub, device, s, start_time, end_time
)
df_sensor = df_sensor[["time", col]]
df_sensor["time"] = pd.to_datetime(df_sensor["time"], unit="ms")
df_sensor = df_sensor.set_index("time")
df_resample = df_sensor.resample(FRAME_INTERVAL).mean()
# FRAME_INTERVAL as 0.03336707S is the most closest value to 1/29.969664 pandas accepts
df_resample = df_resample.interpolate(method="spline", order=3) # cubic spline interpolation
df_list.append(df_resample)
df_sensors = | pd.concat(df_list, axis=1) | pandas.concat |
import os
import json
import requests
from pathlib import Path
import pandas as pd
from .formatUtil import formatTopStocks
from .crawler import Crawler
from datetime import datetime
import tushare as ts
class Plate(Crawler):
def __init__(self):
super().__init__()
self.__fileBasePath = str(os.path.abspath('.')) + '/Data'
self.__baseURL = "https://pchq.kaipanla.com/w1/api/index.php"
def __getPlateFileData(self, fileName):
filePath = self.__fileBasePath + "/plateMap/" + fileName
if os.path.exists(filePath):
with open(filePath, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
else:
print(filePath + " does not exist!")
return None
def getAllStockPlateMapping(self):
return self.__getPlateFileData("STOCKS_TO_PLATES.json")
def getAllNormalPlates(self):
return self.__getPlateFileData("NORMAL_TO_STOCKS.json")
def getAllCarePlates(self):
return self.__getPlateFileData("CARES_TO_STOCKS.json")
def findOnePlateInfo(self, code):
code = str(code)
ctsMapInfo = self.getAllCarePlates()
ntsMapInfo = self.getAllNormalPlates()
if code in ctsMapInfo.keys():
name = ctsMapInfo[code]["name"]
stocks = ctsMapInfo[code]["stocks"]
return {"code": code, "name": name, "stocks": stocks}
elif code in ntsMapInfo.keys():
name = ntsMapInfo[code]["name"]
stocks = ntsMapInfo[code]["stocks"]
return {"code": code, "name": name, "stocks": stocks}
else:
return None
def getOnePlateData(self, code, lagDays=0):
code = str(code)
p = self.__fileBasePath + '/plateData/'
fileName = str(code) + ".csv"
filePath = Path(p + fileName)
if filePath.is_file():
old_df = | pd.read_csv(filePath) | pandas.read_csv |
"""
/*
* Copyright (C) 2019-2021 University of South Florida
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import os
# Import dependencies
from collections import defaultdict
from pathlib import Path
import haversine as hs
import numpy as np
import pandas as pd
from haversine import Unit
from src.gt_merger import constants
from src.gt_merger.args import get_parser
from src.gt_merger.preprocess import preprocess_gt_data, preprocess_oba_data, is_valid_oba_dataframe, \
is_valid_gt_dataframe
# -------------------------------------------
def main():
# Verify if the OBA input file exists
if not os.path.isfile(command_line_args.obaFile):
print("OBA data file not found:", command_line_args.obaFile)
exit()
# Verify if GT input file exists
if not os.path.isfile(command_line_args.gtFile):
print("Ground truth data file not found:", command_line_args.gtFile)
exit()
# Verify if there is a list of devices
if command_line_args.deviceList:
# Verify if the list of devices file exists
if os.path.isfile(command_line_args.deviceList):
with open(command_line_args.deviceList) as f:
list_of_devices = f.readline().split(",")
list_of_devices = [s.strip() for s in list_of_devices]
else:
print("File with white list of devices not found:", command_line_args.deviceList)
exit()
else:
list_of_devices = []
# Verify if the data folder exists
if not os.path.isdir(command_line_args.outputDir):
print("Data folder not found, trying to create it in the current working directory:",
command_line_args.outputDir)
try:
os.makedirs(command_line_args.outputDir, exist_ok=True)
except OSError:
print("There was an error while creating the data folder:", command_line_args.outputDir)
exit()
# Create sub-folders for output an logs
path_logs = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS)
if not os.path.isdir(path_logs):
try:
os.mkdir(path_logs)
except OSError:
print("There was an error while creating the sub folder for logs:", path_logs)
exit()
path_output = os.path.join(command_line_args.outputDir, constants.FOLDER_MERGED_DATA)
if not os.path.isdir(path_output):
try:
os.mkdir(path_output)
except OSError:
print("There was an error while creating the sub-folder for output files:", path_logs)
exit()
# Create path OS independent for excel file
excel_path = Path(command_line_args.gtFile)
# Load ground truth data to a dataframe
gt_data = pd.read_excel(excel_path)
# Validate gt dataframe
if not is_valid_gt_dataframe(gt_data):
print("Ground truth data frame is empty or does not have the required columns.")
exit()
# Preprocess ground truth data
gt_data, data_gt_dropped = preprocess_gt_data(gt_data, command_line_args.removeStillMode)
print("Ground truth data preprocessed.")
# Save data to be dropped to a csv file
dropped_file_path = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS,
constants.GT_DROPPED_DATA_FILE_NAME)
data_gt_dropped.to_csv(path_or_buf=dropped_file_path, index=False)
# Create path OS independent for csv file
csv_path = Path(command_line_args.obaFile)
# Load OBA data
oba_data = pd.read_csv(csv_path)
# Validate oba dataframe
if not is_valid_oba_dataframe(oba_data):
print("OBA data frame is empty or does not have the required columns.")
exit()
# If a devices white list was provided, list the devices
if list_of_devices:
oba_data = oba_data[oba_data["User ID"].isin(list_of_devices)]
# Preprocess OBA data
oba_data, data_csv_dropped = preprocess_oba_data(oba_data, command_line_args.minActivityDuration,
command_line_args.minTripLength, command_line_args.removeStillMode)
print("OBA data preprocessed.")
print(oba_data.info())
print(gt_data.info())
# Data preprocessing IS OVER
# Save oba dropped data to a csv file
dropped_file_path = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS,
constants.OBA_DROPPED_DATA_FILE_NAME)
data_csv_dropped.to_csv(path_or_buf=dropped_file_path, index=False)
if command_line_args.iterateOverTol:
first_tol = 30000
save_to_path = os.path.join(constants.FOLDER_MERGED_DATA, "batch")
else:
save_to_path = os.path.join(constants.FOLDER_MERGED_DATA)
first_tol = constants.TOLERANCE
for tol in range(first_tol, command_line_args.tolerance + 1, constants.CALCULATE_EVERY_N_SECS):
print("TOLERANCE:", str(tol))
# merge dataframes one to one or one to many according to the commandline parameter
if command_line_args.mergeOneToOne:
merged_data_frame, num_matches_df = merge(gt_data, oba_data, tol)
else:
merged_data_frame, num_matches_df, unmatched_oba_trips_df = merge_to_many(gt_data, oba_data, tol)
# Save unmatched oba records to csv
unmatched_file_path = os.path.join(command_line_args.outputDir, save_to_path,
"oba_records_without_match_on_GT.csv")
unmatched_oba_trips_df.to_csv(path_or_buf=unmatched_file_path, index=False)
# Calculate difference
merged_data_frame['Time_Difference'] = merged_data_frame.apply(
lambda x: (x['Activity Start Date and Time* (UTC)'] - x['GT_DateTimeOrigUTC_Backup']) / np.timedelta64(1, 's')
if pd.notna(x['Activity Start Date and Time* (UTC)']) else "", 1)
# Calculate distance between GT and OBA starting points
merged_data_frame['Distance_Difference'] = merged_data_frame.apply(
lambda row: hs.haversine((row['GT_LatOrig'], row['GT_LonOrig']),
(row['Origin latitude (*best)'], row['Origin longitude (*best)']),
unit=Unit.METERS), axis=1)
# Add Manual Assignment Column before reorganize
merged_data_frame["Manual Assignment"] = ''
# Reorder merged dataframe columns
new_column_orders = constants.GT_NEW_COLUMNS_ORDER + constants.OBA_NEW_COLUMNS_ORDER
merged_data_frame = merged_data_frame[new_column_orders]
# Save merged data to csv
merged_file_path = os.path.join(command_line_args.outputDir, save_to_path,
constants.MERGED_DATA_FILE_NAME + "_" + str(tol) + ".csv")
num_matches_file_path = os.path.join(command_line_args.outputDir, save_to_path,
"num_matches" + "_" + str(tol) + ".csv")
merged_data_frame.to_csv(path_or_buf=merged_file_path, index=False)
num_matches_df.to_csv(path_or_buf=num_matches_file_path, index=False)
def merge(gt_data, oba_data, tolerance):
"""
Merge gt_data dataframe and oba_data dataframe using the nearest value between columns 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'. Before merging, the data is grouped by 'GT_Collector' on gt_data and
each row on gt_data will be paired with one or none of the rows on oba_data grouped by userId.
:param tolerance: maximum allowed difference (seconds) between 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'.
:param gt_data: dataframe with preprocessed data from ground truth XLSX data file
:param oba_data: dataframe with preprocessed data from OBA firebase export CSV data file
:return: dataframe with the merged data and a dataframe with summary of matches by collector/oba_user(phone).
"""
list_collectors = gt_data['GT_Collector'].unique()
list_oba_users = oba_data['User ID'].unique()
merged_df = pd.DataFrame()
matches_df = pd.DataFrame(list_collectors, columns=['GT_Collector'])
list_total_trips = []
list_matches = []
matches_dict = defaultdict(list)
for collector in list_collectors:
print("Merging data for collector ", collector)
# Create dataframe for a collector on list_collectors
gt_data_collector = gt_data[gt_data["GT_Collector"] == collector]
# Make sure dataframe is sorted by 'ClosesTime'
gt_data_collector.sort_values('GT_DateTimeOrigUTC', inplace=True)
# Add total trips per collector
list_total_trips.append(len(gt_data_collector))
i = 0
list_matches_by_phone = []
for oba_user in list_oba_users:
# Create a dataframe with the oba_user activities only
oba_data_user = oba_data[oba_data["User ID"] == oba_user]
# Make sure dataframes is sorted by 'Activity Start Date and Time* (UTC)'
oba_data_user.sort_values('Activity Start Date and Time* (UTC)', inplace=True)
temp_merge = pd.merge_asof(gt_data_collector, oba_data_user, left_on="GT_DateTimeOrigUTC",
right_on="Activity Start Date and Time* (UTC)",
direction="forward",
tolerance=pd.Timedelta(str(tolerance) + "ms"), left_by='GT_Mode',
right_by='Google Activity')
merged_df = pd.concat([merged_df, temp_merge], ignore_index=True)
# Print number of matches
print("\t Oba user", oba_user[-4:], "\tMatches: ", (temp_merge["User ID"] == oba_user).sum(), " out of ",
(temp_merge["GT_Collector"] == collector).sum())
list_matches_by_phone.append((temp_merge["User ID"] == oba_user).sum())
matches_dict[oba_user[-4:]].append((temp_merge["User ID"] == oba_user).sum())
i += 1
list_matches.append(list_matches_by_phone)
matches_df['total_trips'] = list_total_trips
numbers_df = | pd.DataFrame.from_dict(matches_dict) | pandas.DataFrame.from_dict |
from __future__ import absolute_import, division, print_function
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.infer import EmpiricalMarginal, SVI, Trace_ELBO
from pyro.contrib.autoguide import AutoMultivariateNormal
from pyro.infer.mcmc.api import MCMC
from pyro.infer.mcmc import NUTS
import pyro.optim as optim
from pyro.util import optional
from pyro import poutine
pyro.set_rng_seed(1)
logging.basicConfig(format='%(message)s', level=logging.INFO)
# Enable validation checks
pyro.enable_validation(True)
smoke_test = ('CI' in os.environ)
pyro.set_rng_seed(1)
# Draw fake parameters -- set number of patches and subpatches
SUBPATCHES_IN_PATCH = 16
N_PATCHES = 100
N_CHAINS = 3
training_data = | pd.DataFrame() | pandas.DataFrame |
"""
@<NAME>
==============================================
Training the different models by multiple sequential trials
==============================================
How to train mixtures and HMMs with various observation models on the same dataset.
"""
import bnpy
import numpy as np
import os
from matplotlib import pylab
import glob
import pandas as pd
import warnings
warnings.filterwarnings("ignore", category= DeprecationWarning)
SMALL_FIG_SIZE = (2.5, 2.5)
FIG_SIZE = (5, 5)
pylab.rcParams['figure.figsize'] = FIG_SIZE
pylab.close('all')
dataPath = '/home/birl/npBayesHMM/HIRO_SA_DATA/REAL_HIRO_ONE_SA_SUCCESS'
file_id = ['02', '03', '04', '05', '06','07', '08', '09', '10', '11', '12', '13', '14', '15', '16']
nLap = 500
time_step = 0.005
dataType = ['R_Torques.dat'] #'R_Angles.dat','R_CartPos.dat' tuples can not edited
STATE = ['APPROACH', 'ROTATION', 'INSERTION', 'MATING']
###############################################################################
def load_one_trial(dataPath,id):
sensor = pd.DataFrame()
Rstate = pd.DataFrame()
for folders in glob.glob(os.path.join(dataPath, "*" + id)):
for dat_file in os.listdir(folders):
if dat_file in dataType:
raw_data = pd.read_csv(folders +'/' + dat_file, sep='\s+', header=None, skiprows=1, usecols = range(1,7))
sensor = raw_data.transpose().append(sensor) # transpose to [ndim x length]
elif dat_file == 'R_State.dat':
Rstate = | pd.read_csv(folders + '/' + dat_file, sep='\s+', header=None, skiprows=1) | pandas.read_csv |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
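# Shared module-level frames built from pandas' internal test data: a float frame,
# an int64-cast frame and a time-series frame, plus a categorical-index frame and
# a plain copy used as the "mixed" frame.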
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
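    # "records" and "values" orients do not serialize the index, and "values"
    # also drops the column labels, so align the expected frame before comparing.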
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
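        # autouse fixture: hand every test fresh copies of the shared frames
        # and drop them again once the test finishes.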
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
        del self.mixed_frame
        del self.categorical
def test_frame_double_encoded_labels(self, orient):
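        # index/column labels containing quotes, slashes and backslashes must
        # survive JSON escaping on the round trip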
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
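        # split/records/values orients do not key rows by index label, so a
        # duplicated index should round-trip unchanged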
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
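        # index/columns orients use index labels as JSON object keys, so a
        # duplicated index must raise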
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
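        # serialize with the given orient, read back with matching options and
        # check the frame survives unchanged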
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
        # index, columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
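        # double_precision controls how floats are rounded in the serialized output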
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
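        # A mixed object column of Timedelta and Timestamp is serialised as raw
        # nanosecond integers (duration ns and epoch ns respectively), which is
        # why the expected frame below is built from the .value attributes and
        # read back with an explicit int64 dtype.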
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize("date_format", ["iso", "epoch"])
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
# GH28156: to_json not correctly formatting Timedelta
data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
if date_format == "iso":
expected = (
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
)
else:
expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
if as_object:
expected = expected.replace("}", ',"a":"a"}')
result = ser.to_json(date_format=date_format)
assert result == expected
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
tm.assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-10 05:00:00Z"),
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
Timestamp("2013-01-10 00:00:00-0500"),
],
)
def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
@pytest.mark.parametrize(
"tz_range",
[
pd.date_range("2013-01-01 05:00:00Z", periods=2),
pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
pd.date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with tm.ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e21, index=["articleId"])
tm.assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
tm.assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)
# TODO: there is a near-identical test for pytables; can we share?
def test_latin_encoding(self):
# GH 13774
pytest.skip("encoding not implemented in .to_json(), xref #13774")
values = [
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
]
values = [
[x.decode("latin-1") if isinstance(x, bytes) else x for x in y]
for y in values
]
examples = []
for dtype in ["category", object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding="latin-1"):
with tm.ensure_clean("test.json") as path:
s.to_json(path, encoding=encoding)
retr = | read_json(path, encoding=encoding) | pandas.read_json |
import base64
import os, shutil, io, zipfile
from re import L, match
import json
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
import pandas as pd
import pint
import numpy as np
#import geopandas as gpd
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseBadRequest
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate
from django.core.files.storage import FileSystemStorage
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.utils.text import slugify
from api.exceptions import ModelNotExistException
from api.models.outputs import Run, Cambium
from api.tasks import run_model, task_status, build_model,upload_ts
from api.models.calliope import Abstract_Tech, Abstract_Tech_Param, Parameter
from api.models.configuration import Model, ParamsManager, Model_User,User_File, Location, Technology, Tech_Param, Loc_Tech, Loc_Tech_Param, Timeseries_Meta
from api.utils import zip_folder, initialize_units, convert_units, noconv_units
from taskmeta.models import CeleryTask
@csrf_protect
def build(request):
"""
Build and save the input files (YAML and CSV) for a Calliope run.
Parameters:
model_uuid (uuid): required
scenario_id (int): required
start_date (timestamp): required
end_date (timestamp): required
cluster (bool): optional
manual (bool): optional
Returns (json): Action Confirmation
Example:
GET: /api/build/
"""
# Input parameters
model_uuid = request.GET.get("model_uuid", None)
scenario_id = request.GET.get("scenario_id", None)
start_date = request.GET.get("start_date", None)
end_date = request.GET.get("end_date", None)
cluster = (request.GET.get("cluster", 'true') == 'true')
manual = (request.GET.get("manual", 'false') == 'true')
timestep = request.GET.get("timestep", '1H')
try:
pd.tseries.frequencies.to_offset(timestep)
except ValueError:
payload = {
"status": "Failed",
"message": "'"+timestep+"' is not a valid timestep.",
        }
        return HttpResponse(json.dumps(payload, indent=4),
                            content_type="application/json")
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
try:
start_date = datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.strptime(end_date,
"%Y-%m-%d") + timedelta(hours=23)
subset_time = str(start_date.date()) + " to " + str(end_date.date())
year = start_date.year
# model and scenario instances
scenario = model.scenarios.get(id=scenario_id)
# Create run instance
run = Run.objects.create(
model=model,
scenario=scenario,
year=year,
subset_time=subset_time,
status=task_status.QUEUED,
inputs_path="",
cluster=cluster,
manual=manual,
timestep=timestep,
)
# Generate File Path
timestamp = datetime.now().strftime("%Y-%m-%d %H%M%S")
model_name = ParamsManager.simplify_name(model.name)
scenario_name = ParamsManager.simplify_name(scenario.name)
inputs_path = "{}/{}/{}/{}/{}/{}/{}/inputs".format(
settings.DATA_STORAGE,
model.uuid,
model_name,
scenario_name,
year,
subset_time,
timestamp,
)
inputs_path = inputs_path.lower().replace(" ", "-")
os.makedirs(inputs_path, exist_ok=True)
# Celery task
async_result = build_model.apply_async(
kwargs={
"inputs_path": inputs_path,
"run_id": run.id,
"model_uuid": model_uuid,
"scenario_id": scenario_id,
"start_date": start_date,
"end_date": end_date,
}
)
build_task = CeleryTask.objects.get(task_id=async_result.id)
run.build_task = build_task
run.save()
payload = {
"status": "Success",
"model_uuid": model_uuid,
"scenario_id": scenario_id,
"year": start_date.year,
}
except Exception as e:
payload = {
"status": "Failed",
"message": "Please contact admin at <EMAIL> ' \
'regarding this error: {}".format(
str(e)
),
}
return HttpResponse(json.dumps(payload, indent=4),
content_type="application/json")
@csrf_protect
def optimize(request):
"""
Optimize a Calliope problem
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/optimize/
"""
run_id = request.POST["run_id"]
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
payload = {"run_id": run_id}
# run instance does not exist.
try:
run = model.runs.get(id=run_id)
except ObjectDoesNotExist as e:
print(e)
payload["message"] = "Run ID {} does not exist.".format(run_id)
return HttpResponse(
json.dumps(payload, indent=4), content_type="application/json"
)
# model path does not exist
model_path = os.path.join(run.inputs_path, "model.yaml")
if not os.path.exists(model_path):
payload["message"] = "Invalid model config path!"
return HttpResponse(
json.dumps(payload, indent=4), content_type="application/json"
)
# run celery task
async_result = run_model.apply_async(
kwargs={"run_id": run_id, "model_path": model_path,
"user_id": request.user.id}
)
run_task, _ = CeleryTask.objects.get_or_create(task_id=async_result.id)
run.run_task = run_task
run.status = task_status.QUEUED
run.save()
payload = {"task_id": async_result.id}
return HttpResponse(json.dumps(payload, indent=4),
content_type="application/json")
@csrf_protect
def delete_run(request):
"""
Delete a scenario run
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_run/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
run = model.runs.get(id=run_id)
if run.outputs_key:
data = {
'filename': run.outputs_key,
'project_uuid': str(model.uuid),
'private_key': settings.CAMBIUM_API_KEY,
}
try:
url = urljoin(settings.CAMBIUM_URL, "api/remove-data/")
requests.post(url, data=data).json()
except Exception as e:
print("Cambium removal failed - {}".format(e))
run.delete()
return HttpResponseRedirect("")
@csrf_protect
def publish_run(request):
"""
Publish a scenario run to Cambium (https://cambium.nrel.gov/)
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/publish_run/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
run = model.runs.filter(id=run_id).first()
msg = Cambium.push_run(run)
payload = {'message': msg}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_run_description(request):
"""
Update the description for a run
Parameters:
model_uuid (uuid): required
run_id (int): required
description (str): required
Returns (json): Action Confirmation
Example:
POST: /api/update_run_description/
"""
model_uuid = request.POST["model_uuid"]
run_id = int(request.POST["id"])
description = request.POST["value"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
try:
run = model.runs.get(id=run_id)
except ObjectDoesNotExist as e:
print(e)
payload = {}
payload["message"] = "Run ID {} does not exist.".format(run_id)
return HttpResponse(
json.dumps(payload, indent=4), content_type="application/json"
)
if description != run.description:
run.description = description
run.save()
payload = description
return HttpResponse(payload, content_type="text/plain")
def basic_auth_required(api_view):
def wrapper(request, *args, **kwargs):
try:
auth = request.META["HTTP_AUTHORIZATION"].split()
assert auth[0].lower() == "basic"
email, password = base64.b64decode(auth[1]).decode("utf-8").split(":")
user = authenticate(username=email, password=password)
if user is not None and user.is_active:
request.user = user
return api_view(request, *args, **kwargs)
else:
msg = "Invalid email or password! Please try again."
return HttpResponse(json.dumps({"error": msg}),
content_type="application/json")
except Exception as e:
msg = str(e)
if str(e) == "'HTTP_AUTHORIZATION'":
                msg = "Authorization failed! Please try Basic Auth."
return HttpResponse(json.dumps({"error": msg}),
content_type="application/json")
return wrapper
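# Illustrative use of the decorator above (the view name is hypothetical):
#   @basic_auth_required
#   def my_protected_view(request):
#       ...  # request.user is populated when the Basic Auth credentials are valid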
@csrf_protect
def download(request):
"""
Download files from a path to client machine
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns (json): Action Confirmation
Example:
GET: /api/download/
"""
model_uuid = request.GET['model_uuid']
run_id = request.GET['run_id']
download_type = request.GET['type']
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
try:
run = Run.objects.get(id=run_id)
except Exception:
raise Http404
if download_type == 'inputs':
path = run.inputs_path
elif download_type == "outputs":
path = run.outputs_path
else:
raise Http404
if os.path.exists(path):
file_path = zip_folder(path)
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type="application/text")
response['Content-Disposition'] = 'inline; filename='+slugify(model.name+'_'+run.scenario.name+'_'+str(run.year)+'_')+os.path.basename(file_path)
return response
return HttpResponse(
json.dumps({"message": "Not Found!"}, indent=4),
content_type="application/json"
)
@csrf_protect
def upload_outputs(request):
"""
Upload a zipped outputs file.
Parameters:
model_uuid (uuid): required
run_id (int): required
description (str): optional
myfile (file): required
Returns:
Example:
POST: /api/upload_outputs/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
try:
run = Run.objects.get(id=run_id)
except Exception:
print("No Run Found")
raise Http404
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.zip':
model_dir = run.inputs_path.replace("/inputs","")
out_dir = os.path.join(model_dir,"outputs")
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
print(myfile.name)
fs = FileSystemStorage()
filename = fs.save(os.path.join(out_dir,myfile.name), myfile)
# Default assumes CSV files were directly zipped into archive
run.outputs_path = out_dir
shutil.unpack_archive(filename,out_dir)
# Loop through options for archived output directories rather than base CSVs
# TODO: Add user input on location of output CSVs via API option
for dir in ['outputs','model_outputs']:
if dir in os.listdir(out_dir):
run.outputs_path = os.path.join(out_dir,dir)
run.save()
return redirect("/%s/runs/" % model_uuid)
return redirect("/%s/runs/" % model_uuid)
print("No File Found")
raise Http404
@csrf_protect
@login_required
def upload_locations(request):
"""
Upload a CSV file with new/updated locations.
Parameters:
model_uuid (uuid): required
description (str): optional
myfile (file): required
col_map (dict): optional
Returns:
Example:
POST: /api/upload_locations/
"""
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
context = {
'logs':[],
"model": model
}
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.csv':
df = pd.read_csv(myfile)
else:
context['logs'].append('File format not supported. Please use a .csv.')
return render(request, "bulkresults.html", context)
if not set(['pretty_name','longitude','latitude']).issubset(set(df.columns)):
context['logs'].append('Missing required columns. pretty_name, longitude, latitude are required.')
return render(request, "bulkresults.html", context)
df = df.loc[:,df.columns.isin(['id','pretty_name','longitude','latitude','available_area','description'])]
df['model_id'] = model.id
df['name'] = df['pretty_name'].apply(lambda x: ParamsManager.simplify_name(x))
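        # Row-by-row validation: rows missing pretty_name or coordinates are
        # skipped, optional fields default to None, and a supplied id updates
        # the matching Location instead of creating a new one.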
for i,row in df.iterrows():
if pd.isnull(row['pretty_name']):
context['logs'].append(str(i)+'- Missing pretty name. Skipped')
continue
if pd.isnull(row['latitude']) or pd.isnull(row['longitude']):
context['logs'].append(str(i)+'- Missing latitude or longitude. Skipped')
continue
if pd.isnull(row['available_area']):
row['available_area'] = None
if pd.isnull(row['description']):
row['description'] = None
if 'id' not in row.keys() or pd.isnull(row['id']):
location = Location.objects.create(**(row.dropna()))
else:
location = Location.objects.filter(id=row['id']).first()
if not location:
context['logs'].append(str(i)+'- Location '+row['pretty_name']+': No location with id '+str(row['id'])+' found to update. Skipped.')
continue
location.name = row['name']
location.pretty_name = row['pretty_name']
location.longitude = row['longitude']
location.latitude = row['latitude']
location.available_area = row['available_area']
location.description = row['description']
location.save()
return render(request, "bulkresults.html", context)
context['logs'].append("No file found")
return render(request, "bulkresults.html", context)
@csrf_protect
@login_required
def upload_techs(request):
"""
Upload a CSV file with new/updated technologies.
Parameters:
model_uuid (uuid): required
description (str): optional
myfile (file): required
col_map (dict): optional
Returns:
Example:
POST: /api/upload_techs/
"""
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
context = {
'logs':[],
"model": model
}
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.csv':
df = pd.read_csv(myfile)
else:
context['logs'].append('File format not supported. Please use a .csv.')
return render(request, "bulkresults.html", context)
if not set(['pretty_name','abstract_tech']).issubset(set(df.columns)):
context['logs'].append('Missing required columns. pretty_name, abstract_tech are required.')
return render(request, "bulkresults.html", context)
df['name'] = df['pretty_name'].apply(lambda x: ParamsManager.simplify_name(str(x)))
        if 'pretty_tag' in df.columns:
            df['tag'] = df['pretty_tag'].apply(lambda x: ParamsManager.simplify_name(str(x)))
        else:
            df['tag'] = None
            df['pretty_tag'] = None
ureg = initialize_units()
for i,row in df.iterrows():
if pd.isnull(row['abstract_tech']):
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Missing abstract_tech. Skipped.')
continue
if row['abstract_tech'] in ['conversion','conversion_plus']:
if 'carrier_in' not in row.keys() or 'carrier_out' not in row.keys() or pd.isnull(row['carrier_in']) or pd.isnull(row['carrier_out']):
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Conversion techs require both carrier_in and carrier_out. Skipped.')
continue
else:
if 'carrier' not in row.keys() or pd.isnull(row['carrier']):
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Missing a carrier. Skipped.')
continue
if 'id' not in row.keys() or pd.isnull(row['id']):
if pd.isnull(row['tag']):
technology = Technology.objects.create(
model_id=model.id,
abstract_tech_id=Abstract_Tech.objects.filter(name=row['abstract_tech']).first().id,
name=row['name'],
pretty_name=row['pretty_name'],
)
else:
technology = Technology.objects.create(
model_id=model.id,
abstract_tech_id=Abstract_Tech.objects.filter(name=row['abstract_tech']).first().id,
name=row['name'],
pretty_name=row['pretty_name'],
tag=row['tag'],
pretty_tag=row['pretty_tag']
)
else:
technology = Technology.objects.filter(model=model,id=row['id']).first()
if not technology:
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': No tech with id '+str(row['id'])+' found to update. Skipped.')
continue
technology.abstract_tech = Abstract_Tech.objects.filter(name=row['abstract_tech']).first()
technology.name = row['name']
technology.pretty_name = row['pretty_name']
if pd.isnull(row['tag']) or pd.isnull(row['pretty_tag']):
technology.tag = None
technology.pretty_tag = None
else:
technology.tag = row['tag']
technology.pretty_tag = row['pretty_tag']
technology.save()
Tech_Param.objects.filter(model_id=model.id,technology_id=technology.id).delete()
Tech_Param.objects.create(
model_id=model.id,
technology_id=technology.id,
parameter_id=1,
value=row['abstract_tech'],
)
Tech_Param.objects.create(
model_id=model.id,
technology_id=technology.id,
parameter_id=2,
value=row['pretty_name'],
)
update_dict = {'edit':{'parameter':{},'timeseries':{}},'add':{},'essentials':{}}
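            # Walk the remaining columns of the row: names whose suffix after the
            # last underscore is a 4-digit year become year-specific entries under
            # 'add', values of the form "file=<name>:<timestamp_col>:<value_col>"
            # become timeseries references, and everything else is a plain
            # parameter edit (unit-converted unless its unit is in noconv_units).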
for f,v in row.iteritems():
if pd.isnull(v):
continue
pyear = f.rsplit('_',1)
if len(pyear) > 1 and match('[0-9]{4}',pyear[1]):
proot = pyear[0].rsplit('.',1)
if len(proot) > 1:
p = Parameter.objects.filter(root=proot[0],name=proot[1]).first()
else:
p = Parameter.objects.filter(name=pyear[0]).first()
if p == None:
p = Parameter.objects.filter(name=f).first()
if p == None:
continue
else:
pyear = pyear[1]
else:
pyear = None
proot = f.rsplit('.',1)
if len(proot) > 1:
p = Parameter.objects.filter(root=proot[0],name=proot[1]).first()
else:
p = Parameter.objects.filter(name=f).first()
if p == None:
continue
# Essential params
if p.is_essential:
update_dict['essentials'][p.pk] = v
# Timeseries params
elif str(v).startswith('file='):
fields = v.split('=')[1].split(':')
filename = fields[0]
t_col = fields[1]
v_col = fields[2]
file = User_File.objects.filter(model=model, filename='user_files/'+filename)
if not file:
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Column '+f+' missing file "' + filename+ '" for timeseries. Parameter skipped.')
continue
existing = Timeseries_Meta.objects.filter(model=model,
original_filename=filename,
original_timestamp_col=t_col,
original_value_col=v_col).first()
if not existing:
existing = Timeseries_Meta.objects.create(
model=model,
name=filename+str(t_col)+str(v_col),
original_filename=filename,
original_timestamp_col=t_col,
original_value_col=v_col,
)
try:
async_result = upload_ts.apply_async(
kwargs={
"model_uuid": model_uuid,
"timeseries_meta_id": existing.id,
"file_id": file.first().id,
"timestamp_col": t_col,
"value_col": v_col,
"has_header": True,
}
)
upload_task = CeleryTask.objects.get(task_id=async_result.id)
existing.upload_task = upload_task
existing.is_uploading = True
existing.save()
except Exception as e:
context['logs'].append(e)
update_dict['edit']['timeseries'][p.pk] = existing.id
else:
if p.units in noconv_units:
if pyear:
if p.pk not in update_dict['add'].keys():
update_dict['add'][p.pk] = {'year':[],'value':[]}
if p.pk in update_dict['edit']['parameter']:
update_dict['add'][p.pk]['year'].append('0')
update_dict['add'][p.pk]['value'].append(update_dict['edit']['parameter'][p.pk])
update_dict['edit']['parameter'].pop(p.pk)
update_dict['add'][p.pk]['year'].append(pyear)
update_dict['add'][p.pk]['value'].append(v)
elif p.pk in update_dict['add'].keys():
update_dict['add'][p.pk]['year'].append('0')
update_dict['add'][p.pk]['value'].append(v)
else:
update_dict['edit']['parameter'][p.pk] = v
else:
try:
if pyear:
if p.pk not in update_dict['add'].keys():
update_dict['add'][p.pk] = {'year':[],'value':[]}
if p.pk in update_dict['edit']['parameter']:
update_dict['add'][p.pk]['year'].append('0')
update_dict['add'][p.pk]['value'].append(update_dict['edit']['parameter'][p.pk])
update_dict['edit']['parameter'].pop(p.pk)
update_dict['add'][p.pk]['year'].append(pyear)
update_dict['add'][p.pk]['value'].append(convert_units(ureg,v,p.units))
elif p.pk in update_dict['add'].keys():
update_dict['add'][p.pk]['year'].append('0')
update_dict['add'][p.pk]['value'].append(v)
else:
update_dict['edit']['parameter'][p.pk] = convert_units(ureg,v,p.units)
except pint.errors.DimensionalityError as e:
context['logs'].append(str(i)+'- Tech '+row['pretty_name']+': Column '+f+' '+str(e)+'. Parameter skipped.')
continue
technology.update(update_dict)
return render(request, "bulkresults.html", context)
context['logs'].append("No file found")
return render(request, "bulkresults.html", context)
@csrf_protect
@login_required
def upload_loctechs(request):
"""
Upload a CSV file with new/updated location technologies.
Parameters:
model_uuid (uuid): required
description (str): optional
myfile (file): required
col_map (dict): optional
Returns:
Example:
POST: /api/upload_loctechs/
"""
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
context = {
'logs':[],
"model": model
}
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
if os.path.splitext(myfile.name)[1].lower() == '.csv':
df = pd.read_csv(myfile)
else:
context['logs'].append('File format not supported. Please use a .csv.')
return render(request, 'bulkresults.html', context)
if not set(['technology','location_1']).issubset(set(df.columns)):
context['logs'].append("Missing required columns. technology, location_1 are required.")
return render(request, 'bulkresults.html', context)
df['tech'] = df['technology'].apply(lambda x: ParamsManager.simplify_name(x))
if 'pretty_tag' in df.columns:
df['tag'] = df['pretty_tag'].apply(lambda x: ParamsManager.simplify_name(x))
if 'tag' not in df.columns:
df['tag'] = None
df['loc'] = df['location_1'].apply(lambda x: ParamsManager.simplify_name(x))
ureg = initialize_units()
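        # For each row, look up the Technology by its simplified name (falling
        # back to the raw technology string) and optional tag before handling
        # the location assignments.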
for i,row in df.iterrows():
if pd.isnull(row['tag']):
row['tag'] = None
technology = Technology.objects.filter(model_id=model.id,name=row['tech'],tag=row['tag']).first()
if technology == None:
technology = Technology.objects.filter(model_id=model.id,name=row['technology'],tag=row['tag']).first()
if technology == None:
if | pd.isnull(row['tag']) | pandas.isnull |
# coding: utf-8
# # JDE ETL Source Design
# ## Goal: Generate source SQL with friendly names and built-in data Conversion
# 1. Pull *ALL* Field metadata based on QA 9.3: Name, Datatype, Decimals
# 2. Pull *Specific* Table fields
# 3. Create SQL mapping pull with data-conversion
# In[254]:
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import os, sys
import warnings
warnings.filterwarnings('ignore')
# ### Connect to SQL DB
# In[255]:
sql_connection_str = 'mssql+pymssql://sql2srv:[email protected]:1433/BRSales'
engine = create_engine(sql_connection_str)
# ### 1. Pull *ALL* Field metadata based on QA 9.3: Name, Datatype, Decimals
# In[256]:
sql_field_meta_server = 'ESYS_PROD'
sql_field_meta_lib = 'ARCPCOM93'
#sql_field_meta_lib = 'ARCPCOM71'
#sql_field_meta_lib = 'HSIPCOM93Q'
# In[257]:
sql_field_meta = '''
SELECT
RTRIM("FRDTAI") AS data_item
,"FRDTAT" AS data_item_type
,"FROWTP" AS data_type
,"FRDTAS" AS data_item_size
,ISNULL("FRCDEC", 0) AS display_decimals
,ISNULL("FRDSCR", 'zNA') AS row_description
FROM
OPENQUERY ({}, '
SELECT
t.FRDTAI
,FRDTAT
,FROWTP
,FRDTAS
,FRCDEC
,FRDSCR
FROM
{}.F9210 t
LEFT JOIN {}.F9202 d
ON t.FRDTAI = d.FRDTAI AND
d.FRLNGP = '' '' AND
d.FRSYR = '' ''
')
'''.format(sql_field_meta_server, sql_field_meta_lib, sql_field_meta_lib)
# In[258]:
print(sql_field_meta)
# In[259]:
df_field_meta = pd.read_sql_query(sql_field_meta, engine);
# In[260]:
df_field_meta.iloc[:,[2,3,4]] = df_field_meta.iloc[:,[2,3,4]].apply(lambda x: pd.to_numeric(x, errors='coerce'))
df_field_meta.fillna(value=0,inplace=True)
# ### 2. Pull *Specific* Table fields
# # Set Table Name HERE
# In[261]:
sql_table = 'F555116'
sql_link_server = 'ESYS_PROD'
sql_lib = 'ARCPDTA71'
#sql_lib = 'HSIPDTA71'
stage_db_schema = 'etl.'
convert_julian_date = True
# [ARC | HSI] [P | D] [DTA | CDC] [ 71 | 94]
# In[262]:
sql_table_fields = '''
SELECT * from OPENQUERY ({}, '
SELECT
*
FROM
QSYS2.SYSCOLUMNS
WHERE
TABLE_SCHEMA = ''{}'' AND
TABLE_NAME in( ''{}'')
ORDER BY
ORDINAL_POSITION
')
''' .format(sql_link_server, sql_lib, sql_table)
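# ### Sketch toward step 3 (illustrative only)
# The merge keys and the decimal shift below are assumptions about how the two
# frames would be combined: JDE column names are assumed to carry a
# two-character table prefix in front of the data-dictionary item, and the
# Julian-date handling hinted at by convert_julian_date is left out here.
# In[ ]:
def preview_select_expressions(table_fields, field_meta):
    # Join the physical columns to the data dictionary on the data item name.
    merged = table_fields.assign(data_item=table_fields['COLUMN_NAME'].str[2:]) \
                         .merge(field_meta, on='data_item', how='left')
    exprs = []
    for _, r in merged.iterrows():
        name = r['COLUMN_NAME']
        alias = str(r['row_description'] if pd.notnull(r['row_description']) else name)
        alias = alias.strip().replace(' ', '_')
        decimals = int(r['display_decimals']) if pd.notnull(r['display_decimals']) else 0
        if decimals > 0:
            # Shift the implied decimals that JDE stores in the raw value.
            exprs.append('CAST({} AS FLOAT) / {} AS [{}]'.format(name, 10 ** decimals, alias))
        else:
            exprs.append('{} AS [{}]'.format(name, alias))
    return exprs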
# In[263]:
#print (sql_table_fields)
# In[264]:
df_table_fields = | pd.read_sql_query(sql_table_fields, engine) | pandas.read_sql_query |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import spearmanr as sr
from scipy.cluster import hierarchy as hc
from typing import List, Any, Union, Tuple, Optional, Dict
import random, math
# TODO: Remove Dependencies, starting with Sklearn
from sklearn.metrics import roc_curve, \
precision_recall_curve, roc_auc_score, \
confusion_matrix
import itertools
import logging
# TODO: Make categorical_cols optional argument (None) to
# avoid ambiguity when there are no categorical cols
def normalize_numeric(
df,
numerical_cols: List[str] = []):
"""
Normalizes numeric columns by substracting the mean and dividing
by standard deviation. If the parameter numerical_cols is not
provided, it will take all the columns of dtype np.number.
:Example:
norm_df = xai.normalize_numeric(
df,
normalize_numeric=["age", "other_numeric_attribute"])
:param df: Pandas Dataframe containing data (inputs and target)
:type df: pd.DataFrame
:param numerical_cols: List of strings containing numercial cols
:type categorical_cols: str
:returns: Dataframe with normalized numerical values.
:rtype: pandas.DataFrame
"""
tmp_df = df.copy()
if not len(numerical_cols):
numerical_cols = df.select_dtypes(include=[np.number]).columns
for k in numerical_cols:
tmp_df[k] = tmp_df[k].astype(np.float32)
tmp_df[k] -= tmp_df[k].mean()
tmp_df[k] /= tmp_df[k].std()
return tmp_df
def convert_categories(
df,
categorical_cols: List[str] = []):
"""
Converts columns to numeric categories. If the categorical_cols
parameter is passed as a list then those columns are converted.
Otherwise, all np.object columns are converted.
:Example:
import xai
cat_df = xai.convert_categories(df)
:param df: Pandas Dataframe containing data (inputs and target)
:type df: pandas.DataFrame
:param categorical_cols: List of strings containing categorical cols
:type categorical_cols: str
:returns: Dataframe with categorical numerical values.
:rtype: pandas.DataFrame
"""
tmp_df = df.copy()
if not len(categorical_cols):
categorical_cols = df.select_dtypes(include=[np.object, np.bool]).columns
tmp_df[categorical_cols] = tmp_df[categorical_cols].astype('category')
tmp_df[categorical_cols] = tmp_df[categorical_cols].apply(lambda x: x.cat.codes)
tmp_df[categorical_cols] = tmp_df[categorical_cols].astype('int8')
return tmp_df
def group_by_columns(
df: pd.DataFrame,
columns: List[str],
bins: int = 6,
categorical_cols: List[str] = []):
"""
Groups dataframe by the categories (or bucketized values) for all columns provided.
If categorical it uses categories,
if numeric, it uses bins. If more than one column is provided, the columns
provided are, for example, age and binary_target_label, then the result
would be a pandas DataFrame that is grouped by age groups for each of the
positive and negative/positive labels.
:Example:
columns=["loan", "gender"]
df_groups = xai.group_by_columns(
df,
columns=columns,
bins=10,
categorical_cols=["gender", "loan"])
for group, df_group in df_groups:
print(group)
        print(df_group.head())
:param df: Pandas Dataframe containing data (inputs and target)
:type df: pandas.DataFrame
:param bins: [Default: 6] Number of bins to be used for numerical cols
:type bins: int
:param categorical_cols: [Default: []] Columns within dataframe that are
categorical. Columns that are not np.objects or np.bool and
are not part explicitly
provided here will be treated as numeric, and bins will be used.
:type categorical_cols: List[str]
:returns: Dataframe with categorical numerical values.
:rtype: pandas.core.groupby.groupby.DataFrameGroupBy
"""
if not len(categorical_cols):
categorical_cols = _infer_categorical(df)
group_list = []
for c in columns:
col = df[c]
if c in categorical_cols or not bins:
grp = c
else:
col_min = col.min()
col_max = col.max()
# TODO: Use the original bins for display purposes as they may come normalised
col_bins = pd.cut(col, list(np.linspace(col_min, col_max, bins)))
grp = col_bins
group_list.append(grp)
grouped = df.groupby(group_list)
return grouped
def imbalance_plot(
df: pd.DataFrame,
*cross_cols: str,
categorical_cols: List[str] = [],
bins: int = 6,
threshold: float = 0.5):
"""
Shows the number of examples provided for each of the values across the
product tuples in the columns provided. If you would like to do processing
with the sub-groups created by this class please see the
group_by_columns function.
:Example:
import xai
class_counts = xai.imbalance_plot(
df,
"gender", "loan",
bins=10,
threshold=0.8)
:param df: Pandas Dataframe containing data (inputs and target)
:type df: pandas.DataFrame
:param *cross_cols: One or more positional arguments (passed as *args) that
are used to split the data into the cross product of their values
:type cross_cross: List[str]
:param categorical_cols: [Default: []] Columns within dataframe that are
categorical. Columns that are not np.objects and are not part explicitly
provided here will be treated as numeric, and bins will be used.
:type categorical_cols: List[str]
:param bins: [Default: 6] Number of bins to be used for numerical cols
:type bins: int
:param threshold: [Default: 0.5] Threshold to display in the chart.
:type threshold: float
:returns: Null
:rtype: None
"""
if not cross_cols:
raise TypeError("imbalance_plot requires at least 1 string column name")
grouped = group_by_columns(
df,
list(cross_cols),
bins=bins,
categorical_cols=categorical_cols)
grouped_col = grouped[cross_cols[0]]
count_grp = grouped_col.count()
count_max = count_grp.values.max()
ratios = round(count_grp/count_max,4)
# TODO: Make threshold a minimum number of examples per class
imbalances = ratios < threshold
cm = plt.cm.get_cmap('RdYlBu_r')
colors = [cm(1-r/threshold/2) if t else cm(0) \
for r,t in zip(ratios, imbalances)]
ax = count_grp.plot.bar(color=colors)
lp = plt.axhline(threshold*count_max, color='r')
lp.set_label(f"Threshold: {threshold*count_max:.2f} ({threshold*100:.2f}%)")
plt.legend()
plt.show()
def balance(
df: pd.DataFrame,
*cross_cols: str,
upsample: float = 0.5,
downsample: int = 1,
bins: int = 6,
categorical_cols: List[str] = [],
plot: bool = True):
"""
Balances a dataframe based on the columns and cross columns provided.
The results can be upsampled or downsampled. By default, there is no
downsample, and the upsample is towards a minimum of 50% of the
frequency of the highest class.
:Example:
cat_df = xai.balance(
df,
"gender", "loan",
upsample=0.8,
downsample=0.8)
:param df: Pandas Dataframe containing data (inputs and target )
:type df: pandas.DataFrame
:param *cross_cols: One or more positional arguments (passed as *args) that
are used to split the data into the cross product of their values
:type cross_cols: List[str]
:param upsample: [Default: 0.5] Target upsample for columns lower
than percentage.
:type upsample: float
:param downsample: [Default: 1] Target downsample for columns higher
than percentage.
:type downsample: float
:param bins: [Default: 6] Number of bins to be used for numerical cols
:type bins: int
:param categorical_cols: [Default: []] Columns within dataframe that are
categorical. Columns that are not np.objects and are not part explicitly
provided here will be treated as numeric, and bins will be used.
:type categorical_cols: List[str]
    :param plot: [Default: True] Whether to display an imbalance plot of the
        resampled data.
    :type plot: bool
    :returns: Dataframe with the data resampled towards the requested targets.
    :rtype: pandas.DataFrame
"""
if not len(categorical_cols):
categorical_cols = df.select_dtypes(include=[np.object, np.bool]).columns
grouped = group_by_columns(
df,
list(cross_cols),
bins=bins,
categorical_cols=categorical_cols)
count_grp = grouped.count()
count_max = count_grp.values.max()
count_upsample = int(upsample*count_max)
count_downsample = int(downsample*count_max)
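    # Resample every group towards the targets: groups below the upsample
    # target are sampled with replacement up to it, groups above the downsample
    # target are cut down to it, and anything in between is left unchanged.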
def norm(x):
if x.shape[0] < count_upsample:
return x.sample(count_upsample, replace=True)
elif x.shape[0] > count_downsample:
return x.sample(count_downsample)
else:
return x
tmp_df = grouped.apply(norm) \
.reset_index(drop=True)
if plot:
imbalance_plot(
tmp_df,
*cross_cols,
bins=bins,
categorical_cols=categorical_cols)
return tmp_df
def _plot_correlation_dendogram(
corr: pd.DataFrame,
cols: List[str],
plt_kwargs={}):
"""
Plot dendogram of a correlation matrix, using the columns provided.
    This consists of a chart that shows hierarchically the variables
    that are most correlated by the connecting trees. The closer to the right
    that the connection is, the more correlated the features are.
    If you would prefer a grid view of the correlation values, please
    see the function _plot_correlation_matrix.
:Example:
columns_to_include=["age", "loan", "gender"]
xai._plot_correlation_dendogram(df, cols=columns_to_include)
:returns: Null
:rtype: None
"""
corr = np.round(corr, 4)
corr_condensed = hc.distance.squareform(1-corr)
z = hc.linkage(corr_condensed, method="average")
fig = plt.figure(**plt_kwargs)
dendrogram = hc.dendrogram(
z, labels=cols, orientation="left", leaf_font_size=16)
plt.show()
def _plot_correlation_matrix(
corr,
cols: List[str],
plt_kwargs={}):
"""
    Plot the correlation matrix as a grid, using the columns provided in params.
This visualisation contains all the columns in the X and Y axis, where the
intersection of the column and row displays the correlation value.
The closer this correlation factor is to 1, the more correlated the features
are. If you would like to visualise this as a tree, please see
the function _plot_correlation_dendogram.
:Example:
columns_to_include=["age", "loan", "gender"]
xai._plot_correlation_matrix(df, cols=columns_to_include)
:returns: Null
:rtype: None
"""
fig = plt.figure(**plt_kwargs)
ax = fig.add_subplot(111)
cax = ax.matshow(
corr,
cmap='coolwarm',
vmin=-1,
vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,len(cols),1)
ax.set_xticks(ticks)
plt.xticks(rotation=90)
ax.set_yticks(ticks)
ax.set_xticklabels(cols)
ax.set_yticklabels(cols)
plt.show()
def correlations(
df: pd.DataFrame,
include_categorical: bool = False,
plot_type: str = "dendogram",
plt_kwargs={},
categorical_cols: List[str] = []):
"""
Computes the correlations for the columns provided and plots the relevant
image as requested by the parameters.
    :Example:
    corr_df = xai.correlations(
        df,
        include_categorical=True,
        plot_type="dendogram")
    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pandas.DataFrame
    :param include_categorical: [Default: False] Whether to also include
        categorical columns when computing the correlations.
    :type include_categorical: bool
    :param plot_type: [Default: "dendogram"] Type of plot to draw, either
        "dendogram" or "matrix".
    :type plot_type: str
    :param plt_kwargs: [Default: {}] Keyword arguments passed to the
        matplotlib figure.
    :type plt_kwargs: dict
    :param categorical_cols: [Default: []] Columns within dataframe that are
        categorical; used only when include_categorical is False to decide
        which columns to exclude from the numerical correlation. If empty,
        np.object and np.bool columns are inferred as categorical.
    :type categorical_cols: List[str]
:returns: Returns a dataframe containing the correlation values for the features
:rtype: pandas.DataFrame
"""
corr = None
cols: List = []
if include_categorical:
corr = sr(df).correlation
cols = df.columns
else:
if not len(categorical_cols):
categorical_cols = df.select_dtypes(include=[np.object, np.bool]).columns
cols = [c for c in df.columns if c not in categorical_cols]
corr = df[cols].corr()
cols = corr.columns
if plot_type == "dendogram":
_plot_correlation_dendogram(corr, cols, plt_kwargs=plt_kwargs)
elif plot_type == "matrix":
_plot_correlation_matrix(corr, cols, plt_kwargs=plt_kwargs)
else:
raise ValueError(f"Variable plot_type not valid. Provided: {plot_type}")
return corr
def confusion_matrix_plot(
y_test,
pred,
scaled=True,
label_x_neg="PREDICTED NEGATIVE",
label_x_pos="PREDICTED POSITIVE",
label_y_neg="ACTUAL NEGATIVE",
label_y_pos="ACTUAL POSITIVE"):
"""
Plots a confusion matrix for a binary classifier with the expected and
predicted values provided.
:Example:
xai.confusion_matrix_plot(
actual_labels,
predicted_labels,
scaled=True)
:param y_test: Array containing binary "actual" labels for data
:type y_test: Union[np.array, list]
    :param pred: Array containing binary "predicted" labels for data
:type pred: Union[np.array, list]
:param scaled: [Default: True] Whether the values are scaled to 0-1 or
displayed as total number of instances
:type scaled: bool
:param label_x_neg: [Default: "PREDICTED NEGATIVE"] Plot label for
the predicted negative values
:type label_x_neg: str
:param label_x_pos: [Default: "PREDICTED POSITIVE"] Plot label for
the predicted positive values
:type label_x_pos: str
:param label_y_neg: [Default: "ACTUAL NEGATIVE"] Plot label for
the actual negative values
:type label_y_neg: str
:param label_y_pos: [Default: "ACTUAL POSITIVE"] Plot label for
the actual positive values
:type label_y_pos: str
:returns: Null
:rtype: None
"""
confusion = confusion_matrix(y_test, pred)
columns = [label_y_neg, label_y_pos]
index = [label_x_neg, label_x_pos]
if scaled:
confusion_scaled = (confusion.astype("float") /
confusion.sum(axis=1)[:, np.newaxis])
confusion = pd.DataFrame(
confusion_scaled,
index=index,
columns=columns)
else:
confusion = pd.DataFrame(
confusion,
index=index,
columns=columns)
cmap = plt.get_cmap("Blues")
plt.figure()
plt.imshow(confusion, interpolation="nearest", cmap=cmap)
plt.title("Confusion matrix")
plt.colorbar()
plt.xticks(np.arange(2), columns, rotation=45)
plt.yticks(np.arange(2), index, rotation=45)
threshold = 0.5 if scaled else confusion.max().max() / 2
for i, j in itertools.product(
range(confusion.shape[0]),
range(confusion.shape[1])):
txt = "{:,}".format(confusion.iloc[i,j])
if scaled: txt = "{:0.4f}".format(confusion.iloc[i,j])
plt.text(j, i, txt,
horizontalalignment="center",
color=("white" if confusion.iloc[i,j] > threshold else "black"))
plt.tight_layout()
plt.show()
def balanced_train_test_split(
x: pd.DataFrame,
y: Union[np.ndarray, list],
*cross_cols: str,
categorical_cols: List[str] = [],
min_per_group: int = 20,
max_per_group: Optional[int] = None,
fallback_type: str = "upsample",
bins: int =6,
random_state: int=None
) -> Tuple[
pd.DataFrame,
np.ndarray,
pd.DataFrame,
np.ndarray,
np.ndarray,
np.ndarray]:
"""
Splits the "x" DataFrame and "y" Array into train/test split training sets with
a balanced number of examples for each of the categories of the columns provided.
For example, if the columns provided are "gender" and "loan", the resulting splits
would contain an equal number of examples for Male with Loan Approved, Male with
Loan Rejected, Female with Loan Approved, and Female with Loan Rejected. The
"fallback_type" parameter provides the behaviour that is triggered if there are not
    enough datapoint examples for one of the subcategory groups - the default is "upsample".
Example
-------
.. code-block:: python
x: pd.DataFrame # Contains the input features
y: np.array # Contains the labels for the data
categorical_cols: List[str] # Name of columns that are categorical
x_train, y_train, x_test, y_test, train_idx, test_idx = \\
xai.balanced_train_test_split(
x, y, balance_on=["gender"],
categorical_cols=categorical_cols, min_per_group=300,
fallback_type="half")
Args
-----
x :
Pandas dataframe containing all the features in dataset
y :
Array containing "actual" labels for the dataset
*cross_cols :
One or more positional arguments (passed as *args) that
are used to split the data into the cross product of their values
categorical_cols :
[Default: []] Columns within dataframe that are
        categorical. Columns that are not np.object and are not explicitly
        provided here will be treated as numeric, and bins will be used.
min_per_group :
[Default: 20] This is the number of examples for each
of the groups created
max_per_group :
[Default: None] This is the maximum number of examples for
each group to be provided with.
fallback_type :
[Default: upsample] This is the fallback mechanism for when
one of the groups contains less elements than the number provided
through min_per_group. The options are "upsample", "ignore" and "error".
- "upsample": This will get samples with replacement so will repeat elements
- "ignore": Will just ignore and return all the elements available
- "error": Throw an exception for any groups with less elements
bins :
[Default: 6] Number of bins to be used for numerical cols
random_state:
[Default: None] Random seed for the internal sampling
Returns
-------
x_train : pd.DataFrame
DataFrame containing traning datapoints
y_train : np.ndarray
Array containing labels for training datapoints
x_test : pd.DataFrame
DataFrame containing test datapoints
y_test : np.ndarray
Array containing labels for test datapoints
train_idx : np.ndarray
Boolean array with True on Training indexes
test_idx : np.ndarray
Boolean array with True on Testing indexes
"""
if not cross_cols:
raise TypeError("imbalance_plot requires at least 1 string column name")
if min_per_group < 1:
raise TypeError("min_per_group must be at least 1")
if max_per_group and max_per_group < min_per_group:
raise TypeError(f"min_per_group ({min_per_group}) must be less or equal than "
f"max_per_group ({max_per_group}) if max_per_group is provided.")
if random_state:
        random.seed(random_state)
tmp_df = x.copy()
tmp_df["target"] = y
cross = ["target"] + list(cross_cols)
if not categorical_cols:
categorical_cols = _infer_categorical(tmp_df)
# TODO: Enable for non-categorical targets
categorical_cols = ["target"] + categorical_cols
grouped = group_by_columns(
tmp_df,
cross,
bins=bins,
categorical_cols=categorical_cols)
def resample(x):
group_size = x.shape[0]
if max_per_group:
if group_size > max_per_group:
return x.sample(max_per_group)
if group_size > min_per_group:
return x.sample(min_per_group)
if fallback_type == "upsample":
return x.sample(min_per_group, replace=True)
elif fallback_type == "ignore":
return x
elif fallback_type == "error":
raise ValueError("Number of samples for group are not enough,"
" and fallback_type provided was 'error'")
else:
raise(f"Sampling type provided not found: given {fallback_type}, "\
"expected: 'error', or 'half'")
group = grouped.apply(resample)
selected_idx = [g[-1] for g in group.index.values]
train_idx = np.full(tmp_df.shape[0], True, dtype=bool)
train_idx[selected_idx] = False
test_idx = np.full(tmp_df.shape[0], False, dtype=bool)
test_idx[selected_idx] = True
df_train = tmp_df.iloc[train_idx]
df_test = tmp_df.iloc[test_idx]
x_train = df_train.drop("target", axis=1)
y_train = df_train["target"].values
x_test = df_test.drop("target", axis=1)
y_test = df_test["target"].values
return x_train, y_train, x_test, y_test, train_idx, test_idx
def convert_probs(
probs: np.ndarray,
threshold: float = 0.5
) -> np.ndarray:
"""
Converts all the probabilities in the array provided into binary labels
as per the threshold provided which is 0.5 by default.
Example
---------
.. code-block:: python
probs = np.array([0.1, 0.2, 0.7, 0.8, 0.6])
labels = xai.convert_probs(probs, threshold=0.65)
print(labels)
> [0, 0, 1, 1, 0]
Args
-------
probs :
Numpy array or list containing a list of floats between 0 and 1
threshold :
Float that provides the threshold for which probabilities over the
threshold will be converted to 1
Returns
----------
: np.ndarray
Numpy array containing the labels based on threshold provided
"""
return (probs >= threshold).astype(int)
def evaluation_metrics(
y_valid,
y_pred
) -> Dict[str, float]:
"""
Calculates model performance metrics (accuracy, precision, recall, etc)
    from the actual and predicted labels provided.
Example
---------
.. code-block:: python
y_actual: np.ndarray
y_predicted: np.ndarray
metrics = xai.evaluation_metrics(y_actual, y_predicted)
for k,v in metrics.items():
print(f"{k}: {v}")
> precision: 0.8,
> recall: 0.9,
> specificity: 0.7,
> accuracy: 0.8,
> auc: 0.7,
> f1: 0.8
Args
-------
y_valid :
Numpy array with the actual labels for the datapoints
y_pred :
Numpy array with the predicted labels for the datapoints
Returns
----------
: Dict[str, float]
Dictionary containing the metrics as follows:
.. code-block:: python
return {
"precision": precision,
"recall": recall,
"specificity": specificity,
"accuracy": accuracy,
"auc": auc,
"f1": f1
}
"""
TP = np.sum( y_pred[y_valid==1] )
TN = np.sum( y_pred[y_valid==0] == 0 )
FP = np.sum( y_pred[y_valid==0] )
FN = np.sum( y_pred[y_valid==1] == 0 )
# Adding an OR to ensure it doesn't divide by zero
precision = TP / ((TP+FP) or 0.001)
recall = TP / ((TP+FN) or 0.001)
specificity = TN / ((TN+FP) or 0.001)
accuracy = (TP+TN) / (TP+TN+FP+FN)
f1 = 2 * (precision * recall) / ((precision + recall) or 0.001)
try:
auc = roc_auc_score(y_valid, y_pred)
except ValueError:
auc = 0
return {
"precision": precision,
"recall": recall,
"specificity": specificity,
"accuracy": accuracy,
"auc": auc,
"f1": f1
}
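# Illustrative worked example (added note, not part of the original module):
# with hypothetical labels y_valid = np.array([1, 1, 0, 0]) and
# y_pred = np.array([1, 0, 0, 1]), the counts above are TP=1, FN=1, TN=1, FP=1,
# so precision, recall, specificity, accuracy and f1 all come out as 0.5:
#
#   metrics = evaluation_metrics(np.array([1, 1, 0, 0]), np.array([1, 0, 0, 1]))
#   assert round(metrics["precision"], 2) == 0.5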
def metrics_plot(
target: np.ndarray,
predicted: np.ndarray,
df: pd.DataFrame = pd.DataFrame(),
cross_cols: List[str] = [],
categorical_cols: List[str] = [],
bins: int = 6,
plot: bool = True,
exclude_metrics: List[str] = [],
plot_threshold: float = 0.5
) -> pd.DataFrame:
"""
Creates a plot that displays statistical metrics including precision,
recall, accuracy, auc, f1 and specificity for each of the groups created
for the columns provided by cross_cols. For example, if the columns passed
are "gender" and "age", the resulting plot will show the statistical metrics
for Male and Female for each binned group.
Example
---------
.. code-block:: python
target: np.ndarray
predicted: np.ndarray
df_metrics = xai.metrics_plot(
target,
predicted,
df=df_data,
cross_cols=["gender", "age"],
            bins=3)
Args
-------
target:
Numpy array containing the target labels for the datapoints
predicted :
Numpy array containing the predicted labels for the datapoints
df :
Pandas dataframe containing all the features for the datapoints.
It can be empty if only looking to calculate global metrics, but
if you would like to compute for categories across columns, the
columns you are grouping by need to be provided
cross_cols :
Contains the columns that you would like to use to cross the values
bins :
[Default: 6] The number of bins in which you'd like
numerical columns to be split
plot :
[Default: True] If True a plot will be drawn with the results
exclude_metrics :
These are the metrics that you can choose to exclude if you only
want specific ones (for example, excluding "f1", "specificity", etc)
plot_threshold:
The percentage that will be used to draw the threshold line in the plot
which would provide guidance on what is the ideal metrics to achieve.
Returns
----------
: pd.DataFrame
Pandas Dataframe containing all the metrics for the groups provided
"""
grouped = _group_metrics(
target,
predicted,
df,
cross_cols,
categorical_cols,
bins,
target_threshold=plot_threshold)
prfs = []
classes = []
for group, group_df in grouped:
group_valid = group_df['target'].values
group_pred = group_df["predicted"].values
metrics_dict = \
evaluation_metrics(group_valid, group_pred)
# Remove metrics as specified by params
[metrics_dict.pop(k, None) for k in exclude_metrics]
prfs.append(list(metrics_dict.values()))
classes.append(str(group))
prfs_cols = metrics_dict.keys()
prfs_df = pd.DataFrame(
np.array(prfs).transpose(),
columns=classes,
index=prfs_cols)
if plot:
prfs_df.plot.bar(figsize=(20,5))
lp = plt.axhline(0.5, color='r')
lp = plt.axhline(1, color='g')
return prfs_df
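# The two public helpers below, roc_plot and pr_plot, are thin wrappers around
# _curve: they share the same signature and only differ in the curve_type they
# request ("roc" vs "pr"). A hedged usage sketch (df_data, target and predicted
# are assumed to exist as in the metrics_plot example above); each call returns
# the per-group curve arrays and, when plot=True, also draws the curves:
#
#   fprs, tprs = xai.roc_plot(target, predicted, df=df_data,
#                             cross_cols=["gender"])
#   precisions, recalls = xai.pr_plot(target, predicted, df=df_data,
#                                     cross_cols=["gender"])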
def roc_plot(
target,
predicted,
df=pd.DataFrame(),
cross_cols=[],
categorical_cols=[],
bins=6,
plot=True):
return _curve(
target=target,
predicted=predicted,
curve_type="roc",
df=df,
cross_cols=cross_cols,
categorical_cols=categorical_cols,
bins=bins,
plot=plot)
def pr_plot(
target,
predicted,
df=pd.DataFrame(),
cross_cols=[],
categorical_cols=[],
bins=6,
plot=True):
return _curve(
target=target,
predicted=predicted,
curve_type="pr",
df=df,
cross_cols=cross_cols,
categorical_cols=categorical_cols,
bins=bins,
plot=plot)
def _curve(
target,
predicted,
curve_type="roc",
df=pd.DataFrame(),
cross_cols=[],
categorical_cols=[],
bins=6,
plot=True):
if curve_type == "roc":
curve_func = roc_curve
        x_label = 'False Positive Rate'
        y_label = 'True Positive Rate'
p1 = [0,1]
p2 = [0,1]
y_lim = [0, 1.05]
legend_loc = "lower right"
elif curve_type == "pr":
curve_func = precision_recall_curve
y_label = "Recall"
x_label = "Precision"
p1 = [1,0]
p2 = [0.5,0.5]
y_lim = [0.25, 1.05]
legend_loc = "lower left"
else:
raise ValueError("Curve function provided not valid. "
f" curve_func provided: {curve_func}")
grouped = _group_metrics(
target,
predicted,
df,
cross_cols,
categorical_cols,
bins)
if plot:
plt.figure()
    r1s, r2s = [], []
for group, group_df in grouped:
group_valid = group_df["target"]
group_pred = group_df["predicted"]
r1, r2, _ = curve_func(group_valid, group_pred)
r1s.append(r1)
r2s.append(r2)
if plot:
if curve_type == "pr": r1,r2 = r2,r1
plt.plot(r1, r2, label=group)
plt.plot(p1, p2, 'k--')
if plot:
plt.xlim([0.0, 1.0])
plt.ylim(y_lim)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(loc=legend_loc)
plt.show()
return r1s, r2s
def _infer_categorical(df):
categorical_cols = df.select_dtypes(
include=[np.object, np.bool, np.int8]).columns
logging.warn("No categorical_cols passed so inferred using np.object, "
f"np.int8 and np.bool: {categorical_cols}. If you see an error"
" these are not "
"correct, please provide them as a string array as: "
"categorical_cols=['col1', 'col2', ...]")
return categorical_cols
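# Note on the internal helper below (descriptive comment added for clarity):
# _group_metrics attaches the target and predicted arrays to a copy of df,
# optionally converts float predictions into class labels via convert_probs
# when target_threshold is given, and returns either a single ("target", df)
# group or the group_by_columns() grouping over cross_cols, so callers such as
# metrics_plot and _curve can iterate over (group_name, group_df) pairs.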
def _group_metrics(
target,
predicted,
df,
cross_cols,
categorical_cols,
bins,
target_threshold=None):
if not all(c in df.columns for c in cross_cols):
raise KeyError("Cross columns don't match columns in dataframe provided.")
df_tmp = df.copy()
df_tmp["target"] = target
df_tmp["predicted"] = predicted
# Convert predictions into classes
if target_threshold and df_tmp["predicted"].dtype.kind == 'f':
df_tmp["predicted"] = convert_probs(
df_tmp["predicted"], threshold=target_threshold)
if not categorical_cols and cross_cols:
categorical_cols = _infer_categorical(df_tmp)
if not cross_cols:
grouped = [("target", df_tmp),]
else:
grouped = group_by_columns(
df_tmp,
cross_cols,
bins=bins,
categorical_cols=categorical_cols)
return grouped
def smile_imbalance(
y_test,
probs: np.ndarray,
threshold=0.5,
manual_review=None,
display_breakdown=False,
bins=10):
# TODO: Change function so it only iterates once
probs = np.array(probs)
preds = convert_probs(probs, threshold).flatten()
d = | pd.DataFrame(probs) | pandas.DataFrame |
#!/usr/bin/env python
import argparse
import pandas as pd
import numpy as np
import umap
import warnings
import sys
warnings.filterwarnings('ignore')
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import column
from bokeh.core.properties import value
from bokeh.palettes import all_palettes
from bokeh.models import Legend, HoverTool, LegendItem
from bokeh.transform import linear_cmap
from bokeh.palettes import inferno
import pdb
def read_input(input_file):
print("Reading data..")
    in_data = pd.read_csv(input_file, sep=r"\s+")
return in_data
def do_UMAP(data):
print("Performing dimensionality reduction")
reducer = umap.UMAP()
data_NO_index=data.drop(data.columns[[0,1]], axis=1)
embedding = reducer.fit_transform(data_NO_index)
return embedding
def plotting(umaped_data, raw_data, output, key_file):
key=key_file
print("Building plot")
umaped_df = pd.DataFrame(data=umaped_data)
source = | pd.concat([raw_data[['FID','IID']], umaped_df], axis=1) | pandas.concat |
from pycountry_convert import country_alpha2_to_continent_code, country_name_to_country_alpha2
import pandas as pd
from sklearn.linear_model import LinearRegression as LR
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
producers = [
# ' Qatar',
' United States (USA)',
' Nigeria',
' Algeria',
    # ' Canada', we don't have Canada?
# ' Norway',
' Russia',
    # ' Mozambique', same as above
# ' Indonesia',
# ' Australia'
]
cn_to_ca2 = {
# Bad country name to country alpha 2
'United Arab Emirates (UAE)': 'AE',
'Korea': 'KR', # South Korea
'Trinidad & Tobago': 'TT',
'United States (USA)': 'US',
'United Kingdom (UK)': 'GB',
"Cote d'Ivoire": 'CI',
}
price_of_country = {
'NA': 'America', # North America
'SA': 'America', # South America
'AS': 'Asia', # Asia
'OC': 'Asia', # Australia
'AF': 'Europe', # Africa
'EU': 'Europe', # Europe
}
def convertNameToCode(country_name, continent):
    # Converts country names from our documents into a code for the corresponding continent
try:
        # two-letter country code
calpha2 = country_name_to_country_alpha2(country_name.strip())
except KeyError:
        # Some country names in our datasets do not follow the standard available on Wikipedia
# https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
calpha2 = cn_to_ca2[country_name.strip()]
    # for example 'EU'
concode = country_alpha2_to_continent_code(calpha2)
return int(concode == continent)
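# Illustrative sketch (hypothetical inputs): convertNameToCode(' France', 'EU')
# maps ' France' -> 'FR' -> continent code 'EU' and therefore returns 1, while
# convertNameToCode(' France', 'AS') returns 0. The 0/1 result is used below to
# build one indicator column per continent for departure and arrival countries.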
def changeDate(date_str):
return datetime.strptime(date_str, "%m/%d, %H:%M").strftime("2021-%m-%d")
def loadTripsAndPriceData():
df_trips = pd.read_csv('./Data/Portcalls/voyages.csv')
    # Add columns with the continent codes of the departure and arrival countries
for continent in price_of_country.keys():
df_trips[f'D_{continent}'] = df_trips['Dcountry'].apply(convertNameToCode, args=(continent,))
df_trips[f'A_{continent}'] = df_trips['Acountry'].apply(convertNameToCode, args=(continent,))
    # Reformat the departure and arrival dates to match the format of the Europe prices
df_trips['Dtime'] = df_trips['Dtime'].apply(changeDate)
df_trips['Atime'] = df_trips['Atime'].apply(changeDate)
    # Load the European spot prices
df_eur_price = | pd.read_csv('./Data/SpotEur.csv', index_col='Date') | pandas.read_csv |
from src.report_generators.base_report_generator import BaseReportGenerator
from src.helpers.preprocess_text import extract_links_from_html
from src.helpers.preprocess_text import extract_from_path
from src.helpers.preprocess_text import extract_subtext
import os
import ast
import re
from bs4 import BeautifulSoup
import pandas as pd
class NonHtmlAttachmentReportGenerator(BaseReportGenerator):
@property
def headers(self):
return ["base_path",
"primary_publishing_organisation",
"publishing_app",
"document_type",
"first_published_at",
"attachment_path"]
@property
def filename(self):
return "non_html_page_report.csv"
def process_page(self, content_item, html):
content_item['primary_publishing_organisation'] = extract_subtext(text=content_item['organisations'],
key='primary_publishing_organisation',
index=1)
# ignore cases we do not want to return
publishers = ["publisher", "service-manual-publisher", "specialist-publisher", "travel-advice-publisher"]
if not content_item['publishing_app'] in publishers:
return []
attachments = (".chm|.csv|.diff|.doc|.docx|.dot|.dxf|.eps|"
+ ".gif|.gml|.ics|.jpg|.kml|.odp|.ods|.odt|.pdf|"
+ ".png|.ppt|.pptx|.ps|.rdf|.ris|.rtf|.sch|.txt|"
+ ".vcf|.wsdl|.xls|.xlsm|.xlsx|.xlt|.xml|.xsd|.xslt|"
+ ".zip")
if not any(re.findall(pattern=attachments, string=content_item['details'])):
return []
if | pd.isna(content_item['details']) | pandas.isna |
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(learning_rate=1e-3))
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
X_new = X_test[:3]
y_pred = model.predict(X_new)
plt.plot( | pd.DataFrame(history.history) | pandas.DataFrame |
import asyncio
import threading
import logging
import logging.handlers
import numpy as np
import os
import time
import tqdm
from datetime import datetime
from itertools import zip_longest
from collections import OrderedDict
import pandas as pd
import IPython
from ophyd import Device
from ophyd.status import Status
from ophyd.device import Staged
from ophyd.areadetector import FilePlugin
from nslsii.detectors.xspress3 import Xspress3Detector
from hxntools.shutter import (shutter_open, shutter_close)
from . import SCRIPT_PATH
from ppmac.pp_comm import (GPError, ScriptCancelled,
TimeoutError as PpmacTimeoutError)
from .ppmac_util import (ppmac_connect, filter_ppmac_data as filter_data,
gather_setup, is_closed_loop)
from .log import log_time
# from .plot import plot_position
# ip = IPython.get_ipython()
# global_state = ip.user_ns['gs']
# zebra = ip.user_ns['zebra']
# sclr1 = ip.user_ns['sclr1']
# from hxntools import HxnTriggeringScaler
# sclr1 = HxnTriggeringScaler('XF:03IDC-ES{Sclr:1}', name='sclr1')
logger = logging.getLogger(__name__)
UNSTAGE_TIME_LIMIT = 300
class FlyStatus(Status):
pass
def get_dataframe(df_data, n_points):
'''Create a dataframe from df_data, adjusting the size of lists if
necessary
Note: this should not really be happening (and hasn't recently I believe),
but we'd rather a point or two to be clipped off than for the whole scan to
die in case something went a bit wrong.
'''
for key in df_data.keys():
len_ = len(df_data[key])
if len_ < n_points:
df_data[key] = np.pad(df_data[key], (0, n_points - len_),
mode='constant')
elif len_ > n_points:
df_data[key] = df_data[key][:n_points]
return | pd.DataFrame(df_data) | pandas.DataFrame |
from flask import Flask
from flask_restful import Resource, Api, reqparse
import pandas as pd
import ast
app = Flask(__name__)
api = Api(app)
class Users(Resource):
def get(self):
data = pd.read_csv('users.csv') # read local CSV
data = data.to_dict() # convert dataframe to dict
return {'data': data}, 200 # return data and 200 OK
def post(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('userId', required=True) # add args
parser.add_argument('name', required=True)
parser.add_argument('city', required=True)
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('users.csv')
if args['userId'] in list(data['userId']):
return {
'message': f"'{args['userId']}' already exists."
}, 409
else:
# create new dataframe containing new values
new_data = pd.DataFrame({
'userId': [args['userId']],
'name': [args['name']],
'city': [args['city']],
'locations': [[]]
})
# add the newly provided values
data = data.append(new_data, ignore_index=True)
data.to_csv('users.csv', index=False) # save back to CSV
return {'data': data.to_dict()}, 200 # return data with 200 OK
def put(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('userId', required=True) # add args
parser.add_argument('location', required=True)
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('users.csv')
if args['userId'] in list(data['userId']):
# evaluate strings of lists to lists !!! never put something like this in prod
data['locations'] = data['locations'].apply(
lambda x: ast.literal_eval(x)
)
# select our user
user_data = data[data['userId'] == args['userId']]
# update user's locations
user_data['locations'] = user_data['locations'].values[0] \
.append(args['location'])
# save back to CSV
data.to_csv('users.csv', index=False)
# return data and 200 OK
return {'data': data.to_dict()}, 200
else:
# otherwise the userId does not exist
return {
'message': f"'{args['userId']}' user not found."
}, 404
def delete(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('userId', required=True) # add userId arg
args = parser.parse_args() # parse arguments to dictionary
# read our CSV
data = pd.read_csv('users.csv')
if args['userId'] in list(data['userId']):
# remove data entry matching given userId
data = data[data['userId'] != args['userId']]
# save back to CSV
data.to_csv('users.csv', index=False)
# return data and 200 OK
return {'data': data.to_dict()}, 200
else:
# otherwise we return 404 because userId does not exist
return {
'message': f"'{args['userId']}' user not found."
}, 404
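# Note (illustrative, not from the original snippet): each Resource class is
# meant to be wired to a route with flask_restful's add_resource, e.g.
#   api.add_resource(Users, '/users')
#   api.add_resource(Locations, '/locations')
# so the get/post/put/delete methods above map directly onto the HTTP verbs.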
class Locations(Resource):
def get(self):
data = | pd.read_csv('locations.csv') | pandas.read_csv |
from selenium import webdriver
import pandas as pd
from bs4 import BeautifulSoup
import hashlib
import datetime
import multiprocessing as mp
import concurrent as cc
df = pd.DataFrame(columns=['Title','Location','Company','Salary','Sponsored','Description','Time'])
class Indeed():
@staticmethod
def get_data(start, end, location='India', webdriver_location='D:/A/3T_Project/chromedriver.exe'):
'''
start: The starting page of search to retrieve data from
end: The ending page of search to retrieve data from
        location: which particular place, city or country you want to retrieve data for
        webdriver_location: path to the Chrome webdriver executable
        This is a static method; it returns the dataframe built from the scraped results
'''
df = | pd.DataFrame(columns=['Title','Location','Company','Salary','Sponsored','Description','Time']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 22:54:58 2020
@author: arti
"""
import pandas as pd
import numpy as np
student1 = pd.Series({'Korean': np.nan, 'English':80, 'Math':90})
student2 = | pd.Series({'Korean':80, 'Math':90}) | pandas.Series |
'''
This script provides code for training a neural network with entity embeddings
of the 'cat' variables. For more details on entity embedding, see:
https://github.com/entron/entity-embedding-rossmann
8-Fold training with 3 averaged runs per fold. Results may improve with more folds & runs.
'''
import numpy as np
import pandas as pd
#random seeds for stochastic parts of neural network
np.random.seed(10)
from tensorflow import set_random_seed
set_random_seed(15)
from keras.models import Model
from keras.layers import Input, Dense, Concatenate, Reshape, Dropout
from keras.layers.embeddings import Embedding
from sklearn.model_selection import StratifiedKFold
#Data loading & preprocessing
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
X_train, y_train = df_train.iloc[:,2:], df_train.target
X_test = df_test.iloc[:,1:]
cols_use = [c for c in X_train.columns if (not c.startswith('ps_calc_'))]
X_train = X_train[cols_use]
X_test = X_test[cols_use]
col_vals_dict = {c: list(X_train[c].unique()) for c in X_train.columns if c.endswith('_cat')}
embed_cols = []
for c in col_vals_dict:
if len(col_vals_dict[c])>2:
embed_cols.append(c)
print(c + ': %d values' % len(col_vals_dict[c])) #look at value counts to know the embedding dimensions
print('\n')
def build_embedding_network():
inputs = []
embeddings = []
input_ps_ind_02_cat = Input(shape=(1,))
embedding = Embedding(5, 3, input_length=1)(input_ps_ind_02_cat)
embedding = Reshape(target_shape=(3,))(embedding)
inputs.append(input_ps_ind_02_cat)
embeddings.append(embedding)
input_ps_ind_04_cat = Input(shape=(1,))
embedding = Embedding(3, 2, input_length=1)(input_ps_ind_04_cat)
embedding = Reshape(target_shape=(2,))(embedding)
inputs.append(input_ps_ind_04_cat)
embeddings.append(embedding)
input_ps_ind_05_cat = Input(shape=(1,))
embedding = Embedding(8, 5, input_length=1)(input_ps_ind_05_cat)
embedding = Reshape(target_shape=(5,))(embedding)
inputs.append(input_ps_ind_05_cat)
embeddings.append(embedding)
input_ps_car_01_cat = Input(shape=(1,))
embedding = Embedding(13, 7, input_length=1)(input_ps_car_01_cat)
embedding = Reshape(target_shape=(7,))(embedding)
inputs.append(input_ps_car_01_cat)
embeddings.append(embedding)
input_ps_car_02_cat = Input(shape=(1,))
embedding = Embedding(3, 2, input_length=1)(input_ps_car_02_cat)
embedding = Reshape(target_shape=(2,))(embedding)
inputs.append(input_ps_car_02_cat)
embeddings.append(embedding)
input_ps_car_03_cat = Input(shape=(1,))
embedding = Embedding(3, 2, input_length=1)(input_ps_car_03_cat)
embedding = Reshape(target_shape=(2,))(embedding)
inputs.append(input_ps_car_03_cat)
embeddings.append(embedding)
input_ps_car_04_cat = Input(shape=(1,))
embedding = Embedding(10, 5, input_length=1)(input_ps_car_04_cat)
embedding = Reshape(target_shape=(5,))(embedding)
inputs.append(input_ps_car_04_cat)
embeddings.append(embedding)
input_ps_car_05_cat = Input(shape=(1,))
embedding = Embedding(3, 2, input_length=1)(input_ps_car_05_cat)
embedding = Reshape(target_shape=(2,))(embedding)
inputs.append(input_ps_car_05_cat)
embeddings.append(embedding)
input_ps_car_06_cat = Input(shape=(1,))
embedding = Embedding(18, 8, input_length=1)(input_ps_car_06_cat)
embedding = Reshape(target_shape=(8,))(embedding)
inputs.append(input_ps_car_06_cat)
embeddings.append(embedding)
input_ps_car_07_cat = Input(shape=(1,))
embedding = Embedding(3, 2, input_length=1)(input_ps_car_07_cat)
embedding = Reshape(target_shape=(2,))(embedding)
inputs.append(input_ps_car_07_cat)
embeddings.append(embedding)
input_ps_car_09_cat = Input(shape=(1,))
embedding = Embedding(6, 3, input_length=1)(input_ps_car_09_cat)
embedding = Reshape(target_shape=(3,))(embedding)
inputs.append(input_ps_car_09_cat)
embeddings.append(embedding)
input_ps_car_10_cat = Input(shape=(1,))
embedding = Embedding(3, 2, input_length=1)(input_ps_car_10_cat)
embedding = Reshape(target_shape=(2,))(embedding)
inputs.append(input_ps_car_10_cat)
embeddings.append(embedding)
input_ps_car_11_cat = Input(shape=(1,))
embedding = Embedding(104, 10, input_length=1)(input_ps_car_11_cat)
embedding = Reshape(target_shape=(10,))(embedding)
inputs.append(input_ps_car_11_cat)
embeddings.append(embedding)
input_numeric = Input(shape=(24,))
embedding_numeric = Dense(16)(input_numeric)
inputs.append(input_numeric)
embeddings.append(embedding_numeric)
x = Concatenate()(embeddings)
x = Dense(80, activation='relu')(x)
x = Dropout(.35)(x)
x = Dense(20, activation='relu')(x)
x = Dropout(.15)(x)
x = Dense(10, activation='relu')(x)
x = Dropout(.15)(x)
output = Dense(1, activation='sigmoid')(x)
model = Model(inputs, output)
model.compile(loss='binary_crossentropy', optimizer='adam')
return model
#converting data to list format to match the network structure
def preproc(X_train, X_val, X_test):
input_list_train = []
input_list_val = []
input_list_test = []
#the cols to be embedded: rescaling to range [0, # values)
for c in embed_cols:
raw_vals = np.unique(X_train[c])
val_map = {}
for i in range(len(raw_vals)):
val_map[raw_vals[i]] = i
input_list_train.append(X_train[c].map(val_map).values)
input_list_val.append(X_val[c].map(val_map).fillna(0).values)
input_list_test.append(X_test[c].map(val_map).fillna(0).values)
#the rest of the columns
other_cols = [c for c in X_train.columns if (not c in embed_cols)]
input_list_train.append(X_train[other_cols].values)
input_list_val.append(X_val[other_cols].values)
input_list_test.append(X_test[other_cols].values)
return input_list_train, input_list_val, input_list_test
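# Sketch of the resulting model input (shapes assumed from the code above):
# preproc returns one integer-encoded array per embedded categorical column
# plus a final float matrix with the remaining numeric columns, matching the
# thirteen embedding Inputs and the single 24-wide numeric Input defined in
# build_embedding_network.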
#gini scoring function from kernel at:
#https://www.kaggle.com/tezdhar/faster-gini-calculation
def ginic(actual, pred):
n = len(actual)
a_s = actual[np.argsort(pred)]
a_c = a_s.cumsum()
giniSum = a_c.sum() / a_c[-1] - (n + 1) / 2.0
return giniSum / n
def gini_normalizedc(a, p):
return ginic(a, p) / ginic(a, a)
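# Hedged worked example for the normalized Gini above (hypothetical values):
# with actual = np.array([0, 0, 1, 1]) and perfectly ranked predictions
# pred = np.array([0.1, 0.2, 0.7, 0.9]), ginic(actual, pred) equals
# ginic(actual, actual), so gini_normalizedc returns 1.0; reversing the
# ranking drives the score towards -1 and random ranking towards 0.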
#network training
K = 8
runs_per_fold = 3
n_epochs = 15
cv_ginis = []
full_val_preds = np.zeros(np.shape(X_train)[0])
y_preds = np.zeros((np.shape(X_test)[0],K))
kfold = StratifiedKFold(n_splits = K,
random_state = 231,
shuffle = True)
for i, (f_ind, outf_ind) in enumerate(kfold.split(X_train, y_train)):
X_train_f, X_val_f = X_train.loc[f_ind].copy(), X_train.loc[outf_ind].copy()
y_train_f, y_val_f = y_train[f_ind], y_train[outf_ind]
X_test_f = X_test.copy()
#upsampling adapted from kernel:
#https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283
pos = (pd.Series(y_train_f == 1))
# Add positive examples
X_train_f = pd.concat([X_train_f, X_train_f.loc[pos]], axis=0)
y_train_f = pd.concat([y_train_f, y_train_f.loc[pos]], axis=0)
# Shuffle data
idx = np.arange(len(X_train_f))
np.random.shuffle(idx)
X_train_f = X_train_f.iloc[idx]
y_train_f = y_train_f.iloc[idx]
#preprocessing
proc_X_train_f, proc_X_val_f, proc_X_test_f = preproc(X_train_f, X_val_f, X_test_f)
#track oof prediction for cv scores
val_preds = 0
for j in range(runs_per_fold):
NN = build_embedding_network()
NN.fit(proc_X_train_f, y_train_f.values, epochs=n_epochs, batch_size=4096, verbose=0)
val_preds += NN.predict(proc_X_val_f)[:,0] / runs_per_fold
y_preds[:,i] += NN.predict(proc_X_test_f)[:,0] / runs_per_fold
full_val_preds[outf_ind] += val_preds
cv_gini = gini_normalizedc(y_val_f.values, val_preds)
cv_ginis.append(cv_gini)
print ('\nFold %i prediction cv gini: %.5f\n' %(i,cv_gini))
print('Mean out of fold gini: %.5f' % np.mean(cv_ginis))
print('Full validation gini: %.5f' % gini_normalizedc(y_train.values, full_val_preds))
y_pred_final = np.mean(y_preds, axis=1)
df_sub = pd.DataFrame({'id' : df_test.id,
'target' : y_pred_final},
columns = ['id','target'])
df_sub.to_csv('NN_EntityEmbed_10fold-sub.csv', index=False)
| pd.DataFrame(full_val_preds) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from causalml.inference.tree import UpliftTreeClassifier
from causalml.inference.tree import UpliftRandomForestClassifier
from causalml.metrics import get_cumgain
from .const import RANDOM_SEED, N_SAMPLE, CONTROL_NAME, TREATMENT_NAMES, CONVERSION
def test_make_uplift_classification(generate_classification_data):
df, _ = generate_classification_data()
assert df.shape[0] == N_SAMPLE * len(TREATMENT_NAMES)
def test_UpliftRandomForestClassifier(generate_classification_data):
df, x_names = generate_classification_data()
df_train, df_test = train_test_split(df,
test_size=0.2,
random_state=RANDOM_SEED)
# Train the UpLift Random Forest classifer
uplift_model = UpliftRandomForestClassifier(
min_samples_leaf=50,
control_name=TREATMENT_NAMES[0]
)
uplift_model.fit(df_train[x_names].values,
treatment=df_train['treatment_group_key'].values,
y=df_train[CONVERSION].values)
y_pred = uplift_model.predict(df_test[x_names].values)
result = pd.DataFrame(y_pred, columns=uplift_model.classes_)
best_treatment = np.where((result < 0).all(axis=1),
CONTROL_NAME,
result.idxmax(axis=1))
# Create a synthetic population:
# Create indicator variables for whether a unit happened to have the
# recommended treatment or was in the control group
actual_is_best = np.where(
df_test['treatment_group_key'] == best_treatment, 1, 0
)
actual_is_control = np.where(
df_test['treatment_group_key'] == CONTROL_NAME, 1, 0
)
synthetic = (actual_is_best == 1) | (actual_is_control == 1)
synth = result[synthetic]
auuc_metrics = synth.assign(
is_treated=1 - actual_is_control[synthetic],
conversion=df_test.loc[synthetic, CONVERSION].values,
uplift_tree=synth.max(axis=1)
).drop(columns=list(uplift_model.classes_))
cumgain = get_cumgain(auuc_metrics,
outcome_col=CONVERSION,
treatment_col='is_treated',
steps=20)
# Check if the cumulative gain of UpLift Random Forest is higher than
# random
assert cumgain['uplift_tree'].sum() > cumgain['Random'].sum()
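# The same synthetic-population construction is reused in the tree test below:
# only units whose observed assignment matches the recommended treatment (or
# the control group) are kept, and their cumulative gain is compared against
# the random-targeting baseline column ('Random') provided by get_cumgain.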
def test_UpliftTreeClassifier(generate_classification_data):
df, x_names = generate_classification_data()
df_train, df_test = train_test_split(df,
test_size=0.2,
random_state=RANDOM_SEED)
# Train the UpLift Random Forest classifer
uplift_model = UpliftTreeClassifier(control_name=TREATMENT_NAMES[0])
uplift_model.fit(df_train[x_names].values,
treatment=df_train['treatment_group_key'].values,
y=df_train[CONVERSION].values)
_, _, _, y_pred = uplift_model.predict(df_test[x_names].values,
full_output=True)
result = | pd.DataFrame(y_pred) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gps_building_blocks.ml.statistical_inference.inference."""
from unittest import mock
from absl.testing import parameterized
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import datasets
from sklearn import model_selection
from absl.testing import absltest
from gps_building_blocks.ml.statistical_inference import data_preparation
class InferenceTest(parameterized.TestCase):
_missing_data = pd.DataFrame(
data=[[np.nan, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, np.nan]],
columns=['first', 'second'])
def test_missing_value_emits_warning_twice(self):
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
def test_check_data_raises_exception_on_missing_data(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(data_preparation.MissingValueError):
inference_data.data_check(raise_on_error=True)
def test_invalid_target_column_raise_exception(self):
with self.assertRaises(KeyError):
data_preparation.InferenceData(
initial_data=self._missing_data,
target_column='non_ci_sono')
def test_impute_missing_values_replaced_with_mean(self):
inference_data = data_preparation.InferenceData(self._missing_data)
expected_result = pd.DataFrame(
data=[[0.4000, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, 1.0000]],
columns=['first', 'second'])
result = inference_data.impute_missing_values(strategy='mean')
pd.testing.assert_frame_equal(result, expected_result)
def test_fixed_effect_raise_exception_on_categorical_covariate(self):
data = pd.DataFrame(
data=[['0', 0.0, '1', 3.0],
['1', 0.0, '2', 2.0],
['1', 1.0, '3', 2.0],
['1', 1.0, '4', 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
inference_data = data_preparation.InferenceData(data)
with self.assertRaises(data_preparation.CategoricalCovariateError):
inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
def test_fixed_effect_demeaning_subtract_mean_in_groups(self):
data = pd.DataFrame(
data=[['0', 0.0, 1, 3.0],
['1', 0.0, 2, 2.0],
['1', 1.0, 3, 2.0],
['1', 1.0, 4, 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
expected_result = pd.DataFrame(
data=[['0', 0.0, 2.5, 2.0],
['1', 0.0, 2.5, 2.0],
['1', 1.0, 2.0, 2.5],
['1', 1.0, 3.0, 1.5]],
columns=data.columns,
index=data.index).set_index(['control_1', 'control_2'], append=True)
inference_data = data_preparation.InferenceData(data)
result = inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
pd.testing.assert_frame_equal(result, expected_result)
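  # Hedged note on the expectation above: the "quick" strategy appears to
  # demean each variable within its fixed-effect group and then add back the
  # grand mean, which is why the two group3 rows become (2.0, 3.0) for
  # variable_1 and (2.5, 1.5) for variable_2 instead of raw +/-0.5 deviations.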
def test_address_low_variance_removes_column(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0],
[0.0, 1.0, 0.0, 10.0],
[1.0, 1.0, 0.0, 5.00],
[1.0, 0.0, 0.0, 0.00]],
columns=['control', 'variable', 'variable_1', 'outcome'])
expected_result = pd.DataFrame(
data=[[0.0, 1.0, 10.0],
[0.0, 1.0, 10.0],
[1.0, 1.0, 5.00],
[1.0, 0.0, 0.00]],
columns=['control', 'variable', 'outcome'])
inference_data = data_preparation.InferenceData(
data, target_column='outcome')
result = inference_data.address_low_variance(drop=True)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_raises_error_on_singular_correlation_matrix(self):
singular_correlation_matrix_df = pd.DataFrame(
data=[[1.1, 2.1, 3.1, 4.1, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
singular_correlation_matrix_df, target_column='outcome')
with self.assertRaises(data_preparation.SingularDataError):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_raises_error_on_ill_conditioned_correlation_matrix(self):
ill_conditioned_correlation_matrix_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0],
[0.0, 2.0, 3.0, 0.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
ill_conditioned_correlation_matrix_df, target_column='outcome')
with self.assertRaises(data_preparation.SingularDataError):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_error_has_correct_message(self):
ill_conditioned_correlation_matrix_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0],
[0.0, 2.0, 3.0, 0.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
ill_conditioned_correlation_matrix_df, target_column='outcome')
expected_message = (
'Inference Data has a singular or nearly singular correlation matrix. '
'This could be caused by extremely correlated or collinear columns. '
'The three pairs of columns with the highest absolute correlation '
'coefficients are: (control,variable_3): 0.970, (variable_1,variable_3)'
': -0.700, (control,variable_1): -0.577. This could also be caused by '
'columns with extremiely low variance. Recommend running the '
'address_low_variance() method before VIF. Alternatively, consider '
'running address_collinearity_with_vif() with '
'use_correlation_matrix_inversion=False to avoid this error.'
)
with self.assertRaises(
data_preparation.SingularDataError, msg=expected_message):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_noise_injection_catches_perfect_correlation(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
iris_data['perfectly_correlated_column'] = iris_data['petal length (cm)']
expected_result = iris_data.drop(
columns=['petal length (cm)', 'perfectly_correlated_column'])
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='quick',
drop=True,
handle_singular_data_errors_automatically=True,
vif_threshold=50.0)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_noise_injection_catches_perfect_collinearity(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
iris_data['perfectly_collinear_column'] = iris_data[
'petal length (cm)'] + iris_data['petal width (cm)']
expected_result = iris_data.drop(columns=[
'petal length (cm)', 'petal width (cm)', 'perfectly_collinear_column'
])
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='quick',
drop=True,
handle_singular_data_errors_automatically=True,
vif_threshold=50.0)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_noise_injection_fails_correctly_when_too_few_samples(self):
too_few_samples_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
too_few_samples_df, target_column='outcome')
expected_regex = (
'Automatic attempt to resolve SingularDataError by '
'injecting artifical noise to the data has failed. This '
'probably means the dataset has too many features relative '
'to the number of samples.')
with self.assertRaisesRegex(data_preparation.SingularDataError,
expected_regex):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=True)
def test_vif_method_fails_correctly_with_unknown_value(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(ValueError):
inference_data.address_collinearity_with_vif(
vif_method='incorrect_value')
@parameterized.named_parameters({
'testcase_name': 'scale_10',
'scaling': 10,
}, {
'testcase_name': 'scale_50',
'scaling': 50,
}, {
'testcase_name': 'scale_-50',
'scaling': -50,
})
def test_minmaxscaling_drops_appropriate_variables(self, scaling):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'outcome'])
data = data * scaling
expected_result = data[['variable_1', 'outcome']]
inference_data = data_preparation.InferenceData(
data)
result = inference_data.address_low_variance(
threshold=.15,
drop=True,
minmax_scaling=True,
)
pd.testing.assert_frame_equal(result, expected_result)
def test_zscored_input_raises_warning(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'variable_3'])
data = data.apply(stats.zscore).fillna(0)
inference_data = data_preparation.InferenceData(data)
with self.assertWarns(Warning):
_ = inference_data.address_low_variance()
def test_minmaxscaling_with_invalid_threshold_raises_warning(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'variable_3'])
inference_data = data_preparation.InferenceData(data)
with self.assertWarns(Warning):
_ = inference_data.address_low_variance(minmax_scaling=True, threshold=.5)
def test_address_collinearity_with_vif_removes_column(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
expected_result = iris_data.drop(columns='petal length (cm)')
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='sequential',
drop=True)
pd.testing.assert_frame_equal(result, expected_result)
def test_encode_categorical_covariate_dummy_variable_2(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 'a', 10.0],
[0.0, 1.0, 'b', 10.0],
[1.0, 1.0, 'c', 5.00],
[1.0, 0.0, 'a', 0.00]],
columns=['control', 'variable_1', 'variable_2', 'outcome'])
expected_result = pd.DataFrame(
data=[[0.0, 1.0, 10.0, 1, 0, 0],
[0.0, 1.0, 10.0, 0, 1, 0],
[1.0, 1.0, 5.00, 0, 0, 1],
[1.0, 0.0, 0.00, 1, 0, 0]],
columns=[
'control', 'variable_1', 'outcome', 'variable_2_a', 'variable_2_b',
'variable_2_c'
])
inference_data = data_preparation.InferenceData(
data, target_column='outcome')
result = inference_data.encode_categorical_covariates(
columns=['variable_2'])
pd.testing.assert_frame_equal(result, expected_result)
@parameterized.named_parameters(
('single_selections', ['1', '2', '3'], ['1', '2', '3']),
('double_selection', ['1,2', '3'], ['1', '2', '3']),
('early_stopping', ['1', ''], ['1']),
('all_at_once', ['1,2,3'], ['1', '2', '3']),
)
def test_address_collinearity_with_vif_interactive(
self, user_inputs, expected_dropped):
dataframe = pd.DataFrame(
data=[[1.1, 2.1, 3.1, 4.1, 0],
[1.0, 2.0, 3.0, 4.0, 0],
[1.0, 2.0, 3.0, 4.0, 0],
[1.0, 2.0, 3.0, 4.0, 1]],
columns=['1', '2', '3', '4', 'target'])
data = data_preparation.InferenceData(dataframe, target_column='target')
with mock.patch.object(data_preparation, '_input_mock') as input_mock:
# Avoid Colab\Notebook prints in tests output
with mock.patch.object(data_preparation, '_print_mock') as _:
user_inputs = list(reversed(user_inputs))
input_mock.side_effect = lambda x: user_inputs.pop()
result = data.address_collinearity_with_vif(
vif_method='interactive',
drop=True,
use_correlation_matrix_inversion=False
)
pd.testing.assert_frame_equal(
result,
dataframe.drop(expected_dropped, axis=1))
@parameterized.named_parameters(
('onehot_returns_expected_bins', False, False, pd.DataFrame(
[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1]],
columns=['variable_(-0.02, 4.0]', 'variable_(4.0, 8.0]',
'variable_(8.0, 12.0]', 'variable_(12.0, 16.0]',
'variable_(16.0, 20.0]'])),
('equal_sized_onehot_returns_expected_bins', True, False, pd.DataFrame(
[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]],
columns=['variable_(-0.001, 2.0]', 'variable_(2.0, 4.0]',
'variable_(4.0, 6.0]', 'variable_(6.0, 8.0]',
'variable_(8.0, 20.0]'])),
('scalar_numeric_returns_expected_bins', False, True, pd.DataFrame(
[0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4], columns=['variable'])),
('equal_sized_numeric_expected_bins', True, True, pd.DataFrame(
[0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4], columns=['variable'])),
)
def test_descretize(self, equal_sized_bins, numeric, expected_result):
data = data_preparation.InferenceData(pd.DataFrame(
data=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20],
columns=['variable']))
result = data.discretize_numeric_covariate(
'variable', equal_sized_bins=equal_sized_bins, bins=5, numeric=numeric)
pd.testing.assert_frame_equal(result, expected_result, check_dtype=False)
@parameterized.named_parameters(
('with_groups_kfold_as_int',
3,
np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 3]),
[pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5]}),
pd.DataFrame({'variable': [2, 3, 6, 7, 8, 9]},
index=[2, 3, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5, 6, 7, 8, 9]},
index=[0, 1, 4, 5, 6, 7, 8, 9])],
[pd.DataFrame({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5]),
pd.DataFrame({'variable': [2, 3]}, index=[2, 3])]),
('with_groups_kfold_as_object',
model_selection.GroupKFold(n_splits=3),
np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 3]),
[pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5]}),
pd.DataFrame({'variable': [2, 3, 6, 7, 8, 9]},
index=[2, 3, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5, 6, 7, 8, 9]},
index=[0, 1, 4, 5, 6, 7, 8, 9])],
[pd.DataFrame({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5]),
pd.DataFrame({'variable': [2, 3]}, index=[2, 3])]),
)
def test_split_with_groups_yields_expected_folds_with_non_overlaping_groups(
self,
cross_validation,
groups,
expected_trains,
expected_tests):
data = data_preparation.InferenceData(
pd.DataFrame({
'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}))
iterator = zip(data.split(cross_validation=cross_validation, groups=groups),
expected_trains,
expected_tests)
for (train_data, test_data), expected_train, expected_test in iterator:
train_groups = set(groups[train_data.data.index.tolist()])
test_groups = set(groups[test_data.data.index.tolist()])
pd.testing.assert_frame_equal(
train_data.data, expected_train, check_dtype=False)
pd.testing.assert_frame_equal(
test_data.data, expected_test, check_dtype=False)
self.assertEmpty(train_groups.intersection(test_groups))
@parameterized.named_parameters(
('without_groups_kfold_as_int', 3,
[pd.DataFrame({'variable': [4, 5, 6, 7, 8, 9]},
index=[4, 5, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 7, 8, 9]},
index=[0, 1, 2, 3, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5, 6]},
index=[0, 1, 2, 3, 4, 5, 6])],
[pd.DataFrame({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3]),
pd.DataFrame({'variable': [4, 5, 6]}, index=[4, 5, 6]),
pd.DataFrame({'variable': [7, 8, 9]}, index=[7, 8, 9])]),
('without_groups_kfold_as_object',
model_selection.KFold(n_splits=3),
[pd.DataFrame({'variable': [4, 5, 6, 7, 8, 9]},
index=[4, 5, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 7, 8, 9]},
index=[0, 1, 2, 3, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5, 6]},
index=[0, 1, 2, 3, 4, 5, 6])],
[ | pd.DataFrame({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pymatgen as mg
# %% define filepath constant
TABLE_PATH = "./torrance_tabulated.xlsx"
# %% read in the tables
# read in the tabulated data of closed_shell oxides in table 2 in the original paper
closed_shell_oxides = pd.read_excel(TABLE_PATH, sheet_name="table_2")
# rename beta-Ga2O3 as Ga2O3
closed_shell_oxides.loc[closed_shell_oxides.formula == "b-Ga2O3", "formula"] = "Ga2O3"
# read in the tabulated data of all the other oxides in table 3 in the original paper
all_other_oxides = | pd.read_excel(TABLE_PATH, sheet_name="table_3") | pandas.read_excel |
import argparse
import json
import os
import math
import heapq
from sklearn.cluster import KMeans
import pandas as pd
import matplotlib.pyplot as plt
import random
import time
from rich.progress import track
# --roadnetFile Shuanglong.json --dir .\tools\generator
# --roadnetFile roadnet_10_10.json --dir .\tools\generator
# --roadnetFile nanjingmega.json --dir .\tools\generator --engineNum 200
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--rowNum", type=int)
parser.add_argument("--colNum", type=int)
parser.add_argument("--engineNum", type=int, default=4)
parser.add_argument("--roadnetFile", type=str)
parser.add_argument("--dir", type=str, default="./")
parser.add_argument("--output", type=str)
parser.add_argument("--turn", action="store_true")
return parser.parse_args()
def group_roads_by_length(roads, n):
lists = [[] for _ in range(n)]
number_list = list(roads)
totals = [(0, i) for i in range(n)]
heapq.heapify(totals)
for _ in range(len(roads)):
total, index = heapq.heappop(totals)
lists[index].append(roads[_])
heapq.heappush(totals, (total + roads[_]['length'], index))
number_list[_] = index
return number_list, lists
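# Illustrative sketch (not part of the original generator): group_roads_by_length
# only needs a 'length' key on each road dict. The min-heap always extends the
# group with the smallest running total, so the per-engine totals stay balanced.
def _demo_group_roads_by_length():
    demo_roads = [{'length': l} for l in (5.0, 3.0, 8.0, 2.0, 7.0, 4.0)]
    labels, groups = group_roads_by_length(demo_roads, 3)
    # labels[i] is the engine index assigned to road i
    totals = [sum(r['length'] for r in g) for g in groups]
    return labels, totals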
def group_roads_by_midpoint(roads, n):
kmeans = KMeans(n_clusters=n,
init='k-means++',
n_init=10,
max_iter=300,
tol=0.0001,
verbose=0,
random_state=None,
copy_x=True,
algorithm='auto'
)
midpoints = pd.DataFrame(data=roads, columns=['midpoint'])[
'midpoint'].apply(pd.Series)
kmeans.fit(midpoints)
labels = list()
for _ in range(len(kmeans.labels_)):
labels.append(int(kmeans.labels_[_]))
return labels
def group_intersections_by_point(intersections, n):
kmeans = KMeans(n_clusters=n,
init='k-means++',
n_init=10,
max_iter=300,
tol=0.0001,
verbose=0,
random_state=None,
copy_x=True,
algorithm='auto'
)
points = | pd.DataFrame(data=intersections, columns=['point']) | pandas.DataFrame |
"""Compilation of functions used for data processing."""
import os
import yaml
from itertools import compress
from datetime import datetime
import pandas as pd
import numpy as np
from ideotype.utils import get_filelist
from ideotype import DATA_PATH
def read_sims(path):
"""
Read and condense all maizsim raw output.
1. Reset column names for model output
2. Fetch year/site/cvar info from file name
3. Read in last line of model output
    4. Document files with abnormal output line length and skip reading them
5. Compile year/site/cvar info & last line of model output
6. Compile issues
Parameters
----------
    path : str
Root directory of all simulation outputs.
Returns
-------
DataFrame
df_sims : df with all simulated yield.
df_issues : df recording failed site-year recordings.
"""
fpaths = get_filelist(path)
fpaths_select = [
(fpath.split('/')[-1].split('_')[0] == 'out1') and
(fpath.split('/')[-1].split('.')[-1] == 'txt') for fpath in fpaths]
fpath_sims = list(compress(fpaths, fpaths_select))
cols = ['year', 'cvar', 'site', 'date', 'jday', 'time',
'leaves', 'mature_lvs', 'drop_lvs', 'LA', 'LA_dead', 'LAI',
'RH', 'leaf_WP', 'PFD', 'Solrad',
'temp_soil', 'temp_air', 'temp_can',
'ET_dmd', 'ET_suply', 'Pn', 'Pg', 'resp', 'av_gs',
'LAI_sunlit', 'LAI_shaded',
'PFD_sunlit', 'PFD_shaded',
'An_sunlit', 'An_shaded',
'Ag_sunlit', 'Ag_shaded',
'gs_sunlit', 'gs_shaded',
'VPD', 'N', 'N_dmd', 'N_upt', 'N_leaf', 'PCRL',
'dm_total', 'dm_shoot', 'dm_ear', 'dm_totleaf',
'dm_dropleaf', 'df_stem', 'df_root',
'soil_rt', 'mx_rootdept',
'available_water', 'soluble_c', 'note']
data_all = []
issues = []
for fpath_sim in fpath_sims:
# extrating basic file info
year = int(fpath_sim.split('/')[-3])
site = fpath_sim.split('/')[-1].split('_')[1]
cvar = int(fpath_sim.split('/')[-1].split('_')[-1].split('.')[0])
# reading in file and setting up structure
with open(fpath_sim, 'r') as f:
f.seek(0, os.SEEK_END) # move pointer to end of file
# * f.seek(offset, whence)
# * Position computed from adding offset to a reference point,
# * the reference point is selected by the whence argument.
# * os.SEEK_SET (=0)
# * os.SEEK_CUR (=1)
# * os.SEEK_END (=2)
try:
# find current position (now at the end of file)
# and count back a few positions and read forward from there
f.seek(f.tell() - 3000, os.SEEK_SET)
# * f.tell() returns an integer giving the file object’s
# * current position in the file represented as number of bytes
# * from the beginning of the file when in binary mode
# * and an opaque number when in text mode.
for line in f:
f_content = f.readlines()
if len(f_content[-1]) == 523: # normal character length
sim_output = list(f_content[-1].split(','))
data = [i.strip() for i in sim_output]
data.insert(0, year)
data.insert(1, cvar)
data.insert(2, site)
data_all.append(data)
else:
issues.append(fpath_sim)
except ValueError:
issues.append(fpath_sim.split('/')[-1] + str(', value_error'))
df_sims = pd.DataFrame(data_all, columns=cols)
df_sims.dm_total = df_sims.dm_total.astype(float)
df_sims.dm_ear = df_sims.dm_ear.astype(float)
df_issues = | pd.Series(issues, dtype='str') | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Home Broker API - Market data downloader
# https://github.com/crapher/pyhomebroker.git
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from datetime import datetime
from .exceptions import DataException
__settlements_int = {
'1': 'spot',
'2': '24hs',
'3': '48hs'}
__settlements_str = {
'spot': '1',
'24hs': '2',
'48hs': '3'}
__callput = {
0: '',
1: 'CALL',
2: 'PUT'}
############################
## PROCESS JSON DOCUMENTS ##
############################
def convert_to_numeric_columns(df, columns):
for col in columns:
df[col] = df[col].apply(lambda x: x.replace('.', '').replace(',','.') if isinstance(x, str) else x)
df[col] = pd.to_numeric(df[col].apply(lambda x: np.nan if x == '-' else x))
return df
def process_personal_portfolio(df):
result_index = ['symbol', 'settlement']
filter_columns = ['Symbol', 'Term', 'BuyQuantity', 'BuyPrice', 'SellPrice', 'SellQuantity', 'LastPrice', 'VariationRate', 'StartPrice', 'MaxPrice', 'MinPrice', 'PreviousClose', 'TotalAmountTraded', 'TotalQuantityTraded', 'Trades', 'TradeDate', 'MaturityDate', 'StrikePrice', 'PutOrCall']
result_columns = ['symbol', 'settlement', 'bidsize', 'bid', 'ask', 'asksize', 'last', 'change', 'open', 'high', 'low', 'previous_close', 'turnover', 'volume', 'operations', 'datetime', 'expiration', 'strike', 'kind']
numeric_columns = ['last', 'open', 'high', 'low', 'volume', 'turnover', 'operations', 'change', 'bidsize', 'bid', 'asksize', 'ask', 'previous_close', 'strike']
options_columns = ['MaturityDate','StrikePrice','PutOrCall']
if not df.empty:
df.TradeDate = pd.to_datetime(df.TradeDate, format='%Y%m%d', errors='coerce') + pd.to_timedelta(df.Hour, errors='coerce')
df.loc[df.StrikePrice == 0, options_columns] = np.nan
df.MaturityDate = pd.to_datetime(df.MaturityDate, format='%Y%m%d', errors='coerce')
df.PutOrCall = df.PutOrCall.apply(lambda x: __callput[x] if x in __callput else __callput[0])
df.Term = df.Term.apply(lambda x: __settlements_int[x] if x in __settlements_int else '')
df = df[filter_columns].copy()
df.columns = result_columns
df = convert_to_numeric_columns(df, numeric_columns)
else:
df = pd.DataFrame(columns=result_columns)
df = df.set_index(result_index)
return df
def process_securities(df):
result_index = ['symbol', 'settlement']
filter_columns = ['Symbol', 'Term', 'BuyQuantity', 'BuyPrice', 'SellPrice', 'SellQuantity', 'LastPrice', 'VariationRate', 'StartPrice', 'MaxPrice', 'MinPrice', 'PreviousClose', 'TotalAmountTraded', 'TotalQuantityTraded', 'Trades', 'TradeDate']
result_columns = ['symbol', 'settlement', 'bidsize', 'bid', 'ask', 'asksize', 'last', 'change', 'open', 'high', 'low', 'previous_close', 'turnover', 'volume', 'operations', 'datetime']
numeric_columns = ['last', 'open', 'high', 'low', 'volume', 'turnover', 'operations', 'change', 'bidsize', 'bid', 'asksize', 'ask', 'previous_close']
if not df.empty:
df.TradeDate = pd.to_datetime(df.TradeDate, format='%Y%m%d', errors='coerce') + pd.to_timedelta(df.Hour, errors='coerce')
df.Term = df.Term.apply(lambda x: __settlements_int[x] if x in __settlements_int else '')
df = df[filter_columns].copy()
df.columns = result_columns
df = convert_to_numeric_columns(df, numeric_columns)
else:
df = | pd.DataFrame(columns=result_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Time : 2018/10/3 下午2:36
# @Author : yidxue
import pandas as pd
from common.util_function import *
"""
http://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html
http://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
"""
df1 = | pd.DataFrame({'a': ['a', 'c', 'd'], 'b': [4, 6, 7]}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import requests
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
page = requests.get('https://www.indec.gob.ar/Nivel4/Tema/3/35/45')
soup = BeautifulSoup(page.content, 'html.parser')
link_xls=[]
for link in soup.find_all('a'):
if 'Estadísticas integradas de balanza de pagos, posición de inversión internacional y deuda externa. Años 2006-2021' in link.get_text():
link_xls.append('https://www.indec.gob.ar' + link.get('href'))
file_xls = requests.get(link_xls[0])
##### By Market Category #####
df = pd.read_excel(file_xls.content, sheet_name='Cuadro 15', skiprows=3)
df = df.dropna(how = 'all', axis = 0).dropna(how = 'all', axis = 1)
df = df.dropna(how = 'all', subset = df.columns[1:])
# Propagate the column name across all 'Unnamed' columns
df.columns = df.columns.astype(str)
df.columns = pd.Series([np.nan if 'Unnamed:' in x else x for x in df.columns.values]).ffill().values.flatten()
# Strip 'Año' and '*' from the column names
df.columns = df.columns.astype(str)
for col in df.columns:
new_col = col.replace("Año", "").replace('*','')
df = df.rename(columns={col: new_col})
# Transpose the df
df.rename(columns={df.columns[0]: "Date"}, inplace=True)
df = df.set_index('Date')
df = df.T
df = df.reset_index()
# Map the quarters to month-day strings and build 'Date'
df.rename(columns={df.columns[1]: "Trim"}, inplace=True)
df['Trim'] = df['Trim'].replace({
np.nan:'12-01',
'I':'03-01',
'II':'06-01',
'III':'09-01',
'IV':'12-01'
})
df['Date'] = df['index'].astype('str')+'-'+df['Trim'].astype('str')
df = df.set_index('Date')
df = df.drop(['index','Trim'], axis = 1)
# Add the 'Activos'/'Pasivos' prefix to the columns as appropriate
# Pasivos (liabilities)
df2 = df.copy()
df2 = df2.iloc[:,25:]
newCols=[]
for col in df2.columns:
newCols += ['PASIVOS - '+col]
df2.columns = newCols
# Activos (assets)
df = df.iloc[:,:25]
newCols=[]
for col in df.columns:
newCols += ['ACTIVOS - '+col]
df.columns = newCols
# Merge the dfs back together and rename the totals columns
df = df.merge(df2, how='outer', left_index=True, right_index=True)
df = df.rename(columns={'ACTIVOS - B90. POSICIÓN DE INVERSIÓN INTERNACIONAL NETA (A-L)':'B90. POSICIÓN DE INVERSIÓN INTERNACIONAL NETA (A-L)',
'ACTIVOS - A. ACTIVOS':'A. ACTIVOS',
'PASIVOS - L. PASIVOS':'L. PASIVOS'})
df['country'] = 'Argentina'
alphacast.datasets.dataset(8298).upload_data_from_df(df,
deleteMissingFromDB = False, onConflictUpdateDB = True, uploadIndex=True)
##### By Institutional Sector #####
df_inst = pd.read_excel(file_xls.content, sheet_name='Cuadro 16', skiprows=3)
df_inst = df_inst.dropna(how = 'all', axis = 0).dropna(how = 'all', axis = 1)
df_inst = df_inst.dropna(how = 'all', subset = df_inst.columns[1:])
# Propagate the column name across all 'Unnamed' columns
df_inst.columns = df_inst.columns.astype(str)
df_inst.columns = pd.Series([np.nan if 'Unnamed:' in x else x for x in df_inst.columns.values]).ffill().values.flatten()
# Strip 'Año' and '*' from the column names
df_inst.columns = df_inst.columns.astype(str)
for col in df_inst.columns:
new_col = col.replace("Año", "").replace('*','')
df_inst = df_inst.rename(columns={col: new_col})
# Transpose the df_inst
df_inst.rename(columns={df_inst.columns[0]: "Date"}, inplace=True)
df_inst = df_inst.set_index('Date')
df_inst = df_inst.T
df_inst = df_inst.reset_index()
# Map the quarters to month-day strings and build 'Date'
df_inst.rename(columns={df_inst.columns[1]: "Trim"}, inplace=True)
df_inst['Trim'] = df_inst['Trim'].replace({
np.nan:'12-01',
'I':'03-01',
'II':'06-01',
'III':'09-01',
'IV':'12-01'
})
df_inst['Date'] = df_inst['index'].astype('str')+'-'+df_inst['Trim'].astype('str')
df_inst = df_inst.drop(['index','Trim'], axis = 1)
df_inst.drop(df_inst.index[30], inplace = True)
df_inst = df_inst.set_index('Date')
# Add the 'Activos'/'Pasivos' prefix to the columns as appropriate
# Pasivos (liabilities)
df_inst2 = df_inst.copy()
df_inst2 = df_inst2.iloc[:,20:]
newCols=[]
for col in df_inst2.columns:
newCols += ['PASIVOS - '+col]
df_inst2.columns = newCols
# Activos (assets)
df_inst = df_inst.iloc[:,:20]
newCols=[]
for col in df_inst.columns:
newCols += ['ACTIVOS - '+col]
df_inst.columns = newCols
# Merge the dfs back together and rename the totals columns
df_inst = df_inst.merge(df_inst2, how='outer', left_index=True, right_index=True)
df_inst = df_inst.rename(columns={'ACTIVOS - B90. POSICIÓN DE INVERSIÓN INTERNACIONAL NETA (A-L)':'B90. POSICIÓN DE INVERSIÓN INTERNACIONAL NETA (A-L)',
'ACTIVOS - S121. Banco Central':'S121. Banco Central',
'ACTIVOS - S122. Sociedades captadoras de depósitos':'S122. Sociedades captadoras de depósitos',
'ACTIVOS - S13. Gobierno general':'S13. Gobierno general',
'ACTIVOS - S1Z. Otros sectores':'S1Z. Otros sectores',
'ACTIVOS - A. ACTIVOS':'A. ACTIVOS',
'PASIVOS - L. PASIVOS':'L. PASIVOS'})
df_inst['country'] = 'Argentina'
alphacast.datasets.dataset(8299).upload_data_from_df(df_inst,
deleteMissingFromDB = False, onConflictUpdateDB = True, uploadIndex=True)
##### By Functional Category and Instrument #####
df_cat = | pd.read_excel(file_xls.content, sheet_name='Cuadro 18', skiprows=3) | pandas.read_excel |
import requests
import pandas as pd
import json
import os
from pandas.io.json import json_normalize #package for flattening json in pandas df
import flatjson
import warnings
warnings.filterwarnings('ignore')
def show_table():
jtoken = os.getenv('GITHUB_TOKEN', '')
ztoken = ''
url = f"https://api.github.com/repositories/292545304/issues"
urlz = 'https://api.zenhub.io/p1/repositories/292545304/board'
headers = {'Authorization': f'token {jtoken}'}
r = requests.get(url, headers=headers, params={'state': 'all'}).json()
data = json_normalize(r, max_level=1)
dfn = pd.DataFrame.from_dict(data)
dfn1 = dfn[['created_at', 'state','closed_at','user.login','author_association','title','body', 'number', 'assignee.login']]
dfn = dfn[['state', 'number', 'assignee.login']]
headersz = {'X-Authentication-Token': ztoken, }
rz = requests.get(urlz, headers=headersz).json()
dataz = flatjson.dumps(rz)
data1 = json_normalize(dataz)
df = pd.DataFrame.from_dict(data1)
df = df.loc[:, ~df.columns.str.endswith('id')]
df = df.loc[:, ~df.columns.str.endswith('is_epic')]
df = df.loc[:, ~df.columns.str.endswith('position')]
new = df[df.columns[pd.Series(df.columns).str.startswith('pipelines[0]')]]
new = new.transpose()
new.columns = new.iloc[0]
new = new[1:]
new = new.rename({'New Issues':'number'}, axis=1)
new['New Issues'] = 'New Issues'
bak = df[df.columns[pd.Series(df.columns).str.startswith('pipelines[1]')]]
bak = bak.transpose()
bak.columns = bak.iloc[0]
bak = bak[1:]
bak = bak.rename({'Backlog':'number'}, axis=1)
bak['Backlog'] = 'Backlog'
prog = df[df.columns[pd.Series(df.columns).str.startswith('pipelines[2]')]]
prog = prog.transpose()
prog.columns = prog.iloc[0]
prog = prog[1:]
prog = prog.rename({'In Progress':'number'}, axis=1)
prog['In Progress'] = 'In Progress'
peer = df[df.columns[pd.Series(df.columns).str.startswith('pipelines[3]')]]
peer = peer.transpose()
peer.columns = peer.iloc[0]
peer = peer[1:]
peer = peer.rename({'Peer Review':'number'}, axis=1)
peer['Peer Review'] = 'Peer Review'
gw = df[df.columns[pd.Series(df.columns).str.startswith('pipelines[4]')]]
gw = gw.transpose()
gw.columns = gw.iloc[0]
gw = gw[1:]
gw = gw.rename({'GW Team Review':'number'}, axis=1)
gw['GW Team Review'] = 'GW Team Review'
dfv = | pd.concat([new, bak, prog, peer, gw]) | pandas.concat |
import numpy as np
import pandas as pd
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
prodlim = importr('prodlim')
survival = importr('survival')
#KMsurv = importr('KMsurv')
#cvAUC = importr('pROC')
#utils.install_packages('pseudo')
#utils.install_packages('prodlim')
#utils.install_packages('survival')
#utils.install_packages('KMsurv')
#utils.install_packages('pROC')
import rpy2.robjects as robjects
from rpy2.robjects import r
def sim_event_times_case1(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
#split data
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
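# Reference note (sketch only): the jackknife pseudo-values requested from
# prodlim above follow the standard construction
#     theta_i(t) = n * theta_hat(t) - (n - 1) * theta_hat_{-i}(t),
# where theta_hat is the Kaplan-Meier estimate on all n subjects and
# theta_hat_{-i} leaves subject i out; the code then uses 1 - pseudo-survival
# as the pseudo risk. The helper below is a hypothetical numpy illustration of
# that formula, not a replacement for the prodlim call.
def pseudo_values_sketch(full_estimate, loo_estimates):
    loo = np.asarray(loo_estimates, dtype=np.float64)
    n = loo.shape[0]
    return n * full_estimate - (n - 1) * loo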
def sim_event_times_case2(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_samples))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case3(trainset, num_samples):
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1* digits- 1.6*np.cos(digits)*clinical_data[:,0]+.3*clinical_data[:,1]*clinical_data[:,0] )* (.7/2)
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
#denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
shape_c = np.maximum(0.001,np.exp(-1.8*clinical_data[:,0]+1.4*clinical_data[:,1]+1.5 *clinical_data[:,0]*clinical_data[:,1]))
censored_times = np.random.gamma(shape_c,digits, num_samples)
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = | pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID') | pandas.merge |
import math
from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder, QuantileTransformer
from sklearn.neighbors import NearestNeighbors
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
'''
This file contains all the preprocessing methods:
1- Intrinsic dimensionality
2- define feature and target
3- identify numeric and categorical features
'''
"""
Python Implementation of 'Maximum Likelihood Estimation of Intrinsic Dimension' <NAME> and <NAME>, Advances in neural information processing systems, 2005
----------
The goal is to estimate the intrinsic dimensionality of the data. The estimate is scale dependent
(depending on how much you zoom into the data distribution you can find different dimensionalities), so the authors
propose averaging it over a range of scales; the scale interval [k1, k2] is the only parameter of the algorithm.
This code also provides a way to repeat the estimation with bootstrapping to estimate uncertainty.
"""
def load_data(data, filetype):
"""
loads data from CSV file into dataframe.
Parameters
----------
data : String
path to input dataset
filetype : String
type of file
Returns
----------
df_Data : dataframe
dataframe with data
"""
if filetype == "csv":
df_Data = pd.read_csv(data)
else:
df_Data = | pd.read_excel(data) | pandas.read_excel |
# RESpost.py
import json
from numpy import product
import pandas as pd
import os
import glob
wd = os.getcwd()
simulations_raw = glob.glob(wd+'/cache/2030*.json')
extractor = lambda x: x.split('\\')[-1].replace(".json", '').replace("2030RES_", '')
simulations_keys = [extractor(x) for x in simulations_raw]
#%%
# sim_list = [json.load(sim) for sim in simulations_raw]
roundd = lambda x: [round(i,1) for i in x]
results = list()
for sim in simulations_raw:
with open(sim) as infile:
results.append(json.load(infile))
pv, wind, bat, h2, etm, grid = [], [], [], [], [], []
for res in results:
for key in res.keys():
if 'pv' in key:
pv.append(res[key]['settings']['installed'])
if 'wind' in key:
wind.append(res[key]['settings']['installed'])
if 'lith' in key:
bat.append(res[key]['settings']['installed'])
if 'hydro' in key:
h2.append(res[key]['settings']['installed'])
if 'dem' in key:
etm.append(-min(res[key]['state']['power [-]']))
if 'grid' in key:
grid.append(res[key]['settings']['installed'])
predf = {
"PV power capacity (MWp)": roundd(pv),
"Wind power capacity (MWp)": roundd(wind),
"Lithium storage capacity (MWh)": roundd(bat),
"Hydrogen storage capacity (MWh)": roundd(h2),
"Maximum power demand (MW)": roundd(etm),
"Installed grid capacity (MW)": roundd(grid),
}
df = | pd.DataFrame(predf, index=simulations_keys) | pandas.DataFrame |
import argparse
import os
import shutil
import zipfile
import pathlib
import re
from datetime import datetime
import collections
import pandas as pd
import geohash
import math
import helpers
import plotly.express as px
ControlInfo = collections.namedtuple("ControlInfo", ["num_tracks", "date", "duration"])
def parse_args():
parser = argparse.ArgumentParser(
description="Converts HYSPLIT Locust model output into a geohashed CSV"
)
parser.add_argument("--input_dir", type=str, required=True)
parser.add_argument("--output_file", type=str, default="./output.csv")
parser.add_argument("--temp_dir", type=str, default="./temp")
return parser.parse_args()
def extract_input(input_dir, temp_dir):
os.makedirs(temp_dir, exist_ok=True)
p = pathlib.Path(input_dir)
swarm_ids = []
zip_files = p.glob("*.zip")
for zf in zip_files:
shutil.copy(zf, temp_dir)
temp_zip_file = os.path.join(temp_dir, zf.name)
with zipfile.ZipFile(temp_zip_file, "r") as zip_ref:
zip_ref.extractall(temp_dir)
swarm_ids.append(zf.stem)
return swarm_ids
def parse_control_files(data_dir, ids):
# regex to ignore comments on a line
line_re = re.compile("(.*)#")
# map to hold extract control info
swarm_control_info = {}
p = pathlib.Path(data_dir)
# find control files for each swarm id
for id in ids:
swarm_control_info[id] = []
control_files = p.glob(f"{id}_CONTROL.*.txt")
for cf in control_files:
# open the control file
with open(cf, "r") as f:
# strip comments
lines = f.read().splitlines()
stripped_lines = []
for l in lines:
m = line_re.match(l)
if m:
stripped_lines.append(m.group(1).strip())
else:
stripped_lines.append(l.strip())
# read in required data
parsed_date = datetime.strptime(stripped_lines[0], "%y %m %d %H %M")
number_of_tracks = stripped_lines[1]
duration_hrs = stripped_lines[2 + int(number_of_tracks)]
swarm_control_info[id].append(
ControlInfo(number_of_tracks, parsed_date, duration_hrs)
)
swarm_control_info[id].sort(key=lambda d: d.date)
ctrl_arr = swarm_control_info[id]
return swarm_control_info
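# Hypothetical CONTROL file layout implied by the parsing above (values are
# illustrative only; trailing '# ...' comments are stripped by line_re):
#   20 02 10 06 00     # line 0: start date, "%y %m %d %H %M"
#   3                  # line 1: number of tracks
#   <track 1 start>    # lines 2 .. 2+N-1: one line per track
#   <track 2 start>
#   <track 3 start>
#   72                 # line 2+N: trajectory duration in hours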
def parse_trajectory_files(data_dir, ids, control_info):
day_re = re.compile(".*_day(\d+)")
p = pathlib.Path(data_dir)
# list of trajectory dataframes
trajectories = []
# find control files for each swarm id
for id in ids:
trajectory_data = {}
trajectory_files = p.glob(f"{id}_day*.txt")
for tf in trajectory_files:
# extract the day and use that to look up the control info
day = int(day_re.match(str(tf)).group(1))
ci = control_info[id][day - 1]
# load csv
tdf = pd.read_csv(tf, names=["point_id", "x", "y", "altitude"])
# get rid of the end marker
tdf = tdf[tdf["point_id"] != "END"]
# ID field consists of a track number in the first digit, and a record
# number in the remaining. They need to be split out.
tdf["track"] = tdf["point_id"].str.strip().str[0]
tdf["point_id"] = tdf["point_id"].str.strip().str[1:]
tdf["date"] = ci.date
# Group by the track ID number.
track_groups = tdf.groupby("track")
# Collect the grouped data frames by track/day
for track, frame in track_groups:
if track not in trajectory_data:
trajectory_data[track] = []
trajectory_data[track].append(frame)
# Compose the data for each track into a single df spanning multiple days
for _, d in trajectory_data.items():
tdf = pd.concat(d)
# recompute index/point ids to enmerate final point orderings
tdf = tdf.sort_values(["date", "point_id"])
tdf = tdf.reset_index(drop=True)
tdf["point_id"] = tdf.index
# write the swarm id and the starting altitude in for each
tdf["swarm_id"] = id
tdf["altitude_id"] = int(tdf.loc[0, "altitude"])
# save out the final dataframe
trajectories.append(tdf)
return trajectories
def geohash_cell_size(level):
bounds = geohash.bbox("0" * level)
return (bounds["e"] - bounds["w"], bounds["n"] - bounds["s"])
def get_cell_loc(p, cell_size):
idx = int(math.floor(p / cell_size))
return cell_size * idx
def line_to_geohashes(x0, y0, x1, y1, level):
# effectively a ray tracing operation over a rectangular grid
line_geohashes = set()
cell_x_size, cell_y_size = geohash_cell_size(level)
# compute the coordinate of the LL corner of each cell
x0_cell_loc = get_cell_loc(x0, cell_x_size)
y0_cell_loc = get_cell_loc(y0, cell_y_size)
x1_cell_loc = get_cell_loc(x1, cell_x_size)
y1_cell_loc = get_cell_loc(y1, cell_y_size)
# compute the difference between the endpoints
dx = math.fabs(x1 - x0)
dy = math.fabs(y1 - y0)
dt_dx = 1.0 / dx if dx != 0.0 else 0.0
dt_dy = 1.0 / dy if dy != 0.0 else 0.0
x = x0_cell_loc
y = y0_cell_loc
num_steps = 1
x_inc = 0.0
y_inc = 0.0
t_next_horiz = 0.0
t_next_vert = 0.0
if dx == 0.0:
x_inc = 0.0
t_next_horiz = dt_dx * cell_x_size
elif x1 > x0:
x_inc = cell_x_size
# number of horizontal intersections
num_steps += (x1_cell_loc - x) / cell_x_size
t_next_horiz = (x0_cell_loc + cell_x_size - x0) * dt_dx
else:
x_inc = -cell_x_size
# number of horizontal intersections
num_steps += (x - x1_cell_loc) / cell_x_size
t_next_horiz = (x0 - x0_cell_loc) * dt_dx
if dy == 0.0:
y_inc = 0
t_next_vert = dt_dy * cell_y_size
elif y1 > y0:
y_inc = cell_y_size
# number of vertical intersections
num_steps += (y1_cell_loc - y) / cell_y_size
t_next_vert = (y0_cell_loc + cell_y_size - y0) * dt_dy
else:
y_inc = -cell_y_size
# number of vertical intersections
num_steps += (y - y1_cell_loc) / cell_y_size
t_next_vert = (y0 - y0_cell_loc) * dt_dy
for n in range(int(num_steps), 0, -1):
line_geohashes.add(geohash.encode(y, x, level))
if t_next_vert < t_next_horiz:
y += y_inc
t_next_vert += dt_dy * cell_y_size
else:
x += x_inc
t_next_horiz += dt_dx * cell_x_size
return line_geohashes
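# Small usage sketch (illustrative only): collect the precision-5 geohash
# cells crossed by one segment. Note the argument order used throughout this
# file: x is longitude and y is latitude, and geohash.encode takes (lat, lon).
def _demo_line_to_geohashes():
    cells = line_to_geohashes(37.61, 0.05, 37.70, 0.12, 5)
    # the set contains the cells of both endpoints plus every cell the
    # segment passes through between them
    return sorted(cells)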
def trajectories_to_df(trajectories, level):
trajectory_dataframes = []
# loop over the trajectory dataframes and extract the flight paths as a list of x,y coords
for tdf in trajectories:
points = zip(tdf["x"], tdf["y"])
last_p = next(points)
geohashes = set()
for p in points:
geohashes = geohashes.union(
line_to_geohashes(p[0], p[1], last_p[0], last_p[1], level)
)
last_p = p
geohashes = list(geohashes)
bounds = [helpers.geohash_to_array_str(gh) for gh in geohashes]
gdf = pd.DataFrame(
columns=["swarm_id", "altitude_id", "date", "geohash", "bounds"]
)
gdf["geohash"] = geohashes
gdf["bounds"] = bounds
gdf["swarm_id"] = tdf["swarm_id"]
gdf["altitude_id"] = tdf["altitude_id"]
gdf["date"] = tdf["date"]
trajectory_dataframes.append(gdf)
return pd.concat(trajectory_dataframes)
def debug():
df = pd.read_csv("temp/swarm_7055_day3.txt", names=["id", "x", "y", "alt"])
df = df.head(100)
points = zip(df["x"], df["y"])
last_p = next(points)
geohashes = set()
idx = 0
for p in points:
idx += 1
if idx == 25:
print("25")
geohashes = geohashes.union(
line_to_geohashes(p[0], p[1], last_p[0], last_p[1], 5)
)
last_p = p
geohashes = list(geohashes)
p = [geohash.decode(g) for g in geohashes]
gh_p = list(zip(*p))
pdf = pd.DataFrame(columns=["x", "y", "source"])
pdf["x"] = gh_p[0]
pdf["y"] = gh_p[1]
pdf["source"] = False
edf = pd.DataFrame(columns=pdf.columns)
edf["y"] = df["x"]
edf["x"] = df["y"]
edf["source"] = True
pdf = | pd.concat([pdf, edf]) | pandas.concat |
import pandas as pd
from sankeyview.sankey_definition import SankeyDefinition, Ordering, ProcessGroup, Waypoint, Bundle
from sankeyview.sankey_view import sankey_view
from sankeyview.partition import Partition
from sankeyview.dataset import Dataset
def test_sankey_view_accepts_dataframe_as_dataset():
nodes = {
'a': ProcessGroup(selection=['a']),
'b': ProcessGroup(selection=['b']),
}
bundles = [
Bundle('a', 'b'),
]
ordering = [['a'], ['b']]
vd = SankeyDefinition(nodes, bundles, ordering)
flows = pd.DataFrame.from_records(
[('a', 'b', 'm', 3)],
columns=('source', 'target', 'material', 'value'))
GR, groups = sankey_view(vd, flows)
def test_sankey_view_results():
nodes = {
'a': ProcessGroup(selection=['a1', 'a2']),
'b': ProcessGroup(selection=['b1']),
'c': ProcessGroup(selection=['c1', 'c2'],
partition=Partition.Simple('process', ['c1', 'c2'])),
'via': Waypoint(partition=Partition.Simple('material', ['m', 'n'])),
}
bundles = [
Bundle('a', 'c', waypoints=['via']),
Bundle('b', 'c', waypoints=['via']),
]
ordering = [[['a', 'b']], [['via']], [['c']]]
vd = SankeyDefinition(nodes, bundles, ordering)
# Dataset
flows = pd.DataFrame.from_records(
[
('a1', 'c1', 'm', 3),
('a2', 'c1', 'n', 1),
('b1', 'c1', 'm', 1),
('b1', 'c2', 'm', 2),
('b1', 'c2', 'n', 1),
],
columns=('source', 'target', 'material', 'value'))
dim_process = pd.DataFrame({
'id': list(flows.source.unique()) + list(flows.target.unique())
}).set_index('id')
dataset = Dataset(flows, dim_process)
GR, groups = sankey_view(vd, dataset)
assert set(GR.nodes()) == {'a^*', 'b^*', 'via^m', 'via^n', 'c^c1', 'c^c2'}
assert sorted(GR.edges(keys=True, data=True)) == [
('a^*', 'via^m', ('*', '*'), {'value': 3, 'measures': {},
'bundles': [0]}),
('a^*', 'via^n', ('*', '*'), {'value': 1, 'measures': {},
'bundles': [0]}),
('b^*', 'via^m', ('*', '*'), {'value': 3, 'measures': {},
'bundles': [1]}),
('b^*', 'via^n', ('*', '*'), {'value': 1, 'measures': {},
'bundles': [1]}),
('via^m', 'c^c1', ('*', '*'), {'value': 4, 'measures': {},
'bundles': [0, 1]}),
('via^m', 'c^c2', ('*', '*'), {'value': 2, 'measures': {},
'bundles': [0, 1]}),
('via^n', 'c^c1', ('*', '*'), {'value': 1, 'measures': {},
'bundles': [0, 1]}),
('via^n', 'c^c2', ('*', '*'), {'value': 1, 'measures': {},
'bundles': [0, 1]}),
]
assert GR.ordering == Ordering([
[['a^*', 'b^*']],
[['via^m', 'via^n']],
[['c^c1', 'c^c2']],
])
assert groups == [
{'id': 'a',
'title': '',
'type': 'process',
'nodes': ['a^*']},
{'id': 'b',
'title': '',
'type': 'process',
'nodes': ['b^*']},
{'id': 'via',
'title': '',
'type': 'group',
'nodes': ['via^m', 'via^n']},
{'id': 'c',
'title': '',
'type': 'process',
'nodes': ['c^c1', 'c^c2']},
]
# Can also set flow_partition for all bundles at once
vd2 = SankeyDefinition(
nodes,
bundles,
ordering,
flow_partition=Partition.Simple('material', ['m', 'n']))
GR, groups = sankey_view(vd2, dataset)
assert sorted(GR.edges(keys=True, data=True)) == [
('a^*', 'via^m', ('m', '*'), {'value': 3, 'measures': {},
'bundles': [0]}),
('a^*', 'via^n', ('n', '*'), {'value': 1, 'measures': {},
'bundles': [0]}),
('b^*', 'via^m', ('m', '*'), {'value': 3, 'measures': {},
'bundles': [1]}),
('b^*', 'via^n', ('n', '*'), {'value': 1, 'measures': {},
'bundles': [1]}),
('via^m', 'c^c1', ('m', '*'), {'value': 4, 'measures': {},
'bundles': [0, 1]}),
('via^m', 'c^c2', ('m', '*'), {'value': 2, 'measures': {},
'bundles': [0, 1]}),
('via^n', 'c^c1', ('n', '*'), {'value': 1, 'measures': {},
'bundles': [0, 1]}),
('via^n', 'c^c2', ('n', '*'), {'value': 1, 'measures': {},
'bundles': [0, 1]}),
]
def test_sankey_view_results_time_partition():
nodes = {
'a': ProcessGroup(selection=['a1']),
'b': ProcessGroup(selection=['b1']),
}
bundles = [Bundle('a', 'b')]
ordering = [[['a']], [['b']]]
time_partition = Partition.Simple('time', [1, 2])
vd = SankeyDefinition(
nodes, bundles, ordering,
time_partition=time_partition)
# Dataset
flows = pd.DataFrame.from_records(
[
('a1', 'b1', 'm', 1, 3),
('a1', 'b1', 'm', 2, 2),
],
columns=('source', 'target', 'material', 'time', 'value'))
dim_process = | pd.DataFrame({'id': ['a1', 'b1']}) | pandas.DataFrame |
import datetime
import pandas as pd
from local_group_support.config.config import get_config
from rebel_management_utilities.action_network import get_forms, query, query_all
FORMATION_DATE = datetime.date(2018, 4, 1)
def get_form(submission):
form_id = submission['action_network:form_id']
has_website = 'action_network:referrer_data' in submission.keys() and \
submission['action_network:referrer_data']['source'] != 'none'
form_mapping = get_forms().set_index('identifier')['name']
submission_date = | pd.to_datetime(submission['created_date']) | pandas.to_datetime |
import wx.grid as gridlib
import pandas as pd
import numpy as np
import copy
import ciw
import re
import math
import statistics
import random
import imp
adapt = imp.load_source('adapt', 'src/adapt.py')
summary = imp.load_source('summary', 'src/Summary.py')
cluster = imp.load_source('cluster', 'src/clustering.py')
transitions = imp.load_source('transitions', 'src/transitions.py')
capacity = imp.load_source('capacity', 'src/capacity.py')
results = imp.load_source('results', 'src/results.py')
sim = imp.load_source('sim', 'src/simulation.py')
custom_ciw = imp.load_source('custom_ciw', 'src/custom_ciw.py')
#========= Data Panel ===========
#--------------- on Columns ----------------
def onHeaders_selection(columns, selected):
"""Create list of selected column names."""
headers = [columns[value] for value in selected]
return headers
def create_codes(headers):
"""Create character code for each activity."""
activity_codes = adapt.codes(headers)
letters = [key for key in activity_codes.keys()]
return(letters, activity_codes)
def Create_pathways_data(activity_codes, data, save_location, data_name):
"""Creates pathway data for non formatted input data.
Add pathway, waiting time per activity and total time in system to data.
df created is each unique pathways and the number of times performed.
"""
data['pathways'] = data.apply(lambda row: adapt.find_pathways(row, activity_codes),axis=1)
for index,key in enumerate(activity_codes.keys()):
data[key] = data.apply(lambda row: adapt.find_time_from_previous(row, key, activity_codes), axis=1)
if index == 0:
first = key
if index == len(activity_codes)-1:
last = key
data['totaltime'] = data.apply(lambda row: row[first:last].sum(),axis=1)
data = data.replace(' ', np.NaN)
df = transitions.pathway_counts(data)
with pd.ExcelWriter(save_location + 'SimProFlow_' + data_name, mode='w') as writer:
data.to_excel(writer,'Data')
df.to_excel(writer,'dataframe')
return(data, df)
#-------------- on Format -----------------
def Create_multi_pathways_data(data, id_column, activity_column, dates_column, columns, selected, save_location, data_name):
"""Creates pathway data for formatted input data.
    Adds multi_pathways (double codes), pathways (single codes) and the total time in system to the data.
    The waiting time per single-code activity is recorded as a list of waiting times.
    The df created holds each unique pathway and the number of times it was performed.
"""
if id_column != None:
# format data
data = data.dropna().reset_index(drop=True)
data = adapt.rename_duplicates(data, id_column, activity_column, dates_column)
headers = adapt.multi_headers(data, id_column)
else:
# multi columns
headers = onHeaders_selection(columns, selected)
activity_codes, multi_activity_codes = adapt.multi_codes(headers)
letters = [code for code in activity_codes.keys()]
multi_letters = [code for code in multi_activity_codes.keys()]
data['multi_pathways'] = data.apply(lambda row: adapt.find_pathways(row, multi_activity_codes),axis=1)
data['pathways'] = data.apply(lambda row: adapt.condense_pathways(row), axis=1)
for index,key in enumerate(multi_activity_codes.keys()):
data[key] = data.apply(lambda row: adapt.find_time_from_previous_Double(row, key, multi_activity_codes), axis=1)
for general_key in activity_codes.keys():
all_code = [key for key in multi_activity_codes.keys() if key[0] == general_key]
data[general_key] = data[all_code].values.tolist()
for index,key in enumerate(multi_activity_codes.keys()):
if index == 0:
first = key
if index == len(multi_activity_codes)-1:
last = key
data['totaltime'] = data.apply(lambda row: row[first:last].sum(),axis=1)
for key in multi_activity_codes.keys():
data = data.drop(key, axis=1)
data = data.replace(' ', np.NaN)
df = transitions.pathway_counts(data)
with pd.ExcelWriter(save_location + 'SimProFlow_' + data_name, mode='w') as writer:
data.to_excel(writer,'Data')
df.to_excel(writer,'dataframe')
return(data, df, activity_codes, multi_activity_codes, headers, letters, multi_letters)
def Create_Summary_Sheet(data, df, multi_activity_codes, save_location, original_name):
"""Create summary sheet and save to output folder."""
summary.SummarySheet(data, df, multi_activity_codes, save_location, original_name)
#========= Clustering Panel ===========
def Get_default_ranks(activity_codes, data):
"""Generates the default rankings per activity based on occurance frequency in data."""
default_rank = adapt.freq_Rankings(activity_codes, data)
return default_rank
def Get_Weights(dict_rank):
"""Converts rankings into weights."""
Weights = adapt.create_Weightings(dict_rank)
return Weights
def GetMedoids(select_medoids, df, max_k, data, comp_Matrix, Specify_textbox):
"""From user selections, generate initial medoids for clustering."""
if select_medoids.IsChecked(0):
# ensure no repeats
all_options = [i for i in range(len(df))]
set_medoids = random.sample(all_options, max_k)
elif select_medoids.IsChecked(1):
# as df is ordered by counts
set_medoids = [i for i in range(max_k)]
elif select_medoids.IsChecked(2):
sum_matrix = [sum(x) for x in comp_Matrix]
smallest = sorted(sum_matrix)[:max_k]
set_medoids = [sum_matrix.index(sum_values) for sum_values in smallest]
elif select_medoids.IsChecked(3) == True:
specify_medoids = Specify_textbox.GetValue()
if specify_medoids == 'Enter values':
return([], 'Enter values Error')
potential_set_medoids = list(map(int, specify_medoids.split(',')))
if len(potential_set_medoids) != max_k:
return(potential_set_medoids, 'Large Error')
set_medoids = potential_set_medoids
return(set_medoids, 'No')
def RunClustering(data, df, comp_Matrix, set_medoids, max_k, save_location, save_name, result_type, include_centroids):
"""Run clustering and display results level from user selection."""
cluster_results = cluster.classic_cluster(data, df, comp_Matrix, set_medoids, max_k, save_location, save_name, results=result_type, include_centroids=include_centroids)
return cluster_results
def RunProcessClustering(data, df, letters, comp_Matrix, set_medoids, max_k, save_location, save_name, tol, result_type, include_centroids, adjust):
"""Run process clustering and display results level from user selection."""
k, process_cluster_results, plot_name = cluster.process_cluster(data, df, letters, comp_Matrix, set_medoids, max_k, save_location , save_name, tol,
results=result_type, include_centroids=include_centroids, adjust=adjust)
return (k, process_cluster_results, plot_name)
#========= Draw ===========
def get_draw_network(sim_type, letters, Matrix, save_location, file_name, process_k, centroids, adjust, LR, penwidth, round_to):
"""Draw transitions network.
    + Full Transitions: one diagram for the whole transition matrix
    + Clustered Transitions: one diagram per class (k) for each class transition matrix
    + Process Medoids: diagrams for the full transitions network, the separated pathways and the pathways linked by position.
"""
if sim_type == 'Full Transitions':
save_file_name = save_location + 'Network_diagrams/' + file_name
transitions.draw_network(letters, Matrix, save_file_name, LR, penwidth, round_to)
if sim_type == 'Clustered Transitions':
for c_class, c_matrix in Matrix.items():
c_file_name = save_location + 'Network_diagrams/' + file_name + '_' + c_class
transitions.draw_network(letters, c_matrix, c_file_name, LR, penwidth, round_to)
if sim_type == 'Process Medoids':
adjust_save_file_name = save_location + 'Network_diagrams/' + file_name + '_' + str(process_k) + '_adjust_' + str(adjust)
transitions.draw_network(letters, Matrix[0], adjust_save_file_name, LR, penwidth, round_to)
file_name_network = save_location + 'Network_diagrams/' + file_name + '_' + str(process_k)
transitions.draw_network(letters, Matrix[1], file_name_network, LR, penwidth, round_to)
file_name_centroids = file_name_network + '_pathways'
transitions.draw_centroids(centroids, str(process_k), file_name_centroids)
max_length = centroids[str(process_k)].str.len().max()
first = [[path[i:i+1] for path in centroids[str(process_k)]] for i in range(0, max_length)]
all_firsts = [list(set(row)) for row in first]
for r, row in enumerate(all_firsts):
all_firsts[r] = ['End' if x=='' else x for x in row]
file_name_grouped = file_name_network + '_linked'
transitions.draw_centroids_linked(centroids[str(process_k)], all_firsts, file_name_grouped)
#========= Simulation ===========
def initialise_results_tables(data, letters):
"""Iitialise the four simulation results data frames."""
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4, original_transitions = results.initialise_results_tables(data, 'pathways', letters)
return(dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4, original_transitions)
def initial_vis_inputs(data, headers, activity_codes, formatted):
"""Generate initial values from raw data for results tables."""
initial_individuals = len(data)
overall_min, overall_max, overall_period = capacity.get_total_time_period(data, headers)
df_start = transitions.initial_last_arrival(data, activity_codes, formatted)
column_min, column_max, real_last_arrival = capacity.get_column_time_period(df_start.Start_dates)
return(initial_individuals, real_last_arrival, overall_period)
def get_vis_summary(results_data, time_column, pathway_column, table_letters, letters,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions, simulation_transitions,
intervention_codes, target, individuals,
save_location, simulation_name, listed_times,
last_arrival, period):
"""Fill results tables."""
T1_results, T2_results, T3_results, T4_results = results.run_results(results_data, time_column, pathway_column, table_letters, letters,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions, simulation_transitions,
intervention_codes, target, individuals,
save_location, simulation_name, listed_times,
last_arrival, period)
sim.plot_totaltime(save_location, simulation_name, results_data)
sim.plot_activity_waittimes(save_location, simulation_name, results_data, table_letters)
return(T1_results, T2_results, T3_results, T4_results)
def get_period(data, headers):
"""Get overall period covered by raw data."""
overall_min, overall_max, overall_period = capacity.get_total_time_period(data, headers)
return overall_period
def AutoSetupInputs(sim_type, data, activity_codes, multi_activity_codes, headers, letters, individuals, overall_period, clusters, process_k, centroids, original_name, adjust):
"""Generate basic sim default inputs.
    Service times default to 0.1 for all sim types.
    Server capacity is the same for all sim types.
+ Raw Pathways: Routing and arrivals at dummy node created
+ Full Transitions: arrivals row matrix and routing transition matrix
+ Clustered Transitions: arrivals row matrix and routing transition matrix per class (k)
+ Process Medoids: Routing of centroids and arrivals per class
"""
# if formatted data use multi_activity_codes
if original_name == 'original_formatted':
input_servers = sim.define_input_capacity(data, multi_activity_codes, headers, letters, original_name)
else:
input_servers = sim.define_input_capacity(data, activity_codes, headers, letters, original_name)
input_service = 0.1
if sim_type == 'Raw Pathways':
draw_matrix = []
input_routing = sim.define_input_routing(sim_type, data['pathways'], letters, None)
input_arrival = individuals/overall_period
if sim_type == 'Full Transitions':
# allow draw of adjusted matrix
input_arrival, draw_matrix, Matrix_prob = transitions.get_transitions(data['pathways'], letters, False)
input_routing = sim.define_input_routing(sim_type, data['pathways'], letters, Matrix_prob)
if sim_type == 'Clustered Transitions':
pathway_counts = transitions.pathway_counts(data)
propergated_clusters = transitions.propergate_clusters(pathway_counts, clusters)
input_arrival = {}
input_routing = {}
draw_matrix = {}
for c, cluster in enumerate(propergated_clusters):
class_name = 'Class '+ str(c)
prop_df = pd.DataFrame(cluster)
prop_df.columns = ['centroids']
Start, c_draw_matrix, Matrix_prob = transitions.get_transitions(prop_df.centroids, letters, False)
input_arrival[class_name] = Start
input_routing[class_name] = Matrix_prob
draw_matrix[class_name] = c_draw_matrix
if sim_type == 'Process Medoids':
draw_matrix = []
adjust_input_arrival, adjust_draw_matrix, adjust_Matrix_prob = transitions.get_transitions(data['pathways'], letters, adjust)
draw_matrix.append(adjust_draw_matrix)
Start, process_draw_matrix, Matrix_prob = transitions.get_transitions(centroids[str(process_k)], letters, centroids['prop_counter_' + str(process_k)])
draw_matrix.append(process_draw_matrix)
input_arrival = {}
for c, route in enumerate(centroids[str(process_k)]):
arrival = [centroids['prop_counter_' + str(process_k)][c] if route[0] == code else 'NoArrivals' for code in letters]
class_name = 'Class '+ str(c)
input_arrival[class_name] = arrival
input_routing = sim.define_input_routing(sim_type, centroids[str(process_k)], letters, None)
return(input_arrival, input_service, input_servers, input_routing, draw_matrix)
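# Illustrative call (a sketch; the sim_type is chosen arbitrarily to show the call shape,
# and the remaining arguments are assumed to come from the earlier data-loading steps):
#
#   arrivals, service, servers, routing, draw_matrix = AutoSetupInputs(
#       'Clustered Transitions', data, activity_codes, multi_activity_codes, headers,
#       letters, individuals, overall_period, clusters, process_k, centroids,
#       original_name, adjust)
#
# For 'Clustered Transitions' both the arrival and routing inputs come back as dicts keyed
# 'Class 0', 'Class 1', ... (for 'Process Medoids' only the arrivals do); for 'Raw Pathways'
# the arrival rate is the single value individuals/overall_period.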
def ConstructSim(sim_type, week, warm_type, letters, individuals, overall_period, cluster_k,
arrivals, service, capacity, warm, Routes):
"""Constructs the simulation network.
Takes warm up type into consideration.
+ Routing type for process based different for Raw Pathways and Process Cetroids
+ Both routing functions defined here.
"""
def raw_routing_function(ind):
"""Return route from id_number - for Raw Pathways."""
route_number = ind.id_number - 1
return copy.deepcopy(Routes[route_number])
def process_routing_function(ind):
"""Return route from customer class - for Process Medoids."""
route_number = ind.customer_class
return copy.deepcopy(Routes[route_number])
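    # Both routing closures return copy.deepcopy of the stored route, so each individual
    # works on its own copy of the list rather than mutating the shared Routes object.
    # When warm_type is 'Itterative', the run length and individual count are scaled up by
    # the warm factor below (and Routes is repeated for Raw Pathways); RunSimData later
    # discards that warm-up portion so only the final individuals/warm customers are reported.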
if warm_type == 'Itterative':
time_run = individuals * warm
individuals = individuals * warm
else:
time_run = individuals
Arrival = sim.define_arrivals(sim_type, letters, individuals, arrivals, Routes)
Service = sim.define_service(sim_type, letters, service, cluster_k)
Servers = sim.define_servers(sim_type, warm_type, letters, overall_period, capacity, week, warm)
if sim_type == 'Raw Pathways':
if warm_type == 'Itterative':
Routes = Routes * warm
All_Routing = [raw_routing_function for _ in range(len(letters)+1)]
elif sim_type == 'Process Medoids':
All_Routing = [process_routing_function for _ in range(len(letters))]
else:
All_Routing = Routes
Network = ciw.create_network(
arrival_distributions = Arrival,
service_distributions = Service,
number_of_servers = Servers,
routing = All_Routing
)
return(Network, Servers, time_run, individuals)
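# Illustrative construction of a run (a sketch; 'Raw Pathways' and the iterative warm-up are
# arbitrary choices to show the call shape, using the inputs produced by AutoSetupInputs):
#
#   Network, Servers_Schedules, time_run, n_inds = ConstructSim(
#       'Raw Pathways', week, 'Itterative', letters, individuals, overall_period,
#       cluster_k, arrivals, service, servers, warm, routing)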
def RunBasicSim(Network, sim_seed, time_run):
"""Run the simulation."""
ciw.seed(sim_seed)
Q = ciw.Simulation(Network, node_class=custom_ciw.CustomNode)
Q.simulate_until_max_customers(time_run, method='Finish', progress_bar=True)
return(Q)
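# Illustrative run (a sketch; the seed value is an arbitrary assumption):
#
#   Q = RunBasicSim(Network, sim_seed=42, time_run=time_run)
#   recs = pd.DataFrame(Q.get_all_records())   # raw ciw records, as used by RunSimData below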
def RunSimData(Q, warm_type, warm, letters, Servers_Schedules, week_type,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions,
activity_codes, target, individuals,
save_location, simulation_name, basic):
"""Produce the simulation results.
    Results are adjusted if the 'Itterative' warm-up option is selected.
    + A basic simulation includes the utilisation table and graphics and the waiting-time graphics
    + Otherwise these are not produced, because trial runs are being performed
"""
if letters[0] == 'Dummy':
original_letters = letters[1:]
current_letters = [''] + original_letters
else:
original_letters = letters
current_letters = letters
df_recs = pd.DataFrame(Q.get_all_records())
df_recs2, all_unique_pathways, sim_waiting_columns = transitions.convert_records(Q, current_letters, True)
df_all = transitions.sim_results(df_recs2, all_unique_pathways)
if basic == True:
with pd.ExcelWriter(save_location + 'Raw_Sim_Results.xlsx', engine="openpyxl", mode='a') as writer:
df_all.to_excel(writer,simulation_name)
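    # For the 'Itterative' warm-up everything before the final individuals/warm customers is
    # treated as warm-up: e.g. with the warm-scaled total individuals=5000 and warm=5 only
    # customers 4001-5000 are kept (start_collection = 5000 - 1000 = 4000), and the trimmed
    # results are re-saved under '<simulation_name>_selected'.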
if warm_type == 'Itterative':
start_collection = individuals - int((individuals/warm))
individuals = int((individuals/warm))
df_recs = df_recs[df_recs.id_number > start_collection]
df_recs = df_recs.reset_index()
df_all = df_all[df_all.id_number > start_collection]
df_all = df_all.reset_index()
simulation_name = simulation_name + '_selected'
if basic == True:
with pd.ExcelWriter(save_location + 'Raw_Sim_Results.xlsx', engine="openpyxl", mode='a') as writer:
df_all.to_excel(writer,simulation_name)
simulation_transitions = transitions.find_transitions(original_letters, df_all['pathway'], None, False)
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4 = get_vis_summary(df_all, 'totaltime', 'pathway', sim_waiting_columns, current_letters,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions, simulation_transitions,
activity_codes, target, individuals,
save_location, simulation_name, listed_times=True,
last_arrival=df_recs, period=df_recs)
if basic == True:
week_type = int(week_type[0])
df_utilisation = sim.run_utilisation_results(df_recs, current_letters, Servers_Schedules, week_type, save_location, simulation_name)
        with pd.ExcelWriter(save_location + 'Raw_Sim_Results.xlsx', engine="openpyxl", mode='a') as writer:  # save the utilisation table alongside the raw results
            df_utilisation.to_excel(writer, simulation_name + '_Util')
return(dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4, df_utilisation)
else:
return(dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4)
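# End-to-end sketch of one basic run (illustrative only; it simply chains the helpers above
# and assumes the inputs were prepared by get_period/AutoSetupInputs):
#
#   arrivals, service, servers, routing, draw_matrix = AutoSetupInputs(...)
#   Network, schedules, time_run, n_inds = ConstructSim(sim_type, week, warm_type, letters,
#                                                       individuals, overall_period, cluster_k,
#                                                       arrivals, service, servers, warm, routing)
#   Q = RunBasicSim(Network, sim_seed, time_run)
#   T1, T2, T3, T4, util = RunSimData(Q, warm_type, warm, letters, schedules, week_type,
#                                     dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
#                                     original_transitions, activity_codes, target, n_inds,
#                                     save_location, simulation_name, basic=True)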
#----------------- Trials -------------------------
def RunTrialSim(Network, trials, time_run, warm_type, warm, letters, Servers_Schedules, week_type,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions,
activity_codes, target, individuals,
save_location, simulation_name, basic):
"""Run simulation for trials.
Seed will change for each run.
Results recorded and then reported in the form of confidence intervals.
Results only for T1, T2 and T4.
Standard deviation graphics produced to allow user decision on sufficient runs.
"""
# Set up empty tables for results
Trials_dataframe_T1 = pd.DataFrame(columns=['Name',
'Mean Time in System',
'Median Time in System',
'Target [days, %]',
'No. Unique Pathways',
'Occurs Once',
'Occurs > Once',
'Total Transitions',
'Mean Transitions',
'Largest Transition',
'Day Last Arrival',
'Overall Period'])
    Trials_dataframe_T2 = pd.DataFrame({'Activity': letters})