prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
---|---|---|
# -*- coding: utf-8 -*-
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from collections import Counter
from pathlib import Path
import subprocess
import importlib
import math
import sys
import glob
import json
import pickle
import re
import warnings
from sklearn.datasets.base import Bunch
from skimage.draw import polygon
import skimage.transform
import shapely.wkt
from shapely.geometry import MultiPolygon, Polygon
import pandas as pd
import numpy as np
import tables as tb
import scipy
import rasterio
import rasterio.features
import tqdm
import cv2
import gdal
import click
import skimage.draw
import shapely.ops
import shapely.geometry
import fiona
import affine
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
MODEL_NAME = 'v16'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
STRIDE_SZ = 197
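# Tiling geometry: each 650x650 image is covered by a 3x3 grid of 256x256 crops
# placed at offsets 0, 197 and 394 along each axis, since
# 2 * STRIDE_SZ + INPUT_SIZE = 2 * 197 + 256 = 650.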
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train" # train data
BASE_TEST_DIR = "/data/test" # test data
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v16')
V12_IMAGE_DIR = "/data/working/images/{}".format('v12') # for mask and mul
V5_IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# ---------------------------------------------------------
# Parameters
MIN_POLYGON_AREA = 30
# ---------------------------------------------------------
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path("{datapath:s}/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# ---------------------------------------------------------
# Preprocessing result
FMT_RGB_BANDCUT_TH_PATH = V12_IMAGE_DIR + "/rgb_bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = V12_IMAGE_DIR + "/mul_bandcut{}.csv"
# ---------------------------------------------------------
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_TRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
# Mask
FMT_VALTRAIN_MASK_STORE = V12_IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = V12_IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_TRAIN_MASK_STORE = V12_IMAGE_DIR + "/train_{}_mask.h5"
# MUL
FMT_VALTRAIN_MUL_STORE = V12_IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = V12_IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_MUL_STORE = V12_IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = V12_IMAGE_DIR + "/test_{}_mul.h5"
FMT_MULMEAN = V12_IMAGE_DIR + "/{}_mulmean.h5"
# OSM
FMT_VALTRAIN_OSM_STORE = IMAGE_DIR + "/valtrain_{}_osm.h5"
FMT_VALTEST_OSM_STORE = IMAGE_DIR + "/valtest_{}_osm.h5"
FMT_TRAIN_OSM_STORE = IMAGE_DIR + "/train_{}_osm.h5"
FMT_TEST_OSM_STORE = IMAGE_DIR + "/test_{}_osm.h5"
FMT_OSM_MEAN = IMAGE_DIR + "/{}_osmmean.h5"
# ---------------------------------------------------------
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# ---------------------------------------------------------
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# OSM dataset (Extracted from https://mapzen.com/data/metro-extracts/)
FMT_OSMSHAPEFILE = "/root/osmdata/{name:}/{name:}_{layer:}.shp"
FMT_SERIALIZED_OSMDATA = WORKING_DIR + "/osm_{}_subset.pkl"
LAYER_NAMES = [
'buildings',
'landusages',
'roads',
'waterareas',
]
# ---------------------------------------------------------
# warnings and logging
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger(__name__)
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
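# Illustrative example for _remove_interiors (hypothetical values, matching the
# CSV line format written by the prediction code below): a polygon with an
# interior ring such as
#   'img1,1,"POLYGON ((0 0, 9 0, 9 9, 0 0), (2 2, 3 2, 2 3, 2 2))",0.500000'
# is reduced to its exterior ring only:
#   'img1,1,"POLYGON ((0 0, 9 0, 9 9, 0 0))",0.500000'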
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
    lines = stdout_data.decode('utf8').split('\n')[-10:]
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
        overall_fscore = float(re.findall(r"([\d\.]+)", lines[0])[0])
        tp = int(re.findall(r"(\d+)", lines[3])[0])
        fp = int(re.findall(r"(\d+)", lines[4])[0])
        fn = int(re.findall(r"(\d+)", lines[5])[0])
        precision = float(re.findall(r"([\d\.]+)", lines[6])[0])
        recall = float(re.findall(r"([\d\.]+)", lines[7])[0])
        fscore = float(re.findall(r"([\d\.]+)", lines[8])[0])
    else:
        logger.warning("Unexpected data >>> " + stdout_data.decode('utf8'))
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def prefix_to_area_id(prefix):
area_dict = {
'AOI_2_Vegas': 2,
'AOI_3_Paris': 3,
'AOI_4_Shanghai': 4,
'AOI_5_Khartoum': 5,
}
    return area_dict[prefix]
def area_id_to_prefix(area_id):
area_dict = {
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
def area_id_to_osmprefix(area_id):
area_id_to_osmprefix_dict = {
2: 'las-vegas_nevada_osm',
3: 'paris_france_osm',
4: 'shanghai_china_osm',
5: 'ex_s2cCo6gpCXAvihWVygCAfSjNVksnQ_osm',
}
return area_id_to_osmprefix_dict[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
fn_epoch=int(best_row['zero_base_epoch']),
min_poly_area=int(best_row['min_area_th']),
)
return param
def _internal_test_predict_best_param(area_id,
save_pred=True):
prefix = area_id_to_prefix(area_id)
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
min_th = param['min_poly_area']
# Prediction phase
logger.info("Prediction phase: {}".format(prefix))
dict_n_osm_layers = {
2: 4,
3: 5,
4: 4,
5: 4,
}
osm_layers = dict_n_osm_layers[area_id]
n_input_layers = 8 + osm_layers
X_mean = get_mul_mean_image(area_id)
X_osm_mean = np.zeros((
osm_layers,
INPUT_SIZE,
INPUT_SIZE,
))
X_mean = np.vstack([X_mean, X_osm_mean])
# Load model weights
# Predict and Save prediction result
fn = FMT_TESTPRED_PATH.format(prefix)
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet(input_layers=n_input_layers)
model.load_weights(fn_model)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_test_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=True,
),
val_samples=len(df_test) * 9,
)
del model
# Save prediction result
if save_pred:
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
filters=filters)
ds[:] = y_pred
return y_pred
def _internal_test(area_id, enable_tqdm=False):
    prefix = area_id_to_prefix(area_id)
    param = _get_model_parameter(area_id)
    min_th = param['min_poly_area']
    y_pred = _internal_test_predict_best_param(area_id, save_pred=False)
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_image_list = df_test.index.tolist()
for idx, image_id in tqdm.tqdm(enumerate(test_image_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
def _internal_validate_predict_best_param(area_id,
enable_tqdm=False):
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
y_pred = _internal_validate_predict(
area_id,
epoch=epoch,
save_pred=False,
enable_tqdm=enable_tqdm)
return y_pred
def _internal_validate_predict(area_id,
epoch=3,
save_pred=True,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
dict_n_osm_layers = {
2: 4,
3: 5,
4: 4,
5: 4,
}
osm_layers = dict_n_osm_layers[area_id]
n_input_layers = 8 + osm_layers
# Image Mean
X_mean = get_mul_mean_image(area_id)
X_osm_mean = np.zeros((
osm_layers,
INPUT_SIZE,
INPUT_SIZE,
))
X_mean = np.vstack([X_mean, X_osm_mean])
# Load model weights
# Predict and Save prediction result
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet(input_layers=n_input_layers)
model.load_weights(fn_model)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_valtest_batch(
area_id,
batch_size=32,
immean=X_mean,
enable_tqdm=enable_tqdm,
),
val_samples=len(df_test) * 9,
)
del model
# Save prediction result
if save_pred:
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(
f.root,
'pred',
atom,
y_pred.shape,
filters=filters,
)
ds[:] = y_pred
return y_pred
def _internal_validate_fscore_wo_pred_file(area_id,
epoch=3,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# ------------------------
# Prediction phase
logger.info("Prediction phase")
y_pred = _internal_validate_predict(
area_id,
save_pred=False,
epoch=epoch,
enable_tqdm=enable_tqdm)
# ------------------------
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_" from ImageId column
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def _internal_validate_fscore(area_id,
epoch=3,
predict=True,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# ------------------------
# Prediction phase
logger.info("Prediction phase")
if predict:
_internal_validate_predict(
area_id,
epoch=epoch,
enable_tqdm=enable_tqdm)
# ------------------------
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
pred_values = np.zeros((650, 650))
pred_count = np.zeros((650, 650))
for slice_pos in range(9):
slice_idx = idx * 9 + slice_pos
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
y_pred[slice_idx][0]
)
pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
pred_values = pred_values / pred_count
df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_" from ImageId column
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def mask_to_poly(mask, min_polygon_area_th=MIN_POLYGON_AREA):
mask = (mask > 0.5).astype(np.uint8)
shapes = rasterio.features.shapes(mask.astype(np.int16), mask > 0)
poly_list = []
mp = shapely.ops.cascaded_union(
shapely.geometry.MultiPolygon([
shapely.geometry.shape(shape)
for shape, value in shapes
]))
if isinstance(mp, shapely.geometry.Polygon):
df = pd.DataFrame({
'area_size': [mp.area],
'poly': [mp],
})
else:
df = pd.DataFrame({
'area_size': [p.area for p in mp],
'poly': [p for p in mp],
})
df = df[df.area_size > min_polygon_area_th].sort_values(
by='area_size', ascending=False)
df.loc[:, 'wkt'] = df.poly.apply(lambda x: shapely.wkt.dumps(
x, rounding_precision=0))
df.loc[:, 'bid'] = list(range(1, len(df) + 1))
df.loc[:, 'area_ratio'] = df.area_size / df.area_size.max()
return df
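# Minimal usage sketch for mask_to_poly (the toy mask below is an assumed
# example, not data produced by the pipeline):
#   toy_mask = np.zeros((650, 650))
#   toy_mask[100:150, 100:150] = 1.0
#   df_poly = mask_to_poly(toy_mask, min_polygon_area_th=MIN_POLYGON_AREA)
#   # -> one row with bid == 1, a WKT square and area_ratio == 1.0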
def jaccard_coef(y_true, y_pred):
smooth = 1e-12
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1e-12
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
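# Toy NumPy sanity check for the Jaccard metrics above (assumed example, not
# part of the training pipeline):
#   y_true = np.array([[1., 0.], [1., 1.]])
#   y_pred = np.array([[1., 0.], [0., 1.]])
#   inter = (y_true * y_pred).sum()                 # 2.0
#   union = y_true.sum() + y_pred.sum() - inter     # 3.0
#   inter / union                                   # 0.666..., i.e. the IoU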
def generate_test_batch(area_id,
batch_size=64,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_TEST_MUL_STORE.format(prefix)
fn_osm = FMT_TEST_OSM_STORE.format(prefix)
slice_id_list = []
for idx, row in df_test.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
if enable_tqdm:
pbar = tqdm.tqdm(total=len(slice_id_list))
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_osm, 'r') as f_osm:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_test = []
y_test = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im2 = np.array(f_osm.get_node('/' + slice_id))
im2 = np.swapaxes(im2, 0, 2)
im2 = np.swapaxes(im2, 1, 2)
im = np.vstack([im, im2])
X_test.append(im)
mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
y_test.append(mask)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_test = X_test - immean
if enable_tqdm:
pbar.update(y_test.shape[0])
yield (X_test, y_test)
if enable_tqdm:
pbar.close()
def generate_valtest_batch(area_id,
batch_size=8,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
fn_osm = FMT_VALTEST_OSM_STORE.format(prefix)
slice_id_list = []
for idx, row in df_train.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
if enable_tqdm:
pbar = tqdm.tqdm(total=len(slice_id_list))
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_osm, 'r') as f_osm,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_train = []
y_train = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im2 = np.array(f_osm.get_node('/' + slice_id))
im2 = np.swapaxes(im2, 0, 2)
im2 = np.swapaxes(im2, 1, 2)
im = np.vstack([im, im2])
X_train.append(im)
mask = np.array(f_mask.get_node('/' + slice_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
if enable_tqdm:
pbar.update(y_train.shape[0])
yield (X_train, y_train)
if enable_tqdm:
pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
fn_osm = FMT_VALTRAIN_OSM_STORE.format(prefix)
slice_id_list = []
for idx, row in df_train.iterrows():
for slice_pos in range(9):
slice_id = row.ImageId + '_' + str(slice_pos)
slice_id_list.append(slice_id)
np.random.shuffle(slice_id_list)
while 1:
total_sz = len(slice_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_osm, 'r') as f_osm,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_slice_ids = slice_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_slice_ids) == 0:
continue
X_train = []
y_train = []
for slice_id in target_slice_ids:
im = np.array(f_im.get_node('/' + slice_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im2 = np.array(f_osm.get_node('/' + slice_id))
im2 = np.swapaxes(im2, 0, 2)
im2 = np.swapaxes(im2, 1, 2)
im = np.vstack([im, im2])
X_train.append(im)
mask = np.array(f_mask.get_node('/' + slice_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
yield (X_train, y_train)
def get_unet(input_layers=15):
conv_params = dict(activation='relu', border_mode='same')
merge_params = dict(mode='concat', concat_axis=1)
inputs = Input((input_layers, 256, 256))
conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)
model = Model(input=inputs, output=conv10)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy', jaccard_coef, jaccard_coef_int])
return model
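# Note: get_unet is written against the Keras 1.x API (Convolution2D(32, 3, 3,
# border_mode='same'), merge, Model(input=..., output=...)). Under Keras 2 the
# rough equivalents would be Conv2D(32, (3, 3), padding='same'),
# keras.layers.concatenate([...], axis=1) and Model(inputs=..., outputs=...);
# this code assumes a Keras 1.x environment with channels-first image ordering.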
def get_mul_mean_image(area_id):
prefix = area_id_to_prefix(area_id)
with tb.open_file(FMT_MULMEAN.format(prefix), 'r') as f:
im_mean = np.array(f.get_node('/mulmean'))
return im_mean
def __load_band_cut_th(band_fn, bandsz=3):
df = pd.read_csv(band_fn, index_col='area_id')
all_band_cut_th = {area_id: {} for area_id in range(2, 6)}
for area_id, row in df.iterrows():
for chan_i in range(bandsz):
all_band_cut_th[area_id][chan_i] = dict(
min=row['chan{}_min'.format(chan_i)],
max=row['chan{}_max'.format(chan_i)],
)
return all_band_cut_th
def get_mask_im(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
def get_slice_mask_im(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = (im_mask > 0.5).astype(np.uint8)
for slice_pos in range(9):
pos_j = int(math.floor(slice_pos / 3.0))
pos_i = int(slice_pos % 3)
x0 = STRIDE_SZ * pos_i
y0 = STRIDE_SZ * pos_j
im_mask_part = im_mask[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
assert im_mask_part.shape == (256, 256)
yield slice_pos, im_mask_part
def get_test_image_path_from_imageid(image_id, datapath, mul=False):
if mul:
return FMT_TEST_MSPEC_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
else:
return FMT_TEST_RGB_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
def get_train_image_path_from_imageid(image_id, datapath, mul=False):
if mul:
return FMT_TRAIN_MSPEC_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
else:
return FMT_TRAIN_RGB_IMAGE_PATH.format(
datapath=datapath, image_id=image_id)
def image_id_to_prefix(image_id):
prefix = image_id.split('img')[0][:-1]
return prefix
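# Example (doctest-style, following the SpaceNet naming convention):
# >>> image_id_to_prefix('AOI_2_Vegas_img1234')
# 'AOI_2_Vegas'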
def load_train_summary_data(area_id):
prefix = area_id_to_prefix(area_id)
fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df = pd.read_csv(fn)
# df.loc[:, 'ImageId'] = df.ImageId.str[4:]
return df
def split_val_train_test(area_id):
prefix = area_id_to_prefix(area_id)
df = load_train_summary_data(area_id)
df_agg = df.groupby('ImageId').agg('first')
image_id_list = df_agg.index.tolist()
np.random.shuffle(image_id_list)
sz_valtrain = int(len(image_id_list) * 0.7)
sz_valtest = len(image_id_list) - sz_valtrain
# Parent directory
parent_dir = Path(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)).parent
if not parent_dir.exists():
parent_dir.mkdir(parents=True)
pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index=False)
pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index=False)
# ---------------------------------------------------------
def calc_multiband_cut_threshold(path_list):
band_values = {k: [] for k in range(3)}
band_cut_th = {k: dict(max=0, min=0) for k in range(3)}
for path in path_list:
with rasterio.open(path, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
band_values[i_chan].append(values[i_chan].ravel())
for i_chan in range(3):
band_values[i_chan] = np.concatenate(band_values[i_chan]).ravel()
        band_cut_th[i_chan]['max'] = np.percentile(band_values[i_chan], 98)
        band_cut_th[i_chan]['min'] = np.percentile(band_values[i_chan], 2)
return band_cut_th
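# Hedged note: the 2nd/98th percentile thresholds returned above are the kind of
# values typically used to clip and rescale each band, e.g.
#   np.clip((band - th['min']) / (th['max'] - th['min']), 0, 1)
# The actual rescaling step is assumed here and not part of this excerpt.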
def tif_to_latlon(path):
ds = gdal.Open(path)
width = ds.RasterXSize
height = ds.RasterYSize
gt = ds.GetGeoTransform()
minx = gt[0]
miny = gt[3] + width*gt[4] + height*gt[5]
maxx = gt[0] + width*gt[1] + height*gt[2]
maxy = gt[3]
return Bunch(
minx=minx,
maxx=maxx,
miny=miny,
maxy=maxy,
xcenter=(minx+maxx)/2.0,
ycenter=(miny+maxy)/2.0)
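# The bounds above follow the standard GDAL affine geotransform
# gt = (origin_x, pixel_width, row_rotation, origin_y, col_rotation, pixel_height):
#   x = gt[0] + col * gt[1] + row * gt[2]
#   y = gt[3] + col * gt[4] + row * gt[5]
# so (minx, maxy) is the top-left corner and (maxx, miny) the bottom-right.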
def location_summary(area_id, datapath):
area_prefix = area_id_to_prefix(area_id)
rows = []
glob_path = str(
Path(datapath) /
Path("PAN/PAN_{prefix:s}_img*.tif")
).format(prefix=area_prefix)
for path in sorted(glob.glob(glob_path)):
image_id = path.split('/')[-1][:-4]
pos = tif_to_latlon(path)
rows.append(dict(ImageId=image_id, path=path, pos=pos))
df_location = | pd.DataFrame(rows) | pandas.DataFrame |
import pandas as pd
from sklearn.model_selection import ParameterGrid,GridSearchCV
from sklearn.base import clone
from sklearn.cluster import MeanShift
from sklearn import metrics
import numpy as np
import joblib
import datetime
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
def MSGridsearch(dmodel, data, labels, param_dict):
"""
dmodel: 默认模型
data:训练数据
labels: 真实分类
param_dict: 超参数组合字典
"""
output_models = []
    # build the hyper-parameter grid
    param_grid = ParameterGrid(param_dict)
    # for each grid point, set the corresponding attributes on the clustering
    # model (here MeanShift), fit it, and collect the resulting models
for param in param_grid:
for key, value in param.items():
setattr(dmodel, key, value)
dmodel.fit(data)
model = clone(dmodel)
output_models.append(model)
    # add any other outputs here if needed
return (output_models)
# evaluation metrics, used for testing only; not the final modularized component
def get_marks(estimator, data, labels, name=None):
    """Compute clustering scores. Five of the metrics require the true labels of
    the dataset and three do not; see readme.txt.
    :param estimator: the clustering model
    :param name: label identifying this run in the printed output
    :param data: feature dataset
    :param labels: ground-truth class labels
    """
estimator.fit(data.astype(np.float64))
print(30 * '*', name, 30 * '*')
print(" 模型及参数: ", estimator)
print("Homogeneity Score (均一性): ", metrics.homogeneity_score(labels, estimator.labels_))
print("Completeness Score (完整性): ", metrics.completeness_score(labels, estimator.labels_))
print("V-Measure Score (V量): ", metrics.v_measure_score(labels, estimator.labels_))
print("Adjusted Rand Score (调整后兰德指数): ", metrics.adjusted_rand_score(labels, estimator.labels_))
print("Adjusted Mutual Info Score(调整后的共同信息): ", metrics.adjusted_mutual_info_score(labels, estimator.labels_))
print("Calinski Harabasz Score: (方差比指数) ", metrics.calinski_harabasz_score(data, estimator.labels_))
print("Silhouette Score (轮廓分数): ", metrics.silhouette_score(data, estimator.labels_))
def read_para():
para = pd.read_excel('para.xlsx',header=None,dtype='object')
dic=para.set_index(0).T.to_dict('list')
for i in dic:
dic[i]=[x for x in dic[i] if x == x]
return dic
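# Assumed layout of para.xlsx (illustrative; the parameter names are not part of
# this snippet): each row holds a parameter name in the first column followed by
# its candidate values, e.g.
#   bandwidth   | 0.5 | 1.0 | 2.0
#   bin_seeding | True | False
# read_para() turns this into {'bandwidth': [0.5, 1.0, 2.0], 'bin_seeding': [True, False]};
# the `x == x` filter drops the NaN cells created by ragged rows.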
def plot_learning_curve(model,data,labels):
train_sizes, train_scores, test_scores = learning_curve(model, data, labels,
scoring='adjusted_rand_score', cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)  # mean training score across CV folds
    train_scores_std = np.std(train_scores, axis=1)  # standard deviation of the training scores
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
    plt.grid()  # draw background grid lines
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1,
color='r')
    # plt.fill_between() shades the band of one standard deviation around the mean score.
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1,
color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
    # plot the mean scores with plt.plot()
plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross_validation score')
    plt.legend(loc='best')  # show the legend
plt.show()
def main():
df = pd.read_excel('test4.xlsx')
data = df.drop('TRUE VALUE', axis=1)
labels = df['TRUE VALUE']
    # evaluate the unsupervised model
ms = MeanShift()
ms_dict =read_para()
output = MSGridsearch(ms, data, labels, ms_dict)
    # MeanShift grid-search results
for i in range(len(output)):
get_marks(output[i], data=data, labels=labels,name="output" + str(i))
ms_best_model = GridSearchCV(ms, ms_dict, cv=5, scoring='adjusted_rand_score', verbose=1, n_jobs=-1)
ms_result = ms_best_model.fit(data, labels)
print(ms_result.best_params_)
    # save the best model
joblib.dump(ms_best_model.best_estimator_, "./test.pkl")
    # save the best parameters
TIMESTAMP = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S").replace("'", "")
result = | pd.DataFrame(ms_result.best_params_, index=['value']) | pandas.DataFrame |
from contextlib import closing
import socket
import json
import os
import tempfile
from pathlib import Path
from tempfile import NamedTemporaryFile
from textwrap import dedent
from unittest.mock import patch
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import pyarrow
import pytest
import responses
from datarobot_drum.drum.drum import (
possibly_intuit_order,
output_in_code_dir,
create_custom_inference_model_folder,
)
from datarobot_drum.drum.exceptions import DrumCommonException
from datarobot_drum.drum.model_adapter import PythonModelAdapter
from datarobot_drum.drum.language_predictors.python_predictor.python_predictor import (
PythonPredictor,
)
from datarobot_drum.drum.language_predictors.r_predictor.r_predictor import RPredictor
from datarobot_drum.drum.language_predictors.java_predictor.java_predictor import JavaPredictor
from datarobot_drum.drum.push import _push_inference, _push_training, drum_push
from datarobot_drum.drum.common import (
read_model_metadata_yaml,
MODEL_CONFIG_FILENAME,
TargetType,
validate_config_fields,
ModelMetadataKeys,
)
from datarobot_drum.drum.utils import StructuredInputReadUtils
class TestOrderIntuition:
tests_data_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "testdata"))
binary_filename = os.path.join(tests_data_path, "iris_binary_training.csv")
regression_filename = os.path.join(tests_data_path, "boston_housing.csv")
one_target_filename = os.path.join(tests_data_path, "one_target.csv")
def test_colname(self):
classes = possibly_intuit_order(self.binary_filename, target_col_name="Species")
assert set(classes) == {"Iris-versicolor", "Iris-setosa"}
def test_colfile(self):
with NamedTemporaryFile() as target_file:
df = pd.read_csv(self.binary_filename)
with open(target_file.name, "w") as f:
target_series = df["Species"]
target_series.to_csv(f, index=False, header="Target")
classes = possibly_intuit_order(self.binary_filename, target_data_file=target_file.name)
assert set(classes) == {"Iris-versicolor", "Iris-setosa"}
def test_badfile(self):
with pytest.raises(DrumCommonException):
possibly_intuit_order(self.one_target_filename, target_col_name="Species")
def test_unsupervised(self):
classes = possibly_intuit_order(
self.regression_filename, target_col_name="MEDV", is_anomaly=True
)
assert classes is None
class TestValidatePredictions:
def test_class_labels(self):
positive_label = "poslabel"
negative_label = "neglabel"
adapter = PythonModelAdapter(model_dir=None, target_type=TargetType.BINARY)
df = pd.DataFrame({positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]})
adapter._validate_predictions(
to_validate=df, class_labels=[positive_label, negative_label],
)
with pytest.raises(ValueError):
df = pd.DataFrame({positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]})
adapter._validate_predictions(
to_validate=df, class_labels=["yes", "no"],
)
def test_regression_predictions_header(self):
adapter = PythonModelAdapter(model_dir=None, target_type=TargetType.REGRESSION)
df = pd.DataFrame({"Predictions": [0.1, 0.2, 0.3]})
adapter._validate_predictions(
to_validate=df, class_labels=None,
)
with pytest.raises(ValueError):
df = pd.DataFrame({"other_name": [0.1, 0.2, 0.3]})
adapter._validate_predictions(
to_validate=df, class_labels=None,
)
def test_add_to_one(self):
positive_label = "poslabel"
negative_label = "neglabel"
for predictor in [PythonPredictor(), RPredictor(), JavaPredictor()]:
predictor._target_type = TargetType.BINARY
df_good = pd.DataFrame(
{positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]}
)
predictor.validate_output(df_good)
df_bad = pd.DataFrame({positive_label: [1, 1, 1], negative_label: [-1, 0, 0]})
with pytest.raises(ValueError):
predictor.validate_output(df_bad)
modelID = "5f1f15a4d6111f01cb7f91f"
environmentID = "5e8c889607389fe0f466c72d"
projectID = "abc123"
@pytest.fixture
def inference_metadata_yaml():
return dedent(
"""
name: drumpush-regression
type: inference
targetType: regression
environmentID: {environmentID}
inferenceModel:
targetName: MEDV
validation:
input: hello
"""
).format(environmentID=environmentID)
@pytest.fixture
def inference_binary_metadata_yaml_no_target_name():
return dedent(
"""
name: drumpush-binary
type: inference
targetType: binary
environmentID: {environmentID}
inferenceModel:
positiveClassLabel: yes
negativeClassLabel: no
validation:
input: hello
"""
).format(environmentID=environmentID)
@pytest.fixture
def inference_binary_metadata_no_label():
return dedent(
"""
name: drumpush-binary
type: inference
targetType: binary
inferenceModel:
positiveClassLabel: yes
"""
)
@pytest.fixture
def multiclass_labels():
return ["GALAXY", "QSO", "STAR"]
@pytest.fixture
def inference_multiclass_metadata_yaml_no_labels():
return dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
validation:
input: hello
"""
).format(environmentID)
@pytest.fixture
def inference_multiclass_metadata_yaml(multiclass_labels):
return dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
classLabels:
- {}
- {}
- {}
validation:
input: hello
"""
).format(environmentID, *multiclass_labels)
@pytest.fixture
def inference_multiclass_metadata_yaml_label_file(multiclass_labels):
with NamedTemporaryFile(mode="w+") as f:
f.write("\n".join(multiclass_labels))
f.flush()
yield dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
classLabelsFile: {}
validation:
input: hello
"""
).format(environmentID, f.name)
@pytest.fixture
def inference_multiclass_metadata_yaml_labels_and_label_file(multiclass_labels):
with NamedTemporaryFile(mode="w+") as f:
f.write("\n".join(multiclass_labels))
f.flush()
yield dedent(
"""
name: drumpush-multiclass
type: inference
targetType: multiclass
environmentID: {}
inferenceModel:
targetName: class
classLabelsFile: {}
classLabels:
- {}
- {}
- {}
validation:
input: hello
"""
).format(environmentID, f.name, *multiclass_labels)
@pytest.fixture
def training_metadata_yaml():
return dedent(
"""
name: drumpush-regression
type: training
targetType: regression
environmentID: {environmentID}
validation:
input: hello
"""
).format(environmentID=environmentID)
@pytest.fixture
def training_metadata_yaml_with_proj():
return dedent(
"""
name: drumpush-regression
type: training
targetType: regression
environmentID: {environmentID}
trainingModel:
trainOnProject: {projectID}
validation:
input: hello
"""
).format(environmentID=environmentID, projectID=projectID)
@pytest.fixture
def custom_predictor_metadata_yaml():
return dedent(
"""
name: model-with-custom-java-predictor
type: inference
targetType: regression
customPredictor:
arbitraryField: This info is read directly by a custom predictor
"""
)
version_response = {
"id": "1",
"custom_model_id": "1",
"version_minor": 1,
"version_major": 1,
"is_frozen": False,
"items": [{"id": "1", "file_name": "hi", "file_path": "hi", "file_source": "hi"}],
}
@pytest.mark.parametrize(
"config_yaml",
[
"custom_predictor_metadata_yaml",
"training_metadata_yaml",
"training_metadata_yaml_with_proj",
"inference_metadata_yaml",
"inference_multiclass_metadata_yaml",
"inference_multiclass_metadata_yaml_label_file",
],
)
@pytest.mark.parametrize("existing_model_id", [None])
def test_yaml_metadata(request, config_yaml, existing_model_id, tmp_path):
config_yaml = request.getfixturevalue(config_yaml)
if existing_model_id:
config_yaml = config_yaml + "\nmodelID: {}".format(existing_model_id)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
read_model_metadata_yaml(tmp_path)
@pytest.mark.parametrize(
"config_yaml, test_case_number",
[
("custom_predictor_metadata_yaml", 1),
("inference_binary_metadata_no_label", 2),
("inference_multiclass_metadata_yaml_no_labels", 3),
("inference_multiclass_metadata_yaml_labels_and_label_file", 4),
("inference_multiclass_metadata_yaml", 100),
("inference_multiclass_metadata_yaml_label_file", 100),
],
)
def test_yaml_metadata_missing_fields(tmp_path, config_yaml, request, test_case_number):
config_yaml = request.getfixturevalue(config_yaml)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
if test_case_number == 1:
conf = read_model_metadata_yaml(tmp_path)
with pytest.raises(
DrumCommonException, match="Missing keys: \['validation', 'environmentID'\]"
):
validate_config_fields(
conf,
ModelMetadataKeys.CUSTOM_PREDICTOR,
ModelMetadataKeys.VALIDATION,
ModelMetadataKeys.ENVIRONMENT_ID,
)
elif test_case_number == 2:
with pytest.raises(DrumCommonException, match="Missing keys: \['negativeClassLabel'\]"):
read_model_metadata_yaml(tmp_path)
elif test_case_number == 3:
with pytest.raises(
DrumCommonException,
match="Error - for multiclass classification, either the class labels or a class labels file must be provided in model-metadata.yaml file",
):
read_model_metadata_yaml(tmp_path)
elif test_case_number == 4:
with pytest.raises(
DrumCommonException,
match="Error - for multiclass classification, either the class labels or a class labels file should be provided in model-metadata.yaml file, but not both",
):
read_model_metadata_yaml(tmp_path)
elif test_case_number == 100:
read_model_metadata_yaml(tmp_path)
def test_read_model_metadata_properly_casts_typeschema(tmp_path, training_metadata_yaml):
config_yaml = training_metadata_yaml + dedent(
"""
typeSchema:
input_requirements:
- field: number_of_columns
condition: IN
value:
- 1
- 2
- field: data_types
condition: EQUALS
value:
- NUM
- TXT
output_requirements:
- field: number_of_columns
condition: IN
value: 2
- field: data_types
condition: EQUALS
value: NUM
"""
)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
yaml_conf = read_model_metadata_yaml(tmp_path)
output_reqs = yaml_conf["typeSchema"]["output_requirements"]
input_reqs = yaml_conf["typeSchema"]["input_requirements"]
value_key = "value"
expected_as_int_list = next(
(el for el in input_reqs if el["field"] == "number_of_columns")
).get(value_key)
expected_as_str_list = next((el for el in input_reqs if el["field"] == "data_types")).get(
value_key
)
expected_as_int = next((el for el in output_reqs if el["field"] == "number_of_columns")).get(
value_key
)
expected_as_str = next((el for el in output_reqs if el["field"] == "data_types")).get(value_key)
assert all(isinstance(el, int) for el in expected_as_int_list)
assert all(isinstance(el, str) for el in expected_as_str_list)
assert isinstance(expected_as_str_list, list)
assert isinstance(expected_as_int, int)
assert isinstance(expected_as_str, str)
def version_mocks():
responses.add(
responses.GET,
"http://yess/version/",
json={"major": 2, "versionString": "2.21", "minor": 21},
status=200,
)
responses.add(
responses.POST,
"http://yess/customModels/{}/versions/".format(modelID),
json=version_response,
status=200,
)
def mock_get_model(model_type="training", target_type="Regression"):
body = {
"customModelType": model_type,
"id": modelID,
"name": "1",
"description": "1",
"targetType": target_type,
"deployments_count": "1",
"created_by": "1",
"updated": "1",
"created": "1",
"latestVersion": version_response,
}
if model_type == "inference":
body["language"] = "Python"
body["trainingDataAssignmentInProgress"] = False
responses.add(
responses.GET, "http://yess/customModels/{}/".format(modelID), json=body,
)
responses.add(
responses.POST, "http://yess/customModels/".format(modelID), json=body,
)
def mock_post_blueprint():
responses.add(
responses.POST,
"http://yess/customTrainingBlueprints/",
json={
"userBlueprintId": "2",
"custom_model": {"id": "1", "name": "1"},
"custom_model_version": {"id": "1", "label": "1"},
"execution_environment": {"id": "1", "name": "1"},
"execution_environment_version": {"id": "1", "label": "1"},
"training_history": [],
},
)
def mock_post_add_to_repository():
responses.add(
responses.POST,
"http://yess/projects/{}/blueprints/fromUserBlueprint/".format(projectID),
json={"id": "1"},
)
def mock_get_env():
responses.add(
responses.GET,
"http://yess/executionEnvironments/{}/".format(environmentID),
json={
"id": "1",
"name": "hi",
"latestVersion": {"id": "hii", "environment_id": environmentID, "build_status": "yes"},
},
)
def mock_train_model():
responses.add(
responses.POST,
"http://yess/projects/{}/models/".format(projectID),
json={},
adding_headers={"Location": "the/moon"},
)
responses.add(
responses.GET,
"http://yess/projects/{}/modelJobs/the/".format(projectID),
json={
"is_blocked": False,
"id": "55",
"processes": [],
"model_type": "fake",
"project_id": projectID,
"blueprint_id": "1",
},
)
@responses.activate
@pytest.mark.parametrize(
"config_yaml",
[
"training_metadata_yaml",
"training_metadata_yaml_with_proj",
"inference_metadata_yaml",
"inference_multiclass_metadata_yaml",
"inference_multiclass_metadata_yaml_label_file",
],
)
@pytest.mark.parametrize("existing_model_id", [None, modelID])
def test_push(request, config_yaml, existing_model_id, multiclass_labels, tmp_path):
config_yaml = request.getfixturevalue(config_yaml)
if existing_model_id:
config_yaml = config_yaml + "\nmodelID: {}".format(existing_model_id)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
config = read_model_metadata_yaml(tmp_path)
version_mocks()
mock_post_blueprint()
mock_post_add_to_repository()
mock_get_model(model_type=config["type"], target_type=config["targetType"].capitalize())
mock_get_env()
mock_train_model()
push_fn = _push_training if config["type"] == "training" else _push_inference
push_fn(config, code_dir="", endpoint="http://Yess", token="<PASSWORD>")
calls = responses.calls
if existing_model_id is None:
assert calls[1].request.path_url == "/customModels/" and calls[1].request.method == "POST"
if config["targetType"] == TargetType.MULTICLASS.value:
sent_labels = json.loads(calls[1].request.body)["classLabels"]
assert sent_labels == multiclass_labels
call_shift = 1
else:
call_shift = 0
assert (
calls[call_shift + 1].request.path_url == "/customModels/{}/versions/".format(modelID)
and calls[call_shift + 1].request.method == "POST"
)
if push_fn == _push_training:
assert (
calls[call_shift + 2].request.path_url == "/customTrainingBlueprints/"
and calls[call_shift + 2].request.method == "POST"
)
if "trainingModel" in config:
assert (
calls[call_shift + 3].request.path_url
== "/projects/{}/blueprints/fromUserBlueprint/".format(projectID)
and calls[call_shift + 3].request.method == "POST"
)
assert (
calls[call_shift + 4].request.path_url == "/projects/abc123/models/"
and calls[call_shift + 4].request.method == "POST"
)
assert len(calls) == 6 + call_shift
else:
assert len(calls) == 3 + call_shift
else:
assert len(calls) == 2 + call_shift
@responses.activate
@pytest.mark.parametrize(
"config_yaml", ["inference_binary_metadata_yaml_no_target_name",],
)
def test_push_no_target_name_in_yaml(request, config_yaml, tmp_path):
config_yaml = request.getfixturevalue(config_yaml)
config_yaml = config_yaml + "\nmodelID: {}".format(modelID)
with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
f.write(config_yaml)
config = read_model_metadata_yaml(tmp_path)
from argparse import Namespace
options = Namespace(code_dir=tmp_path, model_config=config)
with pytest.raises(DrumCommonException, match="Missing keys: \['targetName'\]"):
drum_push(options)
def test_output_in_code_dir():
code_dir = "/test/code/is/here"
output_other = "/test/not/code"
output_code_dir = "/test/code/is/here/output"
assert not output_in_code_dir(code_dir, output_other)
assert output_in_code_dir(code_dir, output_code_dir)
def test_output_dir_copy():
with tempfile.TemporaryDirectory() as tempdir:
# setup
file = Path(tempdir, "test.py")
file.touch()
Path(tempdir, "__pycache__").mkdir()
out_dir = Path(tempdir, "out")
out_dir.mkdir()
# test
create_custom_inference_model_folder(tempdir, str(out_dir))
assert Path(out_dir, "test.py").exists()
assert not Path(out_dir, "__pycache__").exists()
assert not Path(out_dir, "out").exists()
def test_read_structured_input_arrow_csv_na_consistency(tmp_path):
"""
Test that N/A values (None, numpy.nan) are handled consistently when using
CSV vs Arrow as a prediction payload format.
1. Make CSV and Arrow prediction payloads from the same dataframe
2. Read both payloads
3. Assert the resulting dataframes are equal
"""
# arrange
df = pd.DataFrame({"col_int": [1, np.nan, None], "col_obj": ["a", np.nan, None]})
csv_filename = os.path.join(tmp_path, "X.csv")
with open(csv_filename, "w") as f:
f.write(df.to_csv(index=False))
arrow_filename = os.path.join(tmp_path, "X.arrow")
with open(arrow_filename, "wb") as f:
f.write(pyarrow.ipc.serialize_pandas(df).to_pybytes())
# act
csv_df = StructuredInputReadUtils.read_structured_input_file_as_df(csv_filename)
arrow_df = StructuredInputReadUtils.read_structured_input_file_as_df(arrow_filename)
# assert
is_nan = lambda x: isinstance(x, float) and np.isnan(x)
is_none = lambda x: x is None
| assert_frame_equal(csv_df, arrow_df) | pandas.testing.assert_frame_equal |
import sys
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, train_test_split, StratifiedKFold, cross_val_score, learning_curve
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import lightgbm as lgb
from skopt import BayesSearchCV
from hyperopt import hp, tpe,anneal, fmin, Trials
import eda
import helper_functions as h
def search_hyperparameter(def_params, n_iter, cv, train_data, train_targets):
""" Returns the best hyper parameters, logging data and function to optimize
Args:
def_params: Dict of search space of hyper parameters
n_iter: Maximum number of iterations
cv: Number of k-fold CV
train_data: The data to fit
train_targets: The target variable to try to predict
Returns:
best: the best hyper parameters
trials: logging information of a test
objective: function to optimize
"""
def objective(params, def_params=def_params, X=train_data, y=train_targets):
# the function gets a set of variable parameters in "param"
params = {x[0] : params[x[0]] if x[1]['dtype'] == 'float' else int(params[x[0]]) for x in def_params.items()}
# we use this params to create a new LGBM Regressor
        clf = lgb.LGBMClassifier(
            random_state=h.RANDOM_STATE,
            application='binary',
            class_weight='balanced',
            **params)
        # and then conduct the cross validation with the same folds as before
        score = -cross_val_score(clf, X, y, scoring="roc_auc", cv=cv, n_jobs=-1).mean()
return score
# trials will contain logging information
trials = Trials()
space = {
k: v['hpf'] for k, v in zip(def_params.keys(), def_params.values())
}
best = fmin(fn=objective, # function to optimize
space=space,
algo=anneal.suggest, # optimization algorithm, hyperotp will select its parameters automatically
max_evals=n_iter, # maximum number of iterations
trials=trials, # logging
rstate=np.random.RandomState(h.RANDOM_STATE), # fixing random state for the reproducibility
)
return best, trials, objective
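# Hedged example of the def_params structure expected by search_hyperparameter
# (the parameter names and ranges are illustrative assumptions, not values from
# the original project):
#   def_params = {
#       'num_leaves': {'dtype': 'int',
#                      'hpf': hp.quniform('num_leaves', 8, 128, 1)},
#       'learning_rate': {'dtype': 'float',
#                         'hpf': hp.loguniform('learning_rate', -5, 0)},
#   }
#   best, trials, objective = search_hyperparameter(
#       def_params, n_iter=50, cv=5,
#       train_data=X_train, train_targets=y_train)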
def build_model(best, def_params):
"""
Args:
best: the best hyper parameters
def_params: Dict of search space of hyper parameters
Returns:
model (LightGBM LGBMClassifier): LGBMClassifier model object
"""
params = {x[0] : best[x[0]] if x[1]['dtype'] == 'float' else int(best[x[0]]) for x in def_params.items()}
return lgb.LGBMClassifier(
random_state=h.RANDOM_STATE,
application='binary',
class_weight='balanced',
**params,)
def evaluate_model(model, objective, best, X_test, Y_test):
"""Prints multi-output classification results
Args:
model (pandas dataframe): the scikit-learn fitted model
objective: function to optimize
X_text (pandas dataframe): The X test set
Y_test (pandas dataframe): the Y test classifications
Returns: None
"""
    sa_test_score = roc_auc_score(Y_test, model.predict(X_test))
    print("Test ROC AUC {:.3f}".format(sa_test_score))
    print("Best CV ROC {:.3f} params {}".format(-objective(best), best))
def plot_result(trials):
def get_results(x):
score = np.abs(x['result']['loss'])
params = np.array([p for p in x['misc']['vals'].values()], dtype='float32')
sa_results = np.append(arr=score , values=params)
return sa_results
sa_results=np.array([
get_results(x)
for x in trials] )
sa_columns = ['score']
sa_columns.extend(trials[0]['misc']['vals'].keys())
sa_results_df = pd.DataFrame(sa_results, columns=sa_columns)
sa_results_df.plot(subplots=True,figsize=(10, 10))
return sa_results_df
def save_model(model, model_filepath):
"""dumps the model to the given filepath
Args:
model (scikit-learn model): The fitted model
model_filepath (string): the filepath to save the model to
Returns: None
"""
pickle.dump(model.best_estimator_, open(model_filepath, 'wb'))
def main():
eda_mailout_train = pd.read_pickle ('../ArvatoPrj_200222.liso/eda_mailout_train.full.pkl')
response = pd.read_pickle ('../ArvatoPrj_200222.liso/response.pkl')
random_state = 2020
train_data, test_data, train_targets, test_targets = train_test_split(
eda_mailout_train.data_scaled,
response,
test_size=0.20,
shuffle=True,
random_state=random_state)
train_data, test_data, train_targets, test_targets = pd.DataFrame(train_data), | pd.DataFrame(test_data) | pandas.DataFrame |
import pandas as pd
def init_wallet(wallet):
wallet['Capital'] = 1000.0
wallet['IN_POSITION'] = False
wallet['total_trade'] = 0
wallet['win_trade'] = 0
wallet['lose_trade'] = 0
wallet['COIN'] = ""
wallet['ASSET_WORTH'] = 0.0
def buy_crypto(wallet, amount, price, fees):
wallet['Capital'] = wallet['Capital'] - (amount * price) - fees
def sell_crypto(wallet, amount, price, fees):
wallet['Capital'] = wallet['Capital'] + (amount * price) - fees
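# Minimal usage sketch (illustrative amounts and prices only):
#   wallet = {}
#   init_wallet(wallet)
#   buy_crypto(wallet, amount=0.01, price=50000.0, fees=0.5)   # Capital: 1000 - 500 - 0.5 = 499.5
#   sell_crypto(wallet, amount=0.01, price=51000.0, fees=0.5)  # Capital: 499.5 + 510 - 0.5 = 1009.0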
def createFrame_wallet(msg):
df = | pd.DataFrame([msg]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from pathos import pools as pp
import pickle
'''
This class implements user centric method. Each function will describe which metric it is used for according
to the questions number and mapping.
These metrics assume that the data is in the order id,created_at,type,actor.id,repo.id
'''
class UserCentricMeasurements(object):
def __init__(self):
super(UserCentricMeasurements, self).__init__()
'''
This function selects a subset of the full data set for a selected set of users and event types.
Inputs: users - A boolean or a list of users. If it is list of user ids (login_h) the data frame is subset on only this list of users.
If it is True, then the pre-selected node-level subset is used. If False, then all users are included.
eventType - A list of event types to include in the data set
Output: A data frame with only the selected users and event types.
'''
def determineDf(self,users,eventType):
if users == True:
#self.selectedUsers is a data frame containing only the users in interested_users
df = self.selectedUsers
elif users != False:
            df = self.main_df[self.main_df.user.isin(users)]
else:
df = self.main_df
if eventType != None:
df = df[df.event.isin(eventType)]
return df
'''
    This method returns the number of unique repos that a particular set of users contributed to
Question #17
Inputs: selectedUsers - A list of users of interest or a boolean indicating whether to subset to the node-level measurement users.
eventType - A list of event types to include in the data
Output: A dataframe with the user id and the number of repos contributed to
'''
def getUserUniqueRepos(self,selectedUsers=False,eventType=None):
df = self.determineDf(selectedUsers,eventType)
df = df.groupby('user')
data = df.repo.nunique().reset_index()
data.columns = ['user','value']
return data
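    # Usage sketch (hypothetical): assuming `ucm` is an initialized instance whose
    # main_df holds the event stream described above:
    #
    #     repos_per_user = ucm.getUserUniqueRepos(eventType=['PushEvent'])
    #     # -> DataFrame with columns ['user', 'value'], one row per user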
'''
This method returns the timeline of activity of the desired user over time, either in raw or cumulative counts.
Question #19
Inputs: selectedUsers - A list of users of interest or a boolean indicating whether to subset to node-level measurement users.
time_bin - Time frequency for calculating event counts
cumSum - Boolean indicating whether to calculate the cumulative activity counts
            eventType - List of event types to include in the data
    Output: A dictionary with a data frame for each user with two columns: date and event counts
'''
def getUserActivityTimeline(self, selectedUsers=True,time_bin='1d',cumSum=False,eventType=None):
df = self.determineDf(selectedUsers,eventType)
df['value'] = 1
if cumSum:
df['cumsum'] = df.groupby('user').value.transform(pd.Series.cumsum)
df = df.groupby(['user',pd.Grouper(key='time',freq=time_bin)]).max().reset_index()
df['value'] = df['cumsum']
df = df.drop('cumsum',axis=1)
else:
df = df.groupby(['user',pd.Grouper(key='time',freq=time_bin)]).sum().reset_index()
data = df.sort_values(['user', 'time'])
measurements = {}
for user in data['user'].unique():
measurements[user] = data[data['user'] == user]
return measurements
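    # Usage sketch (hypothetical): daily cumulative activity for the node-level users
    #
    #     timelines = ucm.getUserActivityTimeline(time_bin='1d', cumSum=True)
    #     # -> dict keyed by user id; each value is a DataFrame with 'user', 'time'
    #     #    and 'value' (cumulative event count) columns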
'''
This method returns the top k most popular users for the dataset, where popularity is measured
as the total popularity of the repos created by the user.
Question #25
Inputs: k - (Optional) The number of users that you would like returned.
            use_metadata - Boolean; if True and repo ownership metadata is loaded, repo owners are taken from it.
                           Otherwise the owner encoded in the repo id, or the first observed user with
                           a creation event, is used as a proxy for the repo owner.
            eventType - A list of event types to include
    Output: A dataframe with the user ids and the summed popularity (watch/fork events) of the repos they own
'''
def getUserPopularity(self,k=5000,use_metadata=False,eventType=None):
df = self.determineDf(False,eventType)
df['value'] = 1
repo_popularity = df[df.event.isin(['WatchEvent','ForkEvent'])].groupby('repo')['value'].sum().reset_index()
if use_metadata and self.useRepoMetaData:
#merge repo popularity with the owner information in repo_metadata
#drop data for which no owner information exists in metadata
repo_popularity = repo_popularity.merge(self.repoMetaData,left_on='repo',right_on='repo',
how='left').dropna()
elif df['repo'].str.match('.{22}/.{22}').all():
#if all repo IDs have the correct format use the owner info from the repo id
repo_popularity['owner_id'] = repo_popularity['repo'].apply(lambda x: x.split('/')[0])
else:
#otherwise use creation event as a proxy for ownership
user_repos = df[df['event'] == 'CreateEvent'].sort_values('time').drop_duplicates(subset='repo',keep='first')
user_repos = user_repos[['user','repo']]
user_repos.columns = ['owner_id','repo']
            if len(user_repos.index) > 0:
repo_popularity = user_repos.merge(repo_popularity,on='repo',how='left')
else:
return None
measurement = repo_popularity.groupby('owner_id').value.sum().sort_values(ascending=False).head(k)
measurement = pd.DataFrame(measurement).sort_values('value',ascending=False)
return measurement
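    # Usage sketch (hypothetical): top 100 users ranked by the watch/fork popularity
    # of the repos they own, preferring ownership metadata when it is loaded
    #
    #     top_owners = ucm.getUserPopularity(k=100, use_metadata=True)
    #     # -> DataFrame indexed by owner_id with a single 'value' column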
'''
This method returns the average time between events for each user
    Inputs: selectedUsers - A list of users of interest or a boolean indicating whether to subset to the node-level measurement users
            nCPU - (Optional) Number of CPUs used to run the metric in parallel
    Outputs: A list of average times for each user. Length should match the number of users
'''
def getAvgTimebwEventsUsers(self,selectedUsers=True, nCPU=1):
        df = self.determineDf(selectedUsers, None)
        users = df['user'].unique()
        args = [(df, user) for user in users]
        pool = pp.ProcessPool(nCPU)
        deltas = pool.map(self.getMeanTimeUserHelper, args)
return deltas
'''
Helper function for getting the average time between events
Inputs: Same as average time between events
Output: Same as average time between events
'''
def getMeanTimeUser(self,df, user):
d = df[df.user == user]
d = d.sort_values(by='time')
delta = np.mean(np.diff(d.time)) / np.timedelta64(1, 's')
return delta
def getMeanTimeUserHelper(self,args):
return self.getMeanTimeUser(*args)
'''
    This method returns the distribution of the diffusion delay for each user
    Question #27
    Inputs: unit - (Optional) This is the unit that you want the distribution in. Check np.timedelta64 documentation
                   for the possible options
            selectedUser - A list of users of interest or a boolean indicating whether to subset to the node-level measurement users
            eventType - (Optional) List of event types to include in the data
            Account creation times from user metadata are used when loaded; otherwise each user's first observed action is the proxy for account creation time.
Output: A list (array) of deltas in units specified
'''
def getUserDiffusionDelay(self,unit='h', selectedUser=True,eventType=None):
df = self.determineDf(selectedUser,eventType)
df['value'] = df['time']
df['value'] = pd.to_datetime(df['value'])
df['value'] = df['value'].dt.round('1H')
if self.useUserMetaData:
df = df.merge(self.userMetaData[['user','created_at']],left_on='user',right_on='user',how='left')
df = df[['user','created_at','value']].dropna()
measurement = df['value'].sub(df['created_at']).apply(lambda x: int(x / np.timedelta64(1, unit)))
else:
grouped = df.groupby('user')
transformed = grouped['value'].transform('min')
measurement = df['value'].sub(transformed).apply(lambda x: int(x / np.timedelta64(1, unit)))
return measurement
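    # Usage sketch (hypothetical): diffusion delays in hours, measured from account
    # creation when user metadata is loaded, otherwise from each user's first event
    #
    #     delays = ucm.getUserDiffusionDelay(unit='h')
    #     # -> Series of integer hour offsets, one per event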
'''
This method returns the top k users with the most events.
Question #24b
    Inputs: k - Number of users to be returned
            eventType - (Optional) List of event types to subset the data on
Output: Dataframe with the user ids and number of events
'''
def getMostActiveUsers(self,k=5000,eventType=None):
df = self.main_df
if eventType != None:
df = df[df.event.isin(eventType)]
df['value'] = 1
df = df.groupby('user')
measurement = df.value.sum().sort_values(ascending=False).head(k)
measurement = pd.DataFrame(measurement).sort_values('value',ascending=False)
return measurement
'''
This method returns the distribution for the users activity (event counts).
Question #24a
    Inputs: eventType - (Optional) Desired event types to use
            selectedUser - Boolean indicating whether to subset to the node-level measurement users
Output: List containing the event counts per user
'''
def getUserActivityDistribution(self,eventType=None,selectedUser=False):
if selectedUser:
df = self.selectedUsers
else:
df = self.main_df
if eventType != None:
df = df[df.event.isin(eventType)]
df['value'] = 1
df = df.groupby('user')
measurement = df.value.sum().reset_index()
return measurement
'''
Calculate the proportion of pull requests that are accepted by each user.
Question #15 (Optional Measurement)
Inputs: eventType: List of event types to include in the calculation (Should be PullRequestEvent).
            thresh: Minimum number of pull requests a user must have to be included in the distribution.
Output: Data frame with the proportion of accepted pull requests for each user
'''
def getUserPullRequestAcceptance(self,eventType=['PullRequestEvent'], thresh=2):
df = self.main_df_opt
        if df is not None and 'PullRequestEvent' in self.main_df.event.values:
df = df[self.main_df.event.isin(eventType)]
users_repos = self.main_df[self.main_df.event.isin(eventType)]
#subset on only PullRequest close actions (not opens)
idx = df['action'] == 'closed'
closes = df[idx]
users_repos = users_repos[idx]
#merge pull request columns (action, merged) with main data frame columns
closes = | pd.concat([users_repos,closes],axis=1) | pandas.concat |
import os
import pandas as pd
from requests import get
from settings import INPUT_DATA_PATH
HOSPITAL_DIRPATH = os.path.join(INPUT_DATA_PATH, "hospitalisation_data")
URL = "https://opendata.ecdc.europa.eu/covid19/hospitalicuadmissionrates/csv/data.csv"
COUNTRIES = {"france", "belgium", "italy", "sweden", "uk", "spain"}
RENAME_INDICATOR = {
"Daily hospital occupancy": "hosp_occup",
"Daily ICU occupancy": "icu_occup",
"Weekly new hospital admissions per 100k": "hosp_adm_per_100K",
"Weekly new ICU admissions per 100k": "icu_adm_per_100K",
}
endpoint = (
"https://api.coronavirus.data.gov.uk/v1/data?"
"filters=areaType=overview&"
'structure={"date":"date","covidOccupiedMVBeds":"covidOccupiedMVBeds","newAdmissions":"newAdmissions","hospitalCases":"hospitalCases"}'
)
def get_data(url):
    """Fetches JSON from the given URL, raising on HTTP errors."""
    response = get(url, timeout=10)
    if response.status_code >= 400:
        raise RuntimeError(f"Request failed: { response.text }")
    return response.json()
def get_uk():
uk_df = get_data(endpoint)
uk_df = pd.DataFrame(uk_df["data"])
uk_df["date"] = pd.to_datetime(
uk_df["date"], errors="coerce", format="%Y-%m-%d", infer_datetime_format=False
)
uk_df.rename(
columns={
"covidOccupiedMVBeds": "uk_icu_occup",
"newAdmissions": "uk_hosp_adm",
"hospitalCases": "uk_hosp_occup",
},
inplace=True,
)
uk_df["year_week"] = uk_df.date.dt.strftime(date_format="%Y-W%U")
# Need to renumber week 0 to last week of 2020
uk_df.year_week.replace({"2021-W00": "2020-W53"}, inplace=True)
uk_df.groupby(["year_week"]).mean().reset_index()
return uk_df.groupby(["year_week"]).mean().reset_index()
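# Usage sketch (assumes the gov.uk endpoint answers with the structure requested above):
#
#     uk_weekly = get_uk()
#     # -> one row per '%Y-W%U' year-week with the weekly means of
#     #    uk_icu_occup, uk_hosp_adm and uk_hosp_occup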
def get_eu_countries():
eu_countries = | pd.read_csv(URL) | pandas.read_csv |
import sys
sys.path.append("/home/sawyer/wenxuan/180521_EPI_clean/")
import numpy as np
import pickle
from IPython import embed
import os
import tensorflow as tf
import pandas as pd
from rllab.envs.normalized_env import normalize
from rllab.envs.gym_env import GymEnv
from sandbox.rocky.tf.envs.base import TfEnv
import EPI
def main():
name = 'Exp180418_simple_baseline_hopper'
EPI.init('hopper', num_of_params=8)
sess = tf.Session()
sess.__enter__()
algo = pickle.load(open(os.getcwd()+"/"+name+"/pickle.p", "rb"))
env = TfEnv(normalize(GymEnv('HopperAvg-v0')))
core_env = env.wrapped_env.wrapped_env.env.env
target_sample_size = 500
egreedy = 0.2
data = []
rollouts = []
sample_size = 0
while sample_size < target_sample_size:
observation = env.reset()
core_env.change_env(scale=np.array([0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.1]), env_id=0)
episode_size = 0
while True:
if np.random.rand() < egreedy:
action = env.action_space.sample()
else:
action, d = algo.policy.get_action(observation)
full_state = core_env.state_vector()
rollouts.append([full_state, action])
next_observation, reward, terminal, reward_dict = env.step(action)
episode_size += 1
sample_size += 1
observation = next_observation
if terminal or sample_size == target_sample_size:
break
print('Rollout...')
scale_list = pd.read_csv('../EPI/envs/hopper_env_list.csv').values
for i in range(100):
env_id = i
core_env.change_env(scale=scale_list[i, 1:], env_id=i)
print(core_env.env_id)
print(core_env.scale)
for rollout in rollouts:
state = rollout[0]
observation = core_env.force_reset_model(qpos=state[0:6], qvel=state[6:12])
action = rollout[1]
next_observation, reward, terminal, reward_dict = env.step(action)
data.append(np.concatenate([observation, action, next_observation, np.array([env_id]), core_env.scale, np.array([reward, terminal * 1])]))
sample_size += 1
observation = next_observation
data = np.array(data)
g = lambda s, num: [s + str(i) for i in range(num)]
columns = g('obs', len(observation))+g('ac', len(action))+g('next_obs', len(observation))+g('env_id', 1)+g('env_vec', 8)+['reward']+['terminal']
df = | pd.DataFrame(data, columns=columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
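# For example (hypothetical 2-D frame), _axify(df, [0, 1], 1) returns
# (slice(None), [0, 1]), i.e. "all rows, positions 0 and 1 along axis 1".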
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # dup indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
    def test_loc_coercion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
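# Added note (not part of the original test; 'toy' below is a made-up frame):
# the inconsistency above is the classic chained-indexing pitfall, and a single
# .loc call avoids it because it writes through to the frame itself:
#
#     toy = DataFrame({'a': ['aaa', 'bbb'], 'test': ['_', 'x']})
#     mask = toy['test'] == '_'
#     toy.loc[mask, 'test'] = '-----'    # modifies toy in place
#     # toy[mask]['test'] = '-----'      # chained form may write to a copy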
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] expected [%s], received [%s]"
% (key, r, ans))
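# Added clarification (hedged; the toy frame below is illustrative only):
# .iloc is strictly positional, so it refuses a boolean Series mask, while
# .loc aligns the mask on the index and a plain ndarray mask works positionally:
#
#     toy = DataFrame({'a': range(3)}, index=list('xyz'))
#     m = toy['a'] > 0
#     toy.loc[m]        # ok, aligned by label
#     toy[m.values]     # ok, raw ndarray mask
#     # toy.iloc[m]     # raises, as the expected messages above describe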
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because the index is not monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
print()
# In[ ]:
import pandas as pd
import numpy as np
import missingno as mno
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer, LabelEncoder, StandardScaler
from sklearn.impute import SimpleImputer, MissingIndicator
from feature_engine import missing_data_imputers as mdi
from feature_engine.categorical_encoders import OneHotCategoricalEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import RandomForestRegressor
from feature_engine import discretisers as dsc
from feature_engine import categorical_encoders as ce
# In[ ]:
data = pd.read_csv("../../../input/jojoker_singapore-airbnb/listings.csv")
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from plotly.subplots import make_subplots
from pathlib import Path
repo_dir = Path(__file__).parent.parent
outputdir = repo_dir/'output'
outputdir.mkdir(parents=True, exist_ok=True)
casos = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto3/TotalesPorRegion_std.csv')
casos['Fecha'] = pd.to_datetime(casos['Fecha'])
casos_sintomaticos = casos[casos['Categoria']=='Casos nuevos con sintomas'].pivot(index='Fecha', columns='Region', values='Total')
casos_nuevos = casos[casos['Categoria']=='Casos nuevos totales'].pivot(index='Fecha', columns='Region', values='Total')
casos_activos_conf = casos[casos['Categoria']=='Casos activos confirmados'].pivot(index='Fecha', columns='Region', values='Total')
casos_activos_prob = casos[casos['Categoria']=='Casos activos probables'].pivot(index='Fecha', columns='Region', values='Total')
casos_nuevos_prob = casos[casos['Categoria']=='Casos probables acumulados'].pivot(index='Fecha', columns='Region', values='Total').diff()
casos_nuevos_antigeno = casos[casos['Categoria']=='Casos nuevos confirmados por antigeno'].pivot(index='Fecha', columns='Region', values='Total')
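# Added note (illustrative): each pivot above reshapes the long feed
# (Fecha, Categoria, Region, Total) into a wide frame indexed by Fecha with one
# column per region, including a country-wide 'Total' column, which is why
# 'Total' is renamed to 'Chile' just below.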
casos_sintomaticos.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos.rename(columns={'Total': 'Chile'}, inplace=True)
casos_activos_conf.rename(columns={'Total': 'Chile'}, inplace=True)
casos_activos_prob.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos_prob.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos_antigeno.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos_prob_antigeno = casos_nuevos.add(casos_nuevos_prob, fill_value=0)
casos_nuevos_prob_antigeno = casos_nuevos_prob_antigeno.add(casos_nuevos_antigeno, fill_value=0)
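# Added note (hedged sketch, values are made up): .add with fill_value=0 keeps
# a (date, region) cell that exists in only one operand instead of producing
# NaN, e.g.:
#
#     a = pd.DataFrame({'R1': [1.0]}, index=pd.to_datetime(['2021-01-01']))
#     b = pd.DataFrame({'R2': [2.0]}, index=pd.to_datetime(['2021-01-01']))
#     a.add(b, fill_value=0)    # columns R1 and R2 with 1.0 and 2.0, no NaN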
datos_regiones = pd.read_csv('https://raw.githubusercontent.com/ivanMSC/COVID19_Chile/master/utils/regionesChile.csv')
casos_activos = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto46/activos_vs_recuperados.csv')
"""
financial_database_manager.py
"""
import numpy as np
import pandas as pd
import pandas.core.common as com
from pandas.tseries.offsets import BDay
from datetime import date
import logging
from sqlalchemy import func, or_, and_
# my data base configurations
from database2.base import session_factory
from database2.domain_model import Instrument, OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, Split
from database2.domain_model import data_table_instrument_types_mapper, available_data_sources, init_instrument_arg_names, instrument_default_value_map
from database2.yahoo_finance import YahooFinanceConnection as _YahooFinanceConnection
# other tools
from tools.general_tools import user_picks_element_from_list, list_grouper, time_period_logger_msg
from tools.dataframe_tools import nan_before_dates, dataframe_with_new_calendar
# Logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s : %(module)s : %(funcName)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
class FinancialDatabaseManager:
def __init__(self):
self.session = session_factory()
def ticker_existence(self, tickers: {str, list}, data_source: str = None) -> tuple:
"""
Returns a tuple with three lists: 1) list of existing tickers 2) list of missing tickers 3) list of existing
tickers but with a different data source than the one provided. If data_source is not specified, only two lists
will be returned
:param tickers: str or list
:param data_source: str
:return: (list, list, list)
"""
tickers = make_ticker_list(tickers)
logger.debug('Checking existence of {} ticker(s)'.format(len(tickers)))
# filter out the tickers and associated data sources
q_existing_tickers = self.session.query(
Instrument.ticker,
Instrument.data_source) \
.filter(
Instrument.ticker.in_(tickers)
).all()
self.session.close()
# create the resulting ticker lists
existing_tickers = [q_tup[0] for q_tup in q_existing_tickers]
missing_tickers = list(set(tickers).difference(existing_tickers))
if data_source:
existing_tickers_diff_source = [q_tup[0] for q_tup in q_existing_tickers if q_tup[1] != data_source.upper()]
return existing_tickers, missing_tickers, existing_tickers_diff_source
else:
return existing_tickers, missing_tickers
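# Hedged usage sketch (added for clarity; tickers and data source are made up):
#
#     fdm = FinancialDatabaseManager()
#     existing, missing, diff_source = fdm.ticker_existence(
#         ['ABC', 'XYZ'], data_source='YAHOO')
#     # 'existing' and 'missing' partition the requested tickers, while
#     # 'diff_source' lists tickers stored under another data source.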
def get_ticker_instrument_attribute_dict(self, tickers: {str, list}, attribute: {str, list}) -> dict:
"""
Assumes that ticker is a string or a list of strings and attribute is a string or a list of strings (e.g. 'sector'
will return a dictionary like {'ticker 1': 'sector A', 'ticker 2': 'sector B', ...}; a list of attributes returns a
sub-dictionary of attribute values per ticker).
Returns a dictionary with tickers as keys and the specific attribute as values
:param tickers:
:param attribute: str
:return: dict
"""
logger.debug('Loads {} from the data base'.format("'%s'" % "', '".join(attribute) if isinstance(attribute, list) else attribute))
ticker_attribute_dict = {} # initializing the dictionary
attributes = [getattr(Instrument, inst_attr) for inst_attr in attribute] if isinstance(attribute, list) \
else [getattr(Instrument, attribute)]
# to make the requests smaller, we need to split the ticker list into sub-lists
for ticker_sub_list in list_grouper(make_ticker_list(tickers), 500):
query_ticker_attribute = self.session.query(Instrument.ticker, *attributes) \
.filter(
Instrument.ticker.in_(ticker_sub_list)) \
.order_by(Instrument.ticker)
if isinstance(attribute, list):
ticker_attribute_dict.update({tup[0]: dict(zip(attribute, tup[1:])) for tup in query_ticker_attribute.all()})
else:
ticker_attribute_dict.update(dict(query_ticker_attribute))
self.session.close()
return ticker_attribute_dict
def get_latest_as_of_date_dict(self, tickers: {str, list},
table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend}) -> dict:
"""
Returns a dictionary with tickers as keys and the latest date with a value in the given data table as values
:param tickers: str, list
:param table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend}
:return: dict
"""
# make the initial query for the ticker and the maximum of the dates, to later be filtered and grouped by
latest_as_of_date_ticker_map = {}
q_instrument_id_max_date = self.session.query(
Instrument.ticker,
func.max(table.as_of_date)
).join(Instrument)
for ticker_sub_list in list_grouper(make_ticker_list(tickers), 500):
# first query the latest observation date for the close price
sub_q_latest_close_date = q_instrument_id_max_date \
.filter(
Instrument.ticker.in_(ticker_sub_list),
).group_by(
table.instrument_id
)
latest_as_of_date_ticker_map.update(sub_q_latest_close_date)
self.session.close()
return latest_as_of_date_ticker_map
def get_eligible_start_date(self, tickers: {str, list},
data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, list}) -> {date, None}:
"""
For a given number of tickers and a data table, gather the latest date with a value in the data table and return the
oldest of these dates. If one ticker does not have a row in the data table, return None.
:param tickers: str, list
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, list
if list call the function recursively and return the oldest date for each data table
:return: None, date
"""
if isinstance(data_table, list):
start_dates = []
for d_tab in data_table:
# call the function recursively
start_dates.append(self.get_eligible_start_date(tickers=tickers, data_table=d_tab))
if None in start_dates:
return None
else:
return min(start_dates)
else:
latest_dates = self.get_latest_as_of_date_dict(tickers=tickers, table=data_table).values()
num_tickers = len(tickers) if isinstance(tickers, list) else 1
if len(latest_dates) < num_tickers: # meaning at least one ticker does not have a row in the data table
return None
else:
return min(latest_dates)
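# Added example (dates are hypothetical): when called with several data tables,
# the oldest per-table date wins, e.g. ClosePrice last stored on 2021-01-10 and
# Volume on 2021-01-08 gives a start date of 2021-01-08; a ticker with no rows
# at all makes the method return None so that a full history is loaded.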
def delete_instrument(self, tickers: {str, list}) -> None:
"""
Deletes all instruments with the given ticker(s) from the database
:param tickers: str, list
:return: None
"""
tickers = make_ticker_list(tickers)
q_instrument = self.session.query(Instrument) \
.filter(
Instrument.ticker.in_(tickers)
)
for instrument in q_instrument:
logger.debug('Deletes {} from the instrument table'.format(instrument.ticker))
self.session.delete(instrument)
self.session.commit() # triggers a final flush of the remaining changes
self.session.close()
# TODO change the inputs to ticker list, start_date and end_date
def _delete_rows_in_data_table_overlapping_df(self, df: pd.DataFrame, data_table: {OpenPrice, HighPrice, LowPrice,
ClosePrice, Volume, Dividend, Split}):
"""
Deletes the rows in the given data table class corresponding to overlaps in the given DataFrame
:param df: DataFrame (columns = instrument tickers, index = DatetimeIndex)
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend
:return: None
"""
# create a list of all the instrument ids corresponding to the tickers
# we can't join(Instrument) and then filter on Instrument.ticker since delete will raise an error
instrument_ids = [inst_id for inst_id in
self.get_ticker_instrument_attribute_dict(tickers=list(df), attribute='id').values()]
# delete all data table rows for each relevant tickers with a date between the DataFrame index
start_date = min(df.index)
end_date = max(df.index)
logger.debug("Delete rows in '{}' table for {} ticker(s)".format(data_table.__tablename__, df.shape[1]) + time_period_logger_msg(start_date=start_date, end_date=end_date))
for sub_instrument_id_list in list_grouper(instrument_ids, 500):
self.session.query(data_table) \
.filter(
and_(
data_table.instrument_id.in_(sub_instrument_id_list),
and_(data_table.as_of_date >= start_date, data_table.as_of_date <= end_date)
)
).delete(synchronize_session=False) # don’t synchronize the session. This option is the most efficient and
# is reliable once the session is expired, which typically occurs after a commit(), or explicitly using
# expire_all()
self.session.commit()
self.session.close()
def _get_data(self, tickers: {str, list}, start_date: {date, None}, end_date: {date, None},
data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, Split}):
"""
Returns a DataFrame with tickers as columns, as of date as index and data as values (e.g. dividends)
:param tickers: str, list
:param start_date: date, None
:param end_date: date, None
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, Split
:return: DataFrame
"""
logger.debug("Load data from the '{}' table".format(data_table.__tablename__) + time_period_logger_msg(start_date=start_date, end_date=end_date))
sql_table_df = None # initialize the resulting DataFrame table
for sub_ticker_list in list_grouper(make_ticker_list(ticker=tickers), 500):
# make the database query based on tickers, start date and end date
q_data = self.session.query(
data_table.as_of_date,
data_table.value,
Instrument.ticker)\
.join(Instrument)\
.filter(
and_(
Instrument.ticker.in_(sub_ticker_list),
data_table.as_of_date >= start_date if start_date is not None else True,
data_table.as_of_date <= end_date if end_date is not None else True
)
)
# store data in a DataFrame and concatenate the results
sql_query_df = pd.read_sql(q_data.statement, q_data.session.bind)
if sql_table_df is None:
sql_table_df = sql_query_df
else:
sql_table_df = pd.concat([sql_table_df, sql_query_df])
# pivot the DataFrame
logger.debug('Pivot DataFrame')
result_df = pd.pivot_table(sql_table_df, values='value', index=['as_of_date'], columns=['ticker'])
return result_df
def _calculate_total_return_price(self, price_df: pd.DataFrame, div_tax: float):
"""
Adjust the price to take into account the reinvestment of dividends
:param price_df: DataFrame
:param div_tax: float
:return: DataFrame
"""
logger.debug("Calculate total return for {} ticker(s)".format(price_df.shape[1]))
if div_tax < 0:
raise ValueError("div_tax needs to ba a float larger or equal to 0")
ticker_type_dict = self.get_ticker_instrument_attribute_dict(tickers=list(price_df),
attribute='instrument_type')
# find the tickers that has a dividend paying instrument type and download the data
div_paying_ticker = [v for v, k in ticker_type_dict.items() if k in data_table_instrument_types_mapper[Dividend]]
dividend_df = self.get_dividend(tickers=div_paying_ticker, start_date=min(price_df.index), end_date=max(price_df.index))
dividend_df = dividend_df.dropna(axis=1, how='all')
dividend_df = dividend_df.fillna(value=0) * (1.0 - div_tax)
div_paying_ticker = list(dividend_df)
# adjust the price data by adding the dividend (after tax), calculating the cumulative daily total return and
div_pay_price_df = price_df[div_paying_ticker].copy()
div_pay_price_clean_df = div_pay_price_df.fillna(method='ffill')
div_yield_df = dividend_df.loc[div_pay_price_clean_df.index].divide(div_pay_price_clean_df.shift())
daily_tr = div_pay_price_clean_df.pct_change() + div_yield_df.fillna(value=0).values
cum_tr = (1 + daily_tr.fillna(value=0)).cumprod()
# set the initial non nan price to the original one and remove total return price where the original was nan
index_first_non_nan = div_pay_price_clean_df.notna().idxmax() # index of first non-NaN for each column
first_value = np.diag(div_pay_price_clean_df.loc[index_first_non_nan]) # get the first non-NaN for each column
cum_tr *= first_value # to have the same initial value as the original DataFrame
nan_or_1 = div_pay_price_df.copy()
nan_or_1[~nan_or_1.isnull()] = 1
cum_tr *= nan_or_1.values
price_df[div_paying_ticker] = cum_tr
return price_df
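# Worked sketch of the adjustment above (illustrative numbers, div_tax = 0):
# with prices 100 -> 101 and a dividend of 1 paid on the second day,
#     daily_tr = 101 / 100 - 1 + 1 / 100 = 0.02
# so the cumulative product rescaled to the first observed price turns the
# close series 100, 101 into the total-return series 100, 102.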
def _convert_fx(self, df: pd.DataFrame, fx: str):
pass
def _get_price(self, tickers: {str, list}, start_date: {date, None}, end_date: {date, None},
data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice}, total_return: bool, div_tax: float):
price_df = self._get_data(tickers=tickers, start_date=start_date, end_date=end_date, data_table=data_table)
if total_return:
return self._calculate_total_return_price(price_df=price_df, div_tax=div_tax)
else:
return price_df
def get_dividend(self, tickers: {str, list}, start_date: date = None, end_date: date = None):
return self._get_data(tickers=tickers, start_date=start_date, end_date=end_date, data_table=Dividend)
def get_volume(self, tickers: {str, list}, start_date: date = None, end_date: date = None):
return self._get_data(tickers=tickers, start_date=start_date, end_date=end_date, data_table=Volume)
def get_close_price(self, tickers: {str, list}, start_date: date = None, end_date: date = None,
total_return: bool = False, div_tax: float = 0, fx: str = None):
return self._get_price(tickers=tickers, start_date=start_date, end_date=end_date, data_table=ClosePrice,
total_return=total_return, div_tax=div_tax)
class _DatabaseFeeder(FinancialDatabaseManager):
_new_old_key_map = {}
_old_new_value_per_old_key_map = {}
_data_tables = None
def __init__(self, source: str = None, as_of_date: date = None):
super().__init__()
# set the data source (can only be set once and is read only)
if source is None:
self._source = source
elif source.upper() in available_data_sources:
self._source = source.upper()
else:
raise ValueError("'source' needs to be one of '%s'" % "', '".join(available_data_sources))
self.as_of_date = as_of_date
def _find_unrefreshed_tickers(self, tickers: {str, list},
data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, list}):
"""
Takes a given number of tickers and a data table object and filters away the tickers that does not support the given
data table (e.g. a ticker representing a FX rate would be filtered away if the given data table object was Dividend),
and checks the latest recorded refresh date. Filter away tickers that has a recorded refresh date set to today
:param tickers: str, list
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend
:return: list
"""
unrefreshed_tickers = []
if isinstance(data_table, list):
for d_tab in data_table:
unrefreshed_tickers.extend(self._find_unrefreshed_tickers(tickers=tickers, data_table=d_tab))
return list(set(unrefreshed_tickers))
else:
logger.debug("Looking for tickers with unrefreshed data in the '{}' table".format(data_table.__tablename__))
q_ticker = self.session.query(Instrument.ticker) # initial query for the tickers
for tickers_sub_list in list_grouper(make_ticker_list(tickers), 500):
sub_q_ticker = q_ticker \
.filter(
and_(
Instrument.ticker.in_(tickers_sub_list), # tickers should be in the sub list
Instrument.instrument_type.in_(data_table_instrument_types_mapper[data_table]), # the instrument type is eligible
or_(
# the ticker has no data in the table
getattr(Instrument, data_table.refresh_info_column_name) == None,
# the latest refresh was in the past
getattr(Instrument, data_table.refresh_info_column_name) <= self.as_of_date
)
)
).all()
unrefreshed_tickers.extend([q_tick[0] for q_tick in sub_q_ticker])
self.session.close()
return unrefreshed_tickers
def _download_instrument_info_dict(self, tickers: list):
raise ValueError("have not defined method that loads instrument information")
def _download_data_dict(self, tickers: list, start_date: {date, None}, end_date: {date, None}) -> dict:
raise ValueError("have not defined method that loads instrument information")
def _info_dict_translator(self, info_dict: dict)->dict:
"""
Takes a ticker information dictionary (key=tickers, values = sub-dictionary with information) and changes the
keys and values in the sub-dictionary based on the translator attributes '_new_old_key_map' and
'_old_new_value_per_old_key_map'
:param info_dict: dict
:return: dict
"""
logger.debug('Translate the tickers and values of the information dictionary')
# for each ticker, the keys and values of the given dictionary according to the specified mappers
adj_info = {}
for ticker, info_per_ticker in info_dict.items():
# find the keys where the corresponding value needs to change
value_adj_keys = [key for key in self._old_new_value_per_old_key_map.keys() if key in info_per_ticker.keys()]
# change each value according to the mapper
for value_adj_key in value_adj_keys:
info_per_ticker.update(
{
value_adj_key: self._old_new_value_per_old_key_map[value_adj_key].get(
info_per_ticker[value_adj_key],
info_per_ticker[value_adj_key]
)
}
)
# change each key according to the mapper (when there is no key in the old dict set a default value to the
# key in the new dictionary)
adj_info_per_ticker = {key: info_per_ticker.get(key,
info_per_ticker.get(
self._new_old_key_map.get(
key,
key
),
instrument_default_value_map[key]
)
) for key in init_instrument_arg_names}
adj_info_per_ticker.update({'data_source': self.source}) # add the data source
adj_info.update(
{
ticker: adj_info_per_ticker
}
)
logger.debug('Done with adjusting the information dictionary')
return adj_info
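# Hedged sketch of the translation (mapper contents below are hypothetical and
# would be defined on a concrete feeder subclass):
#
#     _new_old_key_map = {'instrument_type': 'quoteType'}
#     _old_new_value_per_old_key_map = {'quoteType': {'EQUITY': 'stock'}}
#     # {'ABC': {'quoteType': 'EQUITY'}} then yields a sub-dictionary with
#     # 'instrument_type': 'stock', defaults for the remaining
#     # init_instrument_arg_names and 'data_source' set to self.source.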
def add_instrument(self, tickers) -> None:
"""
:param tickers:
:return:
"""
existence = self.ticker_existence(tickers=tickers, data_source=self.source)
missing_tickers = existence[1]
existing_tickers_diff_source = existence[2]
if len(existing_tickers_diff_source):
tasks = ['Delete & Replace', 'Replace', 'Refresh', 'Ignore']
print('There exists {} ticker(s) in the database with a different data source than {}\nWhat should we do?\n'
.format(len(existing_tickers_diff_source), self.source))
chosen_task = user_picks_element_from_list(list_=tasks)
if chosen_task == tasks[0]: # Delete & Replace
# delete the instruments and add the tickers as missing
self.delete_instrument(existing_tickers_diff_source)
missing_tickers.extend(missing_tickers)
elif chosen_task == tasks[1]: # Replace
# keep the instruments in the table but change the attributes and set the dates to None
# load the daily data and remove all data in the database that overlaps with this data
info_dict = self._download_instrument_info_dict(tickers=existing_tickers_diff_source)
adj_info_dict = self._info_dict_translator(info_dict=info_dict)
self.adjusting_instrument_attributes(ticker_attributes_dict=adj_info_dict)
self.refresh_data(tickers=existing_tickers_diff_source, replace=True, use_eligible_start_date=False)
elif chosen_task == tasks[2]: # Refresh
# load the daily data (based on the latest date in the database) and remove all loaded daily data that
# overlaps with the database
self.refresh_data(tickers=existing_tickers_diff_source)
elif chosen_task == tasks[3]:
# do nothing
pass
else:
raise ValueError(f"'{chosen_task}' is not a recognized task")
if len(missing_tickers):
logger.debug('Add {} tickers to the database'.format(len(missing_tickers)))
for sub_ticker_list in list_grouper(missing_tickers, 10):
info_dict = self._download_instrument_info_dict(tickers=sub_ticker_list)
adj_info_dict = self._info_dict_translator(info_dict=info_dict)
self._populate_instrument_table(ticker_attributes_dict=adj_info_dict)
# self.refresh_data(tickers=missing_tickers)
else:
logger.info("All tickers already exists in the database")
def refresh_data(self, tickers: {str, list}, replace: bool = False, use_eligible_start_date: bool = True,
data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, list} = None):
"""
:param tickers: str, list
:param replace: bool
:param use_eligible_start_date: bool
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend, list
:return:
"""
if data_table is None:
data_table = self._data_tables
else:
data_table = data_table if isinstance(data_table, list) else [data_table]
if any(d_tab not in self._data_tables for d_tab in data_table):
raise ValueError("{} can only refresh data for the following tables: '%s'".format(type(self).__name__) % "', '".join(self._data_tables))
# check if some tickers are missing from the data base and raise an error if that is the case
missing_tickers = self.ticker_existence(tickers=tickers)[1]
if len(missing_tickers):
raise ValueError("{} tickers are missing from the database:\n'%s'".format(len(missing_tickers)) % "', '".join(missing_tickers))
# remove rows from the data tables where the 'as of date' is equal to the 'created by' date
# this is to always have historical data after the close
self._delete_data_rows_before_close(tickers=tickers, data_table=data_table)
tickers = self._find_unrefreshed_tickers(tickers=tickers, data_table=data_table)
if len(tickers) == 0:
logger.debug("All tickers are refreshed")
return
if use_eligible_start_date:
# get the correct start date
start_date = self.get_eligible_start_date(tickers=tickers, data_table=self._data_tables)
else:
start_date = None
logger.debug("Refresh data for {} ticker(s)".format(len(tickers)) + time_period_logger_msg(start_date=start_date, end_date=None))
# download the DataFrame dictionary and populate the data table rows
table_name_data_dict = self._download_data_dict(tickers=tickers, start_date=start_date, end_date=date.today())
self._populate_data_table(table_df_dict={table: table_name_data_dict[table]
for table in table_name_data_dict.keys() & data_table}, replace=replace)
def _delete_data_rows_before_close(self, tickers: {str, list}, data_table: list)->None:
"""
Deletes the data table rows for the given tickers where the 'as of date' == 'created by' date. This is to avoid
downloading and storing data that has been loaded before the close
:param tickers: str, list
:return: None
"""
instrument_id_list = self.get_ticker_instrument_attribute_dict(tickers=tickers, attribute='id').keys()
for d_tab in data_table:
self.session.query(
d_tab
).filter(
d_tab.instrument_id.in_(instrument_id_list),
d_tab.as_of_date == d_tab.created_at
).delete(synchronize_session=False)
self.session.commit()
self.session.close()
def _populate_instrument_table(self, ticker_attributes_dict: dict) -> None:
"""
Adds rows to the instruments table in the database. Assumes that the given dictionary has tickers as keys and
attribute sub-dictionaries as values
:param ticker_attributes_dict: dict {ticker, {attribute_name: attribute_value}, ...}
:return: None
"""
self.session.add_all(
[Instrument(ticker=ticker, **ticker_attributes_dict[ticker])
for ticker in ticker_attributes_dict.keys()]
)
self.session.commit()
self.session.close()
def adjusting_instrument_attributes(self, ticker_attributes_dict: dict) -> None:
"""
Changes attributes for instruments that already exists in the database. Assumes that the given dictionary has
tickers (with capital letters) as keys and attribute sub-dictionaries as values
:param ticker_attributes_dict: dict {ticker, {attribute_name: attribute_value}, ...}
:return: None
"""
# query all instrument rows that will have their attributes changed
q_instruments = self.session.query(Instrument) \
.filter(
Instrument.ticker.in_(ticker_attributes_dict.keys())) \
.all()
for instrument in q_instruments:
new_attributes = ticker_attributes_dict[instrument.ticker] # {attribute_name: attribute_value}
for attribute in new_attributes.keys():
if attribute in Instrument.__table__.c.keys():
setattr(instrument, attribute, new_attributes[attribute])
else:
raise ValueError("'{}' does not exist as an attribute of Instrument class".format(attribute))
self.session.commit()
self.session.close()
def _populate_data_table(self, df: pd.DataFrame = None,
data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend}=None,
table_df_dict: dict = None, replace: bool = False) -> None:
"""
Inserts rows in the data table class:
1) records the date of the refresh in the instrument table
2) replaces the data in the database with the data in the given DataFrame or first removes the overlaps before
insert
3) formats the DataFrame to be in line with the data table and insert it to the database
:param df: DataFrame
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend
:param table_df_dict: {DataFrame: data table}
:param replace: bool
:return: None
"""
if table_df_dict is not None:
if com.count_not_none(df, data_table) > 0:
raise ValueError("when 'table_df_dict' is specified, please don't specify 'df' or 'table'")
for data_table, df in table_df_dict.items():
# recall the function recursively
self._populate_data_table(df=df, data_table=data_table)
else:
if com.count_not_none(df, data_table) != 2:
raise ValueError("when 'table_df_dict' is not specified, please specify 'df' and 'table'")
logger.info("Add rows to '{}' table".format(data_table.__tablename__))
# remove all the overlapping dates
if replace: # removes the overlaps from the database
# remove the dates from the database that exists in the DataFrame for the relevant tickers
self._delete_rows_in_data_table_overlapping_df(df=df, data_table=data_table)
else: # removes the overlaps from the input
# set values to be removed from the DataFrame to nan
ticker_last_date_dict = self.get_latest_as_of_date_dict(tickers=list(df), table=data_table)
# column-wise, for each date before the available date in the database, set the values to nan (to be
# removed when converting the DataFrame to an SQL table)
df = nan_before_dates(df=df, col_name_date_dict=ticker_last_date_dict)
# format the DataFrame to be in line with the SQL data table
df_sql = self._reformat_df_to_sql_table(df=df, data_table=data_table)
# insert the new rows into the data table
df_sql.to_sql(data_table.__tablename__, self.session.get_bind(), if_exists='append', index=False)
# update the refresh columns for the tickers in the DataFrame
self._record_refresh(tickers=list(df), table=data_table)
self.session.commit()
self.session.close()
def _record_refresh(self, tickers: list, table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend}) -> None:
"""
In the instrument table, set the latest refresh date for the relevant table to today()
:param tickers: list of str
:param table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend
:return: None
"""
logger.debug("Record refresh for {} ticker(s) for '{}' table".format(len(tickers), table.__tablename__))
self.adjusting_instrument_attributes(ticker_attributes_dict=
{ticker: {table.refresh_info_column_name: date.today()}
for ticker in tickers}
)
def _reformat_df_to_sql_table(self, df: pd.DataFrame, data_table: {OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend}) -> pd.DataFrame:
"""
Reformat the given DataFrame (assumed to have tickers as column names and dates as index) to be in line with the
given data table class
:param df: DataFrame
:param data_table: OpenPrice, HighPrice, LowPrice, ClosePrice, Volume, Dividend
:return: DataFrame
"""
logger.debug("Reformat the loaded DataFrame to be in line with a SQL table")
# 'melt' the DataFrame i.e. a 'reverse pivot'
df_index_reset = df.reset_index()
df_index_reset[list(df_index_reset)[0]] = df_index_reset[list(df_index_reset)[0]].dt.date # datetime to date
sql_table_df = pd.melt(df_index_reset, id_vars=[list(df_index_reset)[0]])
first_col_name = list(sql_table_df)[0]
# drop the nans in the 'value' column
sql_table_df.dropna(subset=['value'], inplace=True)
# add data source and created_at column
sql_table_df['source'] = self.source
sql_table_df['created_at'] = date.today()
# replace the ticker with the corresponding instrument id
ticker_id_dict = self.get_ticker_instrument_attribute_dict(tickers=list(set(sql_table_df['variable'].values)),
attribute='id')
sql_table_df.replace({'variable': ticker_id_dict}, inplace=True)
# reshuffle columns and set column names
sql_table_df = sql_table_df[[first_col_name, 'value', 'source', 'created_at', 'variable']]
sql_table_df.columns = data_table.__table__.columns.keys()[1:] # first key is 'id' and should not be provided
return sql_table_df
@property
def source(self):
return self._source
@property
def as_of_date(self):
return self._as_of_date
@as_of_date.setter
def as_of_date(self, as_of_date: date):
if as_of_date is None:
self._as_of_date = (date.today() - | BDay(1) | pandas.tseries.offsets.BDay |
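# --- Added illustrative sketch (not part of the original source) ---
# A minimal, standalone demonstration of the wide-to-long "melt" pattern that
# _reformat_df_to_sql_table above describes: tickers as columns and dates as the
# index are reshaped into one (date, ticker, value) row per observation.
# The ticker names and prices below are made-up example data.
import pandas as pd
_demo_wide = pd.DataFrame(
    {"AAA": [1.0, 1.1], "BBB": [2.0, 2.1]},
    index=pd.to_datetime(["2021-01-01", "2021-01-02"]),
)
_demo_long = pd.melt(_demo_wide.reset_index(), id_vars=["index"])
# _demo_long now has columns ['index', 'variable', 'value'] -> date, ticker, price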
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pandas as pd
import tkinter as tk
from tkinter import *
from tkinter import filedialog
from datetime import datetime
from tkinter.messagebox import showinfo
from tkinter.messagebox import showwarning
from tkinter.font import Font
from os import path
import sys
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
print('running in a PyInstaller bundle')
else:
print('running in a normal Python process')
# ----------------------------------------------- GLOBAL VARIABLES
REF_FILE_NAME = ""
TEST_FILE_NAME = ""
MAIN_FILE_ONE_NAME = ""
MAIN_FILE_TWO_NAME = ""
SEQ_ONE_GAPS = []
SEQ_TWO_GAPS = []
SEQ_THREE_GAPS = []
SEQ_FOUR_GAPS = []
FOUR_SEQ_ALIGN = False
ALIGNMENT_WARNING = False
THRESHOLD = 1
LVL_SEL = "L1&L2"
PEP_COLUMNS = ["peptide", "Peptide", "Peptide sequence"]
START_COLUMNS = ["start", "Start", "Peptide start"]
REF_PEPTIDE_MAX_LENGTH = 50
TEST_PEPTIDE_MAX_LENGTH = 50
MAIN_PEPTIDE_MAX_LENGTH = 50
# ----------------------------------------------- CLASSES
class MainApplication:
def __init__(self, master):
self.master = master
self.canvas = tk.Canvas(master, width=550, height=690) # width=550, height=690
# to make a frame
self.frame = tk.Frame(master, bg='white')
############################################################################################
# Frame Input
# this frame is placed in the original frame
title_font = Font(family="Calibri", size=12, weight="bold")
self.frame_input = tk.Frame(self.frame, bd='10', padx=3, pady=3)
self.label_input_files = tk.Label(self.frame_input, text='Input File Paths', bd='3', fg='blue', font=title_font)
self.label_epitope_predictions = tk.Label(self.frame_input, text='Epitope Predictions', bd='3', fg='blue')
self.label_ref = tk.Label(self.frame_input, text='Sequence A', bd='3')
self.label_test = tk.Label(self.frame_input, text='Sequence B', bd='3')
self.label_database_searches = tk.Label(self.frame_input, text='Database Searches', bd='3', fg='blue')
self.label_main_one = tk.Label(self.frame_input, text='Sequence A', bd='3')
self.label_main_two = tk.Label(self.frame_input, text='Sequence B', bd='3')
self.entry_ref = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_test = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_main_one = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_main_two = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_ref = tk.Button(self.frame_input, text='Browse', command=self.browse_ref)
self.button_test = tk.Button(self.frame_input, text='Browse', command=self.browse_test)
self.button_main_one = tk.Button(self.frame_input, text='Browse', command=self.browse_main_one)
self.button_main_two = tk.Button(self.frame_input, text='Browse', command=self.browse_main_two)
self.label_indels_title = tk.Label(self.frame_input, text='CAVES Indel Search', bd='3', fg='blue')
self.label_indels_alignment = tk.Label(self.frame_input, text='Alignment', bd='3')
self.entry_indels_alignment = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_indels_alignment = tk.Button(self.frame_input, text='Browse', command=self.browse_alignment)
self.label_threshold_title = tk.Label(self.frame_input, text='Minimum Peptide Length', bd='3', fg='blue',
font=title_font)
self.entry_threshold = tk.Entry(self.frame_input, bd='3', justify="center")
self.label_threshold_helper = tk.Label(self.frame_input,
text='Default minimum is 1 amino acid',
bd='3', fg='red')
self.label_radio_title = tk.Label(self.frame_input, text='Level Selection', bd='3', fg='blue',
font=title_font)
self.frame_radio_buttons = tk.Frame(self.frame_input, bd='0', padx=3, pady=3)
self.level_selection = IntVar()
self.level_selection.set(1)
self.radio_both_lvls = Radiobutton(self.frame_radio_buttons, text="Level 1 and 2",
command=self.config_L1L2_entries,
variable=self.level_selection, value=1).grid(row=0, column=1, padx=50)
self.radio_lvl_one_only = Radiobutton(self.frame_radio_buttons, text="Level 1 only",
command=self.config_L1_only_entries,
variable=self.level_selection, value=2).grid(row=0, column=2)
self.radio_lvl_two_only = Radiobutton(self.frame_radio_buttons, text="Level 2 only",
command=self.config_L2_only_entries,
variable=self.level_selection, value=3).grid(row=0, column=3, padx=50)
self.label_result_file_title = tk.Label(self.frame_input, text='Results File', bd='3', fg='blue',
font=title_font)
self.entry_result_file = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_result_path = tk.Button(self.frame_input, text='Browse', command=self.browse_result_path)
# place used to place the widgets in the frame
self.label_input_files.place(relx=-0.005, rely=-0.01, relheight=0.05)
self.label_epitope_predictions.place(relx=0.025, rely=0.06, relheight=0.035)
self.label_ref.place(relx=0.05, rely=0.12, relheight=0.035)
self.entry_ref.place(relx=0.20, rely=0.12, relwidth=0.55, relheight=0.035)
self.button_ref.place(relx=0.80, rely=0.12, relheight=0.030)
self.label_test.place(relx=0.05, rely=0.18, relheight=0.035)
self.entry_test.place(relx=0.20, rely=0.18, relwidth=0.55, relheight=0.035)
self.button_test.place(relx=0.80, rely=0.18, relheight=0.030)
self.label_database_searches.place(relx=0.025, rely=0.26, relheight=0.035)
self.label_main_one.place(relx=0.05, rely=0.32, relheight=0.035)
self.entry_main_one.place(relx=0.20, rely=0.32, relwidth=0.55, relheight=0.035)
self.button_main_one.place(relx=0.80, rely=0.32, relheight=0.030)
self.label_main_two.place(relx=0.05, rely=0.38, relheight=0.035)
self.entry_main_two.place(relx=0.20, rely=0.38, relwidth=0.55, relheight=0.035)
self.button_main_two.place(relx=0.80, rely=0.38, relheight=0.030)
self.label_indels_title.place(relx=0.025, rely=0.46, relheight=0.035)
self.label_indels_alignment.place(relx=0.06, rely=0.52, relheight=0.035)
self.entry_indels_alignment.place(relx=0.20, rely=0.52, relwidth=0.55, relheight=0.035)
self.button_indels_alignment.place(relx=0.80, rely=0.52, relheight=0.030)
self.label_threshold_title.place(relx=-0.005, rely=0.60, relheight=0.05)
self.entry_threshold.place(relx=0.10, rely=0.69, relwidth=0.05, relheight=0.030)
self.label_threshold_helper.place(relx=0.175, rely=0.69, relheight=0.030)
self.label_radio_title.place(relx=-0.005, rely=0.76, relheight=0.05)
# Radio buttons are placed in their own frame (self.frame_radio_buttons)
self.label_result_file_title.place(relx=-0.005, rely=0.90, relheight=0.035)
self.entry_result_file.place(relx=0.20, rely=0.955, relwidth=0.55, relheight=0.035)
self.button_result_path.place(relx=0.80, rely=0.955, relheight=0.030)
############################################################################################
# placing the buttons below
submit_font = Font(family="Calibri", size=12)
self.frame_button = tk.Frame(self.frame, bd='3', padx=3, pady=3)
self.button_start = tk.Button(self.frame_button, text='Compare', font=submit_font, command=self.start_clicked)
self.button_cancel = tk.Button(self.frame_button, text='Cancel', font=submit_font, command=master.destroy)
self.button_cancel.place(relx=0.6, rely=0.22, relheight=0.6, relwidth=0.18)
self.button_start.place(relx=0.8, rely=0.22, relheight=0.6, relwidth=0.18)
###############################################################################################
# all the frames are placed in their respective positions
self.frame_input.place(relx=0.005, rely=0.005, relwidth=0.99, relheight=0.906)
self.frame_radio_buttons.place(relx=0.005, rely=0.8275, relwidth=1, relheight=1)
self.frame_button.place(relx=0.005, rely=0.915, relwidth=0.99, relheight=0.08)
self.frame.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.96)
self.canvas.pack()
##############################################################################################
def start_clicked(self):
print("Compare Start")
init_objects(self.level_selection.get())
global LVL_SEL
print("Reading epitope predictions: Sequence A file")
ref_raw = init_ref_raw(self.entry_ref.get().strip())
if ref_raw is None:
return
if LVL_SEL != "L2Only":
print("Reading epitope predictions: Sequence B file")
test_raw = init_test_raw(self.entry_test.get().strip())
if test_raw is None:
return
if LVL_SEL != "L1Only":
print("Reading database searches: Sequence A file")
main_raw_one = init_main_raw(self.entry_main_one.get().strip())
if main_raw_one is None:
print("Unable to read database searches: Sequence A file")
return
global MAIN_FILE_ONE_NAME
MAIN_FILE_ONE_NAME = self.entry_main_one.get().split("/").pop()
if LVL_SEL == "L1&L2":
print("Reading database searches: Sequence B file")
main_raw_two = init_main_raw(self.entry_main_two.get().strip())
if main_raw_two is None:
print("Unable to read database searches: Sequence B file")
return
global MAIN_FILE_TWO_NAME
MAIN_FILE_TWO_NAME = self.entry_main_two.get().split("/").pop()
if self.entry_indels_alignment.get().strip() != "":
print("Reading alignment file")
if not init_alignment(self.entry_indels_alignment.get().strip()):
print("Unable to create gap character lists")
return
else:
print("Empty alignment file path")
return
if not init_threshold(self.entry_threshold.get().strip()):
print("Minimum peptide length input error: minimum length set to 1")
result_file = generate_result_file(self.entry_result_file.get())
ref_dictionary = create_test_comparison_dict(ref_raw.to_dict('split'), REF_FILE_NAME)
if LVL_SEL == "L1&L2":
test_dictionary = create_test_comparison_dict(test_raw.to_dict('split'), TEST_FILE_NAME)
main_dict_one = create_main_comparison_dict(main_raw_one.to_dict('split'), MAIN_FILE_ONE_NAME)
main_dict_two = create_main_comparison_dict(main_raw_two.to_dict('split'), MAIN_FILE_TWO_NAME)
generate_test_comparison_results(ref_dictionary, test_dictionary)
generate_main_comparison_results(L1_matched_dict, "L1m", main_dict_one, main_dict_two)
generate_main_comparison_results(L1_partial_dict, "L1p", main_dict_one, main_dict_two)
generate_main_comparison_results(L1_novel_dict, "L1n", main_dict_one, main_dict_two)
finalize_L1L2_results(result_file)
if LVL_SEL == "L1Only":
test_dictionary = create_test_comparison_dict(test_raw.to_dict('split'), TEST_FILE_NAME)
generate_test_comparison_results(ref_dictionary, test_dictionary)
finalize_L1Only_results(result_file)
if LVL_SEL == "L2Only":
main_dict_one = create_main_comparison_dict(main_raw_one.to_dict('split'), MAIN_FILE_ONE_NAME)
generate_main_comparison_results(ref_dictionary, "L2", main_dict_one)
finalize_L2Only_results(result_file)
print("Compared")
showinfo("CAVES", "Comparison Complete!")
def browse_ref(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_ref.delete(0, tk.END)
self.entry_ref.insert(0, filename)
def browse_test(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_test.delete(0, tk.END)
self.entry_test.insert(0, filename)
def browse_main_one(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_main_one.delete(0, tk.END)
self.entry_main_one.insert(0, filename)
def browse_main_two(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_main_two.delete(0, tk.END)
self.entry_main_two.insert(0, filename)
def browse_alignment(self):
fasta_exts = [("FASTA files", "*.fasta"), ("FASTA files", "*.fna"), ("FASTA files", "*.ffn"),
("FASTA files", "*.faa"), ("FASTA files", "*.frn"), ("FASTA files", "*.fa"),
("FASTA files", "*.fsa")]
filename = filedialog.askopenfilename(title="Select a File", filetypes=fasta_exts)
self.entry_indels_alignment.delete(0, tk.END)
self.entry_indels_alignment.insert(0, filename)
def browse_result_path(self):
time = datetime.now().strftime("%Y-%m-%d_%H%M%S")
filename = filedialog.asksaveasfilename(initialfile="results_"+time, title="Results File",
filetypes=[("Excel files", "*.xlsx")])
self.entry_result_file.delete(0, tk.END)
self.entry_result_file.insert(0, filename)
def config_L1L2_entries(self):
self.entry_ref.config(state='normal')
self.entry_test.config(state='normal')
self.entry_main_one.config(state='normal')
self.entry_main_two.config(state='normal')
def config_L1_only_entries(self):
self.entry_main_one.delete(0, tk.END)
self.entry_main_two.delete(0, tk.END)
self.entry_ref.config(state='normal')
self.entry_test.config(state='normal')
self.entry_main_one.config(state='disabled')
self.entry_main_two.config(state='disabled')
def config_L2_only_entries(self):
self.entry_test.delete(0, tk.END)
self.entry_main_two.delete(0, tk.END)
self.entry_ref.config(state='normal')
self.entry_test.config(state='disabled')
self.entry_main_one.config(state='normal')
self.entry_main_two.config(state='disabled')
class ResultSheetObject:
def __init__(self):
self.origin_file_one = []
self.peptide_one = []
self.start_one = []
self.end_one = []
self.length_one = []
self.letters_matched = []
self.letters_matched_length = []
self.origin_file_two = []
self.peptide_two = []
self.start_two = []
self.end_two = []
self.length_two = []
self.mutated_pos = []
class PeptideObject:
def __init__(self, new_file, new_pep, new_start, new_end, new_length, new_suffix):
self.origin_file = new_file
self.peptide = new_pep
self.start = new_start
self.end = new_end
self.length = new_length
self.suffix = new_suffix
# ----------------------------------------------- RESULT OBJECTS
L1_novel = ResultSheetObject()
L1_partial = ResultSheetObject()
L1_matched = ResultSheetObject()
L2_novel = ResultSheetObject()
L2_partial = ResultSheetObject()
L2_matched = ResultSheetObject()
L1_novel_L2_novel = ResultSheetObject()
L1_novel_L2_partial = ResultSheetObject()
L1_novel_L2_matched = ResultSheetObject()
L1_partial_L2_novel = ResultSheetObject()
L1_partial_L2_partial = ResultSheetObject()
L1_partial_L2_matched = ResultSheetObject()
L1_matched_L2_novel = ResultSheetObject()
L1_matched_L2_partial = ResultSheetObject()
L1_matched_L2_matched = ResultSheetObject()
# ----------------------------------------------- LEVEL 1 DICTIONARIES
L1_novel_dict = {}
L1_partial_dict = {}
L1_matched_dict = {}
# ----------------------------------------------- FUNCTIONS
def init_objects(lvl_sel):
global REF_FILE_NAME
REF_FILE_NAME = ""
global TEST_FILE_NAME
TEST_FILE_NAME = ""
global MAIN_FILE_ONE_NAME
MAIN_FILE_ONE_NAME = ""
global MAIN_FILE_TWO_NAME
MAIN_FILE_TWO_NAME = ""
global SEQ_ONE_GAPS
SEQ_ONE_GAPS = []
global SEQ_TWO_GAPS
SEQ_TWO_GAPS = []
global SEQ_THREE_GAPS
SEQ_THREE_GAPS = []
global SEQ_FOUR_GAPS
SEQ_FOUR_GAPS = []
global FOUR_SEQ_ALIGN
FOUR_SEQ_ALIGN = False
global LVL_SEL
if lvl_sel == 1:
LVL_SEL = "L1&L2"
elif lvl_sel == 2:
LVL_SEL = "L1Only"
else:
LVL_SEL = "L2Only"
global L1_novel
L1_novel = ResultSheetObject()
global L1_partial
L1_partial = ResultSheetObject()
global L1_matched
L1_matched = ResultSheetObject()
global L2_novel
L2_novel = ResultSheetObject()
global L2_partial
L2_partial = ResultSheetObject()
global L2_matched
L2_matched = ResultSheetObject()
global L1_novel_L2_novel
L1_novel_L2_novel = ResultSheetObject()
global L1_novel_L2_partial
L1_novel_L2_partial = ResultSheetObject()
global L1_novel_L2_matched
L1_novel_L2_matched = ResultSheetObject()
global L1_partial_L2_novel
L1_partial_L2_novel = ResultSheetObject()
global L1_partial_L2_partial
L1_partial_L2_partial = ResultSheetObject()
global L1_partial_L2_matched
L1_partial_L2_matched = ResultSheetObject()
global L1_matched_L2_novel
L1_matched_L2_novel = ResultSheetObject()
global L1_matched_L2_partial
L1_matched_L2_partial = ResultSheetObject()
global L1_matched_L2_matched
L1_matched_L2_matched = ResultSheetObject()
global L1_novel_dict
L1_novel_dict = {}
global L1_partial_dict
L1_partial_dict = {}
global L1_matched_dict
L1_matched_dict = {}
def init_ref_raw(file_path):
if not path.exists(file_path):
print("Unable to find predictions file: " + file_path)
return None
global REF_FILE_NAME
REF_FILE_NAME = file_path.strip().split("/").pop() # gives last item in list which is file
ref_raw = None
for pep_col in PEP_COLUMNS:
for start_col in START_COLUMNS:
try:
ref_raw = | pd.read_csv(file_path, index_col=False, usecols={start_col, pep_col}) | pandas.read_csv |
from kfp.components import InputPath, OutputPath
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics)
def get_full_adj_prices(
# adj_price_dataset01_path: InputPath('DataFrame'),
# adj_price_dataset02_path: InputPath('DataFrame'),
# adj_price_dataset03_path: InputPath('DataFrame'),
# adj_price_dataset04_path: InputPath('DataFrame'),
# adj_price_dataset05_path: InputPath('DataFrame'),
# full_adj_prices_dataset_path: OutputPath('DataFrame')
adj_price_dataset01: Input[Dataset],
adj_price_dataset02: Input[Dataset],
adj_price_dataset03: Input[Dataset],
adj_price_dataset04: Input[Dataset],
adj_price_dataset05: Input[Dataset],
adj_price_dataset06: Input[Dataset],
adj_price_dataset07: Input[Dataset],
adj_price_dataset08: Input[Dataset],
adj_price_dataset09: Input[Dataset],
adj_price_dataset10: Input[Dataset],
adj_price_dataset11: Input[Dataset],
full_adj_prices_dataset: Output[Dataset]
):
import pandas as pd
df_adj_price_01 = pd.read_pickle(adj_price_dataset01.path)
df_adj_price_02 = pd.read_pickle(adj_price_dataset02.path)
df_adj_price_03 = pd.read_pickle(adj_price_dataset03.path)
df_adj_price_04 = pd.read_pickle(adj_price_dataset04.path)
df_adj_price_05 = pd.read_pickle(adj_price_dataset05.path)
df_adj_price_06 = pd.read_pickle(adj_price_dataset06.path)
df_adj_price_07 = pd.read_pickle(adj_price_dataset07.path)
df_adj_price_08 = | pd.read_pickle(adj_price_dataset08.path) | pandas.read_pickle |
from ..instruments import keithley6221 as k6221
from ..instruments import srs830 as srs
from ..instruments import tektronix3252 as tek
from .. import core
import time
import numpy as np
import pandas as pd
__all__ = ('ready_for_pulse', 'pulse', 'collect_lockin_data', 'cycle', 'egmr')
def ready_for_pulse(pulse_gen, voltage_pulse_amplitude, voltage_pulse_width, inverted = False):
"""Configure the Tektronix 3252 to fire a single pulse in burst mode with the given
amplitude and pulse width. With inverted=True the pulse is negative-going
(low = -amplitude, high = 0 V); otherwise it is positive-going (low = 0 V, high = amplitude)."""
if inverted:
tek.set_function_to_pulse(pulse_gen)
tek.set_polarity(pulse_gen, inverted = True)
tek.set_low_voltage(pulse_gen, '-'+voltage_pulse_amplitude)
tek.set_high_voltage(pulse_gen, '0v',)
tek.set_pulsewidth(pulse_gen, voltage_pulse_width)
tek.set_ncylces_for_burst_mode(pulse_gen, ncycles=1)
tek.set_run_mode_to_burst(pulse_gen)
else:
tek.set_function_to_pulse(pulse_gen)
tek.set_polarity(pulse_gen, )
tek.set_low_voltage(pulse_gen, '0v')
tek.set_high_voltage(pulse_gen, voltage_pulse_amplitude,)
tek.set_pulsewidth(pulse_gen, voltage_pulse_width)
tek.set_ncylces_for_burst_mode(pulse_gen, ncycles=1)
tek.set_run_mode_to_burst(pulse_gen)
return
def pulse(pulse_gen):
tek.start_pulse_gen(pulse_gen,)
tek.trigger(pulse_gen)
tek.stop_pulse_gen(pulse_gen,)
return
def collect_lockin_data(lockin, npoints, delay_between_points):
"""
Collects npoints of data with delay specified from lockin.
args:
lockin (pyvisa.resources.gpib.GPIBInstrument): srs830
npoints (int): Number of points to collect
delay_between_points (float): Sleep time (as timed by python)
returns:
(dict): Dict with keys 'R', 'theta'
"""
rs, thetas = [], []
for i in range(npoints):
r, theta = srs.get_lockin_r_theta(lockin)
time.sleep(delay_between_points)
rs.append(r)
thetas.append(theta)
return {'R':np.array(rs), 'theta':np.array(thetas)}
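# --- Added illustrative sketch (not part of the original source) ---
# collect_lockin_data needs a live SRS830 handle, so this guarded demo only shows
# the shape of the returned value using made-up numbers: a dict of equal-length
# NumPy arrays keyed by 'R' and 'theta', which converts directly to a DataFrame.
if __name__ == "__main__":
    _fake_reading = {'R': np.array([1.00e-6, 1.02e-6]), 'theta': np.array([0.5, 0.6])}
    print(pd.DataFrame(_fake_reading))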
def cycle(lockin, pulse_generator, voltage_pulse_amplitude, voltage_pulse_width, npoints_after_pulse, delay_between_points,):
"""Run one up/down pulse cycle: fire a positive pulse and record the lockin response,
then fire an inverted pulse and record again. Returns a dict with the concatenated
'R' and 'theta' arrays from both halves of the cycle."""
#start by pulsing up
ready_for_pulse(pulse_generator, voltage_pulse_amplitude, voltage_pulse_width, inverted = False)
#pulse up
pulse(pulse_generator)
updata = collect_lockin_data(lockin, npoints_after_pulse, delay_between_points)
#ready for down
ready_for_pulse(pulse_generator, voltage_pulse_amplitude, voltage_pulse_width, inverted = True)
#pulse down
pulse(pulse_generator)
downdata = collect_lockin_data(lockin, npoints_after_pulse, delay_between_points)
out = {'R':np.concatenate((updata['R'], downdata['R'])), 'theta': np.concatenate((updata['theta'], downdata['theta']))}
return out
def egmr(lockin, pulse_generator, voltage_pulse_amplitude,
voltage_pulse_width, ncycles, npoints_after_pulse, delay_between_points,):
"""Electrically pulsed GMR measurement: collect a baseline from the lockin, then
apply voltage pulse cycles (see cycle) and accumulate the lockin response (R, theta)
into a DataFrame."""
#get initial baseline
baseline = collect_lockin_data(lockin, npoints_after_pulse, delay_between_points)
out = | pd.DataFrame(baseline) | pandas.DataFrame |
import spotipy
from spotipy import client as cl
from spotipy.oauth2 import SpotifyOAuth
from spotipy.oauth2 import SpotifyClientCredentials
import requests # required for getting the csv file from spotify.chart without 403 error: Forbidden
from io import StringIO #
import json
import pandas as pd
from matplotlib import pyplot as plt
import plotly.express as px
from wordcloud import WordCloud
import multiprocessing as mp
from multiprocessing import Pool
from tqdm import tqdm
username = 'epalmer822'
CLIENT_ID = 'fc5e21deea874e2a9246c8e8935e9fe1'
CLIENT_SECRET = '<KEY>'
NUM_OF_SONGS = 200
FIRST_SONG_ROW = 3
NUM_OF_ATTRIBUTES = 5
numbersList = list(range(FIRST_SONG_ROW, NUM_OF_SONGS))
URL_ROW = 4 # adjusted for columns being indexed at 1 in csv
NUM_OF_SONGS += 1 # compensate for headers (first song from spotify CSVs is listed in row 3)
genres = {"RAP" : 0, "OTHER": 0}
genre_list = list()
# ---- <NAME>
csvFile = requests.get("https://spotifycharts.com/regional/us/weekly/latest/download", headers={
'User-Agent': 'Mozilla/5.0'}) # we can replace 28 and 44 with this. Gets csv file directly from spotify.chart website.
fileData = StringIO(csvFile.text) # requests and StringIO required to bypass error: urllib2.HTTPError: HTTP Error 403: Forbidden
inputCSV = pd.read_csv(fileData, header=1)
def checkCPUcount(): # ---- <NAME>
cores = mp.cpu_count()
processes = int(cores / 2)
print("Found " + str(cores) + " threads, using " + str(processes) + " processes.\n")
return cores
class SongDataClass:
dataPointCount = 0 # evaluate how many of the songs are actually being used as data points
songAttributeDict = dict()
c = SongDataClass()
# each song data list will contain, in order, ['name'], ['artist'], ['duration'], ['loudness'], ['tempo'], ['key'], and ['mode']
# indexed with the number of the position in top 200
# -------------------------------------------------------------------------------------------------------------------------
# notes: keys/modes are returned as numbers, but obviously a song isn't in the key of 5. Uses "standard pitch class notation"
# so on a scale of 0-11,
# 0 = C
# 1 = C#
# 2 = D
# 3 = Eb
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = Bb
# 11 = B
# modes are also labeled as integers, but no support for more than the two most common modes, so the confidence value on that has to be lower
# because songs aren't always written in major or minor
# 0 = minor
# 1 = major
def numberToKey(keyNum):
keys = {0: "C", 1: "C#", 2: "D", 3: "Eb", 4: "E", 5: "F", 6: "F#", 7: "G", 8: "G#", 9: "A", 10: "Bb", 11: "B"}
try:
return keys[keyNum]
except KeyError:
return "Invalid"
def numberToMode(modeNum):
modes = {0: "Minor", 1: "Major"}
try:
return modes[modeNum]
except KeyError:
return "Invalid"
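# --- Added illustrative example (not part of the original source) ---
# Quick sanity check of the pitch-class helpers defined above; Spotify encodes
# key 7 as G and mode 1 as Major, and out-of-range values fall back to "Invalid".
if __name__ == "__main__":
    print(numberToKey(7), numberToMode(1), numberToKey(12))  # G Major Invalid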
# ----------------------------------------------------------------------------------------------------------------------
# made a function so multiprocessing can work
def SongDataSocket(i): # ---- <NAME>
url = inputCSV.iloc[i - 1][
URL_ROW] # sets the song URL for this iteration of the loop; the constant is used in case spotify decides to reformat its CSV columns
songAnalysis = sp.audio_analysis(url) # fetch the song attributes
# confidenceValues = [songAnalysis['track']['tempo_confidence'],
# songAnalysis['track']['time_signature_confidence'],
# songAnalysis['track']['key_confidence'], songAnalysis['track']['mode_confidence']]
# may implement that if I decide on logic that i like; otherwise it'll probably just disappear in an update eventually
songAnalysis = songAnalysis['track']
urlData = sp.track(url)
trackData = \
{
'name': urlData['name'],
'artist': urlData['album']['artists'][0]['name'],
# for some reason spotify indexes all artist data in a list that only has one element, which is a dictionary. no clue why
'duration': songAnalysis['duration'],
'loudness': songAnalysis['loudness'],
'tempo': songAnalysis['tempo'],
'key': songAnalysis['key'],
'mode': songAnalysis['mode']
# 'extra data' : songAnalysis['extra data'] is always an option, we can use whatever data we decide we need
}
dataReturn = [str(i - 2), dict(trackData)]
return dataReturn
result_list = []
def log_results(result):
c.songAttributeDict.update({str(result[0]): result[1]})
pbar.update(1)
def main(allowed_processes): # ----<NAME>
pool = mp.Pool(processes=allowed_processes) # creates a multiprocessing pool to fill the dictionary file
for i in range(3, 203):
# apply_async is used because results are indexed by position, so the order in which
# they are physically stored doesn't matter; the correct data can be pulled out regardless.
# The callback feeds the output of SongDataSocket directly into log_results().
pool.apply_async(SongDataSocket, args=(i,), callback=log_results)
pool.close()
pool.join()
pool.close()
def print_results():
print(c.songAttributeDict)
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(username=username, scope="user-library-read", client_id=CLIENT_ID,
client_secret=CLIENT_SECRET, redirect_uri="https://spotify.com"))
if __name__ == '__main__':
sp.me() # to trigger a spotify authentication, without opening 200 chrome tabs
outFileName = "SpotifyDataDict.txt"
try:
with open(outFileName, 'r') as g:
DataUsable = json.loads(
g.read()) # checks to see if file exists, if it does, will just use existing data. If not, throws filenotfound, and will create a new file
except FileNotFoundError:
allowed_processes = checkCPUcount()
pbar = tqdm(total=200) # generates a progress bar
main(allowed_processes)
pbar.close() # ends the progress bar so it isn't displayed twice
with open(outFileName, 'w') as f: # outputs the data to the file specified above
f.write(json.dumps(c.songAttributeDict))
DataUsable = json.dumps(c.songAttributeDict)
# after this, DataUsable is set to what it needs to be regardless, but there will also be a file with the data so it does not need to be retrieved again.
# plots -------------------------------------------------------------------------------------------------------
# Key frequency ---- <NAME>
keyFreq = {"C": 0, "C#": 0, "D": 0, "Eb": 0, "E": 0, "F": 0, "F#": 0, "G": 0, "G#": 0, "A": 0, "Bb": 0, "B": 0}
for num in range(1, 201):
tempKey = DataUsable[str(num)]['key']
tempKey = numberToKey(tempKey)
if tempKey in keyFreq:
keyFreq[tempKey] += 1
else:
keyFreq[tempKey] = 1
labelList = list()
dataList = list()
for key in keyFreq:
labelList.append(key)
dataList.append(keyFreq[key])
KFP = plt.figure()
KFP = KFP.add_axes([0.05, 0.05, 0.90, 0.85])
KFP.bar(labelList, dataList, color="olive")
KFP.set_title("Frequency of musical keys in Spotify top 200")
for bar in KFP.patches:
KFP.annotate(format(bar.get_height(), '.0f'),
(bar.get_x() + bar.get_width() / 2,
bar.get_height() / 1.5), ha='center', va='center',
size=8, xytext=(0, 8),
textcoords='offset points')
plt.show()
# Artist Word Cloud ---- <NAME>
with open('SpotifyDataDict.txt', 'r') as inf:
data = eval(inf.read())
dataPd = pd.DataFrame.from_dict(data, orient='index')
tracks = ' '.join(
[artist for artist in dataPd['name']]) # change inputCSV to newDF if you decide to not change the inputCSV
wordcloud = WordCloud(max_words=100, background_color="white").generate(tracks)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
#Artist Diversity Bar Chart ---- <NAME>
# This shows how many artists are featured in top 200 chart to see how diverse the music chart is at the moment.
with open('SpotifyDataDict.txt', 'r') as inf:
data = eval(inf.read())
dataPd = | pd.DataFrame.from_dict(data, orient='index') | pandas.DataFrame.from_dict |
# <NAME>, 2019
# <EMAIL>
# Essentials
import os, sys, glob
import pandas as pd
import numpy as np
import nibabel as nib
# Stats
import scipy as sp
from scipy import stats
import statsmodels.api as sm
import pingouin as pg
# Plotting
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from IPython.display import clear_output
from scipy.stats import t
from numpy.matlib import repmat
from scipy.linalg import svd, schur
from statsmodels.stats import multitest
# Sklearn
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.linear_model import Ridge, Lasso, LinearRegression
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR, LinearSVR
from sklearn.metrics import make_scorer, r2_score, mean_squared_error, mean_absolute_error
from sklearn.decomposition import PCA
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
def my_get_cmap(which_type = 'qual1', num_classes = 8):
# Returns a nice set of colors to make a nice colormap using the color schemes
# from http://colorbrewer2.org/
#
# The online tool, colorbrewer2, is copyright <NAME>, <NAME> and
# The Pennsylvania State University.
if which_type == 'linden':
cmap_base = np.array([[255,105,97],[97,168,255],[178,223,138],[117,112,179],[255,179,71]])
elif which_type == 'pair':
cmap_base = np.array([[124,230,199],[255,169,132]])
elif which_type == 'qual1':
cmap_base = np.array([[166,206,227],[31,120,180],[178,223,138],[51,160,44],[251,154,153],[227,26,28],
[253,191,111],[255,127,0],[202,178,214],[106,61,154],[255,255,153],[177,89,40]])
elif which_type == 'qual2':
cmap_base = np.array([[141,211,199],[255,255,179],[190,186,218],[251,128,114],[128,177,211],[253,180,98],
[179,222,105],[252,205,229],[217,217,217],[188,128,189],[204,235,197],[255,237,111]])
elif which_type == 'seq_red':
cmap_base = np.array([[255,245,240],[254,224,210],[252,187,161],[252,146,114],[251,106,74],
[239,59,44],[203,24,29],[165,15,21],[103,0,13]])
elif which_type == 'seq_blu':
cmap_base = np.array([[247,251,255],[222,235,247],[198,219,239],[158,202,225],[107,174,214],
[66,146,198],[33,113,181],[8,81,156],[8,48,107]])
elif which_type == 'redblu_pair':
cmap_base = np.array([[222,45,38],[49,130,189]])
elif which_type == 'yeo17':
cmap_base = np.array([[97,38,107], # VisCent
[194,33,39], # VisPeri
[79,130,165], # SomMotA
[44,181,140], # SomMotB
[75,148,72], # DorsAttnA
[23,116,62], # DorsAttnB
[149,77,158], # SalVentAttnA
[222,130,177], # SalVentAttnB
[75,87,61], # LimbicA
[149,166,110], # LimbicB
[210,135,47], # ContA
[132,48,73], # ContB
[92,107,131], # ContC
[218,221,50], # DefaultA
[175,49,69], # DefaultB
[41,38,99], # DefaultC
[53,75,158] # TempPar
])
elif which_type == 'yeo17_downsampled':
cmap_base = np.array([[97,38,107], # VisCent
[79,130,165], # SomMotA
[75,148,72], # DorsAttnA
[149,77,158], # SalVentAttnA
[75,87,61], # LimbicA
[210,135,47], # ContA
[218,221,50], # DefaultA
[53,75,158] # TempPar
])
if cmap_base.shape[0] > num_classes: cmap = cmap_base[0:num_classes]
else: cmap = cmap_base
cmap = cmap / 255
return cmap
def get_sys_prop(coef, p_vals, idx, alpha = 0.05):
u_idx = np.unique(idx)
sys_prop = np.zeros((len(u_idx),2))
for i in u_idx:
# filter regions by system idx
coef_tmp = coef[idx == i]
p_tmp = p_vals[idx == i]
# threshold out non-sig coef
coef_tmp = coef_tmp[p_tmp < alpha]
# proportion of signed significant coefs within system i
sys_prop[i-1,0] = coef_tmp[coef_tmp > 0].shape[0] / np.sum(idx == i)
sys_prop[i-1,1] = coef_tmp[coef_tmp < 0].shape[0] / np.sum(idx == i)
return sys_prop
def get_sys_summary(coef, p_vals, idx, method = 'mean', alpha = 0.05, signed = True):
u_idx = np.unique(idx)
if signed == True:
sys_summary = np.zeros((len(u_idx),2))
else:
sys_summary = np.zeros((len(u_idx),1))
for i in u_idx:
# filter regions by system idx
coef_tmp = coef[idx == i]
p_tmp = p_vals[idx == i]
# threshold out non-sig coef
coef_tmp = coef_tmp[p_tmp < alpha]
# proportion of signed significant coefs within system i
if method == 'mean':
if signed == True:
if any(coef_tmp[coef_tmp > 0]): sys_summary[i-1,0] = np.mean(abs(coef_tmp[coef_tmp > 0]))
if any(coef_tmp[coef_tmp < 0]): sys_summary[i-1,1] = np.mean(abs(coef_tmp[coef_tmp < 0]))
else:
try:
sys_summary[i-1,0] = np.mean(coef_tmp[coef_tmp != 0])
except:
sys_summary[i-1,0] = 0
elif method == 'median':
if signed == True:
if any(coef_tmp[coef_tmp > 0]): sys_summary[i-1,0] = np.median(abs(coef_tmp[coef_tmp > 0]))
if any(coef_tmp[coef_tmp < 0]): sys_summary[i-1,1] = np.median(abs(coef_tmp[coef_tmp < 0]))
else:
try:
sys_summary[i-1,0] = np.median(coef_tmp[coef_tmp != 0])
except:
sys_summary[i-1,0] = 0
elif method == 'max':
if signed == True:
if any(coef_tmp[coef_tmp > 0]): sys_summary[i-1,0] = np.max(abs(coef_tmp[coef_tmp > 0]))
if any(coef_tmp[coef_tmp < 0]): sys_summary[i-1,1] = np.max(abs(coef_tmp[coef_tmp < 0]))
else:
try:
sys_summary[i-1,0] = np.max(coef_tmp[coef_tmp != 0])
except:
sys_summary[i-1,0] = 0
if np.any(np.isnan(sys_summary)):
sys_summary[np.isnan(sys_summary)] = 0
return sys_summary
def prop_bar_plot(sys_prop, sys_summary, labels = '', which_colors = 'yeo17', axlim = 'auto', title_str = '', fig_size = [4,4]):
f, ax = plt.subplots()
f.set_figwidth(fig_size[0])
f.set_figheight(fig_size[1])
y_pos = np.arange(1,sys_prop.shape[0]+1)
if which_colors == 'solid':
cmap = my_get_cmap(which_type = 'redblu_pair', num_classes = 2)
ax.barh(y_pos, sys_prop[:,0], color = cmap[0], edgecolor = 'k', align='center')
if sys_prop.shape[1] == 2:
ax.barh(y_pos, -sys_prop[:,1], color = cmap[1], edgecolor = 'k', align='center')
ax.axvline(linewidth = 1, color = 'k')
elif which_colors == 'opac_scaler':
cmap = my_get_cmap(which_type = 'redblu_pair', num_classes = 2)
for i in range(sys_prop.shape[0]):
ax.barh(y_pos[i], sys_prop[i,0], facecolor = np.append(cmap[0], sys_summary[i,0]), edgecolor = 'k', align='center')
if sys_prop.shape[1] == 2:
ax.barh(y_pos[i], -sys_prop[i,1], facecolor = np.append(cmap[1], sys_summary[i,1]), edgecolor = 'k', align='center')
ax.axvline(linewidth = 1, color = 'k')
else:
cmap = my_get_cmap(which_type = which_colors, num_classes = sys_prop.shape[0])
ax.barh(y_pos, sys_prop[:,0], color = cmap, linewidth = 0, align='center')
if sys_prop.shape[1] == 2:
ax.barh(y_pos, -sys_prop[:,1], color = cmap, linewidth = 0, align='center')
ax.axvline(linewidth = 1, color = 'k')
ax.set_yticks(y_pos)
ax.set_yticklabels(labels)
ax.invert_yaxis() # labels read top-to-bottom
if axlim == 'auto':
anchors = np.array([0.2, 0.4, 0.6, 0.8, 1])
the_max = np.round(np.max(sys_prop),2)
ax_anchor = anchors[find_nearest_above(anchors, the_max)]
ax.set_xlim([-ax_anchor-ax_anchor*.05, ax_anchor+ax_anchor*.05])
else:
if axlim == 0.2:
ax.set_xticks(np.arange(axlim[0], axlim[1]+0.1, 0.1))
elif axlim == 0.1:
ax.set_xticks(np.arange(axlim[0], axlim[1]+0.05, 0.05))
elif axlim == 1:
ax.set_xticks(np.arange(axlim[0], axlim[1]+0.5, 0.5))
else:
ax.set_xlim([axlim[0], axlim[1]])
ax.xaxis.grid(True, which='major')
ax.xaxis.tick_top()
if sys_prop.shape[1] == 2:
ax.set_xticklabels([str(abs(np.round(x,2))) for x in ax.get_xticks()])
ax.set_title(title_str)
# Hide the right and top spines
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
plt.show()
return f, ax
def update_progress(progress, my_str = ''):
bar_length = 20
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = my_str + " Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
def node_strength(A):
s = np.sum(A, axis = 0)
return s
def node_degree(A):
B = A > 0
k = np.sum(B, axis = 0)
return k
def ave_control(A, c = 1):
# FUNCTION:
# Returns values of AVERAGE CONTROLLABILITY for each node in a
# network, given the adjacency matrix for that network. Average
# controllability measures the ease by which input at that node can
# steer the system into many easily-reachable states.
#
# INPUT:
# A is the structural (NOT FUNCTIONAL) network adjacency matrix,
# such that the simple linear model of dynamics outlined in the
# reference is an accurate estimate of brain state fluctuations.
# Assumes all values in the matrix are positive, and that the
# matrix is symmetric.
#
# OUTPUT:
# Vector of average controllability values for each node
#
# Bassett Lab, University of Pennsylvania, 2016.
# Reference: <NAME>, Cieslak, Telesford, <NAME>,
# <NAME>, Grafton & Bassett, Nature Communications
# 6:8414, 2015.
u, s, vt = svd(A) # singular value decomposition
A = A/(c + s[0]) # Matrix normalization
T, U = schur(A,'real') # Schur stability
midMat = np.multiply(U,U).transpose()
v = np.matrix(np.diag(T)).transpose()
N = A.shape[0]
P = np.diag(1 - np.matmul(v,v.transpose()))
P = repmat(P.reshape([N,1]), 1, N)
values = sum(np.divide(midMat,P))
return values
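# --- Added illustrative usage (not part of the original source) ---
# ave_control expects a symmetric, non-negative structural adjacency matrix and
# returns one average-controllability value per node. The matrix below is random
# demo data, not a real connectome.
if __name__ == "__main__":
    _A_demo = np.random.rand(10, 10)
    _A_demo = (_A_demo + _A_demo.T) / 2
    np.fill_diagonal(_A_demo, 0)
    print(ave_control(_A_demo))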
def ave_control_alt(A, c = 0.99):
# FUNCTION:
# Returns values of AVERAGE CONTROLLABILITY for each node in a
# network, given the adjacency matrix for that network. Average
# controllability measures the ease by which input at that node can
# steer the system into many easily-reachable states.
#
# INPUT:
# A is the structural (NOT FUNCTIONAL) network adjacency matrix,
# such that the simple linear model of dynamics outlined in the
# reference is an accurate estimate of brain state fluctuations.
# Assumes all values in the matrix are positive, and that the
# matrix is symmetric.
#
# OUTPUT:
# Vector of average controllability values for each node
#
# Bassett Lab, University of Pennsylvania, 2016.
# Reference: <NAME>, Cieslak, Telesford, <NAME>,
# <NAME>, <NAME>, Nature Communications
# 6:8414, 2015.
u, s, vt = svd(A) # singular value decomposition
A = (A/s[0])*c # Matrix normalization
T, U = schur(A,'real') # Schur stability
midMat = np.multiply(U,U).transpose()
v = np.matrix(np.diag(T)).transpose()
N = A.shape[0]
P = np.diag(1 - np.matmul(v,v.transpose()))
P = repmat(P.reshape([N,1]), 1, N)
values = sum(np.divide(midMat,P))
return values
def modal_control(A, c = 1):
# FUNCTION:
# Returns values of MODAL CONTROLLABILITY for each node in a
# network, given the adjacency matrix for that network. Modal
# controllability indicates the ability of that node to steer the
# system into difficult-to-reach states, given input at that node.
#
# INPUT:
# A is the structural (NOT FUNCTIONAL) network adjacency matrix,
# such that the simple linear model of dynamics outlined in the
# reference is an accurate estimate of brain state fluctuations.
# Assumes all values in the matrix are positive, and that the
# matrix is symmetric.
#
# OUTPUT:
# Vector of modal controllability values for each node
#
# Bassett Lab, University of Pennsylvania, 2016.
# Reference: <NAME>, Cieslak, Telesford, Yu, Kahn, Medaglia,
# Vettel, Miller, Grafton & Bassett, Nature Communications
# 6:8414, 2015.
u, s, vt = svd(A) # singular value decomposition
A = A/(c + s[0]) # Matrix normalization
T, U = schur(A,'real') # Schur stability
eigVals = np.diag(T)
N = A.shape[0]
phi = np.zeros(N,dtype = float)
for i in range(N):
Al = U[i,] * U[i,]
Ar = (1.0 - np.power(eigVals,2)).transpose()
phi[i] = np.matmul(Al, Ar)
return phi
def modal_control_alt(A, c = 0.99):
# FUNCTION:
# Returns values of MODAL CONTROLLABILITY for each node in a
# network, given the adjacency matrix for that network. Modal
# controllability indicates the ability of that node to steer the
# system into difficult-to-reach states, given input at that node.
#
# INPUT:
# A is the structural (NOT FUNCTIONAL) network adjacency matrix,
# such that the simple linear model of dynamics outlined in the
# reference is an accurate estimate of brain state fluctuations.
# Assumes all values in the matrix are positive, and that the
# matrix is symmetric.
#
# OUTPUT:
# Vector of modal controllability values for each node
#
# Bassett Lab, University of Pennsylvania, 2016.
# Reference: <NAME>, Cieslak, Telesford, <NAME>, Medaglia,
# Vettel, Miller, Grafton & Bassett, Nature Communications
# 6:8414, 2015.
u, s, vt = svd(A) # singular value decomposition
A = (A/s[0])*c # Matrix normalization
T, U = schur(A,'real') # Schur stability
eigVals = np.diag(T)
N = A.shape[0]
phi = np.zeros(N,dtype = float)
for i in range(N):
Al = U[i,] * U[i,]
Ar = (1.0 - np.power(eigVals,2)).transpose()
phi[i] = np.matmul(Al, Ar)
return phi
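# --- Added illustrative usage (not part of the original source) ---
# modal_control takes the same symmetric, non-negative adjacency matrix as
# ave_control and returns one modal-controllability value per node; the *_alt
# variants only differ in how the matrix is normalised (spectral radius scaled to c = 0.99).
if __name__ == "__main__":
    _A_demo = np.random.rand(10, 10)
    _A_demo = (_A_demo + _A_demo.T) / 2
    np.fill_diagonal(_A_demo, 0)
    print(modal_control(_A_demo))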
def mark_outliers(x, thresh = 3, c = 1.4826):
my_med = np.median(x)
mad = np.median(abs(x - my_med))/c
cut_off = mad * thresh
upper = my_med + cut_off
lower = my_med - cut_off
outliers = np.logical_or(x > upper, x < lower)
return outliers
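# --- Added illustrative usage (not part of the original source) ---
# mark_outliers flags values more than `thresh` scaled median-absolute-deviations
# away from the median. With the demo data below only the final value is flagged.
if __name__ == "__main__":
    _x_demo = np.array([1.0, 1.1, 0.9, 1.05, 0.95, 10.0])
    print(mark_outliers(_x_demo))  # [False False False False False  True]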
def winsorize_outliers_signed(x, thresh = 3, c = 1.4826):
my_med = np.median(x)
mad = np.median(abs(x - my_med))/c
cut_off = mad * thresh
upper = my_med + cut_off
lower = my_med - cut_off
pos_outliers = x > upper
neg_outliers = x < lower
if pos_outliers.any() and ~neg_outliers.any():
x_out = sp.stats.mstats.winsorize(x, limits = (0,0.05))
elif ~pos_outliers.any() and neg_outliers.any():
x_out = sp.stats.mstats.winsorize(x, limits = (0.05,0))
elif pos_outliers.any() and neg_outliers.any():
x_out = sp.stats.mstats.winsorize(x, limits = 0.05)
else:
x_out = x
return x_out
def get_synth_cov(df, cov = 'ageAtScan1_Years', stp = 1):
# Synthetic cov data
X_range = [np.min(df[cov]), np.max(df[cov])]
X = np.arange(X_range[0],X_range[1],stp)
X = X.reshape(-1,1)
return X
def get_fdr_p(p_vals, alpha = 0.05):
out = multitest.multipletests(p_vals, alpha = alpha, method = 'fdr_bh')
p_fdr = out[1]
return p_fdr
def get_fdr_p_df(p_vals, alpha = 0.05, rows = False):
if rows:
p_fdr = pd.DataFrame(index = p_vals.index, columns = p_vals.columns)
for row, data in p_vals.iterrows():
p_fdr.loc[row,:] = get_fdr_p(data.values)
else:
p_fdr = pd.DataFrame(index = p_vals.index,
columns = p_vals.columns,
data = np.reshape(get_fdr_p(p_vals.values.flatten(), alpha = alpha), p_vals.shape))
return p_fdr
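# --- Added illustrative usage (not part of the original source) ---
# get_fdr_p applies Benjamini-Hochberg correction to a flat array of p-values;
# the demo values below are arbitrary.
if __name__ == "__main__":
    _p_demo = np.array([0.001, 0.01, 0.03, 0.2, 0.7])
    print(get_fdr_p(_p_demo))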
def compute_null(df, df_z, num_perms = 1000, method = 'pearson'):
np.random.seed(0)
null = np.zeros((num_perms,df_z.shape[1]))
for i in range(num_perms):
if i%10 == 0: update_progress(i/num_perms, df.name)
null[i,:] = df_z.reset_index(drop = True).corrwith(df.sample(frac = 1).reset_index(drop = True), method = method)
update_progress(1, df.name)
return null
def get_null_p(coef, null):
num_perms = null.shape[0]
num_vars = len(coef)
p_perm = np.zeros((num_vars,))
for i in range(num_vars):
r_obs = abs(coef[i])
r_perm = abs(null[:,i])
p_perm[i] = np.sum(r_perm >= r_obs) / num_perms
return p_perm
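# --- Added illustrative usage (not part of the original source) ---
# get_null_p turns a permutation null distribution (num_perms x num_vars) into
# permutation p-values for the observed coefficients, comparing absolute values.
# Both arrays below are random demo data.
if __name__ == "__main__":
    _null_demo = np.random.uniform(-0.3, 0.3, size=(1000, 5))
    _coef_demo = np.array([0.05, -0.1, 0.25, 0.0, 0.4])
    print(get_null_p(_coef_demo, _null_demo))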
def run_pheno_correlations(df_phenos, df_z, method = 'pearson', assign_p = 'permutation', nulldir = os.getcwd()):
df_out = pd.DataFrame(columns = ['pheno','variable','coef', 'p'])
phenos = df_phenos.columns
for pheno in phenos:
df_tmp = pd.DataFrame(index = df_z.columns, columns = ['coef', 'p'])
if assign_p == 'permutation':
# Get true correlation
df_tmp.loc[:,'coef'] = df_z.corrwith(df_phenos.loc[:,pheno], method = method)
# Get null
if os.path.exists(os.path.join(nulldir,'null_' + pheno + '_' + method + '.npy')): # if null exists, load it
null = np.load(os.path.join(nulldir,'null_' + pheno + '_' + method + '.npy'))
else: # otherwise, compute and save it out
null = compute_null(df_phenos.loc[:,pheno], df_z, num_perms = 1000, method = method)
np.save(os.path.join(nulldir,'null_' + pheno + '_' + method), null)
# Compute p-values using null
df_tmp.loc[:,'p'] = get_null_p(df_tmp.loc[:,'coef'].values, null)
elif assign_p == 'parametric':
if method == 'pearson':
for col in df_z.columns:
df_tmp.loc[col,'coef'] = sp.stats.pearsonr(df_phenos.loc[:,pheno], df_z.loc[:,col])[0]
df_tmp.loc[col,'p'] = sp.stats.pearsonr(df_phenos.loc[:,pheno], df_z.loc[:,col])[1]
if method == 'spearman':
for col in df_z.columns:
df_tmp.loc[col,'coef'] = sp.stats.spearmanr(df_phenos.loc[:,pheno], df_z.loc[:,col])[0]
df_tmp.loc[col,'p'] = sp.stats.spearmanr(df_phenos.loc[:,pheno], df_z.loc[:,col])[1]
elif assign_p == 'none':
df_tmp.loc[:,'coef'] = df_z.corrwith(df_phenos.loc[:,pheno], method = method)
# append
df_tmp.reset_index(inplace = True); df_tmp.rename(index=str, columns={'index': 'variable'}, inplace = True); df_tmp['pheno'] = pheno
df_out = df_out.append(df_tmp, sort = False)
df_out.set_index(['pheno','variable'], inplace = True)
return df_out
def run_pheno_partialcorrs(df_phenos, df_z, method = 'pearson'):
df_input = pd.concat((df_phenos, df_z), axis = 1)
if method == 'pearson': df_out = pd.DataFrame(columns = ['pheno','variable','coef', 'p', 'BF10'])
else: df_out = | pd.DataFrame(columns = ['pheno','variable','coef', 'p']) | pandas.DataFrame |
"""Mapping utilities."""
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.utils import checks
def reverse_mapping(mapping: tp.Mapping) -> dict:
"""Reverse a mapping.
Returns a dict."""
return {v: k for k, v in mapping.items()}
def to_mapping(mapping_like: tp.MappingLike, reverse: bool = False) -> dict:
"""Convert mapping-like object to a mapping.
Enable `reverse` to apply `reverse_mapping` on the result dict."""
if checks.is_namedtuple(mapping_like):
mapping = {v: k for k, v in mapping_like._asdict().items()}
if -1 not in mapping_like:
mapping[-1] = None
elif not checks.is_mapping(mapping_like):
if checks.is_index(mapping_like):
mapping_like = pd.Series(mapping_like)
if checks.is_series(mapping_like):
mapping = mapping_like.to_dict()
else:
mapping = dict(enumerate(mapping_like))
else:
mapping = dict(mapping_like)
if reverse:
mapping = reverse_mapping(mapping)
return mapping
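# --- Added illustrative usage (not part of the original source) ---
# to_mapping normalises several mapping-like inputs to a plain dict: sequences are
# enumerated, Series use their index as keys, and reverse=True swaps keys and values.
if __name__ == "__main__":
    print(to_mapping(['long', 'short']))                # {0: 'long', 1: 'short'}
    print(to_mapping(['long', 'short'], reverse=True))  # {'long': 0, 'short': 1}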
def apply_mapping(obj: tp.Any,
mapping_like: tp.Optional[tp.MappingLike] = None,
reverse: bool = False,
ignore_case: bool = True,
ignore_underscores: bool = True,
ignore_other_types: bool = True,
na_sentinel: tp.Any = None) -> tp.Any:
"""Apply mapping on object using a mapping-like object.
Args:
obj (any): Any object.
Can take a scalar, tuple, list, set, frozenset, NumPy array, Index, Series, and DataFrame.
mapping_like (mapping_like): Any mapping-like object.
See `to_mapping`.
reverse (bool): See `reverse` in `to_mapping`.
ignore_case (bool): Whether to ignore the case if the key is a string.
ignore_underscores (bool): Whether to ignore underscores if the key is a string.
ignore_other_types (bool): Whether to ignore other data types. Otherwise, throws an error.
na_sentinel (any): Value to mark “not found”.
"""
if mapping_like is None:
return na_sentinel
if ignore_case and ignore_underscores:
key_func = lambda x: x.lower().replace('_', '')
elif ignore_case:
key_func = lambda x: x.lower()
elif ignore_underscores:
key_func = lambda x: x.replace('_', '')
else:
key_func = lambda x: x
mapping = to_mapping(mapping_like, reverse=reverse)
key_types = set()
new_mapping = dict()
for k, v in mapping.items():
if pd.isnull(k):
na_sentinel = v
else:
if isinstance(k, str):
k = key_func(k)
new_mapping[k] = v
key_types.add(type(k))
def _type_in_key_types(x_type: type) -> bool:
for key_type in key_types:
if x_type is key_type:
return True
x_dtype = np.dtype(x_type)
key_dtype = np.dtype(key_type)
if x_dtype is key_dtype:
return True
if np.issubdtype(x_dtype, np.number) and np.issubdtype(key_dtype, np.number):
return True
if np.issubdtype(x_dtype, np.bool_) and np.issubdtype(key_dtype, np.bool_):
return True
if np.issubdtype(x_dtype, np.flexible) and np.issubdtype(key_dtype, np.flexible):
return True
return False
def _converter(x: tp.Any) -> tp.Any:
if pd.isnull(x):
return na_sentinel
if isinstance(x, str):
x = key_func(x)
return new_mapping[x]
if _type_in_key_types(type(obj)):
return _converter(obj)
if isinstance(obj, (tuple, list, set, frozenset)):
result = [apply_mapping(
v,
mapping_like=mapping_like,
reverse=reverse,
ignore_case=ignore_case,
ignore_underscores=ignore_underscores,
ignore_other_types=ignore_other_types,
na_sentinel=na_sentinel
) for v in obj]
return type(obj)(result)
if isinstance(obj, np.ndarray):
if obj.size == 0:
return obj
if _type_in_key_types(type(obj.item(0))):
return np.vectorize(_converter)(obj)
if not ignore_other_types:
raise ValueError(f"Type is {type(obj.item(0))}, must be one of types {key_types}")
return obj
if isinstance(obj, pd.Series):
if obj.size == 0:
return obj
if _type_in_key_types(type(obj.iloc[0])):
return obj.map(_converter)
if not ignore_other_types:
raise ValueError(f"Type is {type(obj.iloc[0])}, must be one of types {key_types}")
return obj
if isinstance(obj, pd.Index):
if obj.size == 0:
return obj
if _type_in_key_types(type(obj[0])):
return obj.map(_converter)
if not ignore_other_types:
raise ValueError(f"Type is {type(obj[0])}, must be one of types {key_types}")
return obj
if isinstance(obj, pd.DataFrame):
if obj.size == 0:
return obj
series = []
for sr_name, sr in obj.iteritems():
if _type_in_key_types(type(sr.iloc[0])):
series.append(sr.map(_converter))
else:
if not ignore_other_types:
raise ValueError(f"Type is {type(sr.iloc[0])}, must be one of types {key_types}")
series.append(sr)
return | pd.concat(series, axis=1, keys=obj.columns) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 17:13:42 2018
@author: <NAME>
"""
"""
this script is used to test which bearing has failed using the trained Kmeans model.
"""
# import the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from utils import cal_max_freq,plotlabels
import os
def main():
try:
# user input for the path of the dataset
filedir = input("enter the complete directory path ")
filepath = input("enter the folder name")
# load the files
all_files = os.listdir(filedir)
freq_max1,freq_max2,freq_max3,freq_max4,freq_max5 = cal_max_freq(all_files,filepath)
except IOError:
print("you have entered either the wrong data directory path or filepath")
# load the model
filename = "kmeanModel.npy"
model = np.load(filename).item()
# checking the iteration
if (filepath == "1st_test/"):
rhigh = 8
else:
rhigh = 4
testlabels = []
for i in range(0,rhigh):
print("checking for the bearing",i+1)
result = | pd.DataFrame() | pandas.DataFrame |
import torch
import numpy as np
import pandas as pd
import torch.nn.functional as F
import torch.nn as nn
from sklearn import preprocessing
from util.load_data_util import get_batch_loader
EPOCHS = 500
BATCH_SIZE = 1000
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
""" ************************************************************************************ """
""" 数据读取和转换 """
""" ************************************************************************************ """
def load_dataset():
header = ['user_id', 'age', 'gender', 'occupation', 'zip_code']
df_user = pd.read_csv('../data/Movielens100K/u.user', sep='|', names=header)
header = ['item_id', 'title', 'release_date', 'video_release_date', 'IMDb_URL', 'unknown', 'Action', 'Adventure',
'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror',
'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
df_item = | pd.read_csv('../data/Movielens100K/u.item', sep='|', names=header, encoding="ISO-8859-1") | pandas.read_csv |
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import sqlite3
def load_data():
"""loads the required .csv files."""
return pd.read_csv(r'C:\Users\Dell\OneDrive\Desktop\netflix_titles.csv')
def db_connection():
"""creates a connection to the database."""
return sqlite3.connect("netflix.db")
def init_app():
"""initializes all the required variables"""
df = load_data()
con = db_connection()
cur = con.cursor()
df.to_sql('MyTable', con, if_exists='append', index=False)
df = | pd.read_sql_query("SELECT * from MyTable", con) | pandas.read_sql_query |
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from ast import literal_eval
import numpy as np
import pandas as pd
from fooltrader.consts import CHINA_STOCK_INDEX, USA_STOCK_INDEX
from fooltrader.contract import data_contract
from fooltrader.contract import files_contract
from fooltrader.contract.files_contract import get_kdata_dir, get_kdata_path
from fooltrader.settings import US_STOCK_CODES
from fooltrader.utils.utils import get_file_name, to_time_str
logger = logging.getLogger(__name__)
def convert_to_list_if_need(input):
if input and "[" in input:
return literal_eval(input)
else:
return input
# meta
def get_security_list(security_type='stock', exchanges=['sh', 'sz'], start=None, end=None,
mode='simple', start_date=None, codes=None):
"""
get security list.
Parameters
----------
security_type : str
{‘stock’, 'future'},default: stock
exchanges : list
['sh', 'sz','nasdaq','nyse','amex'],default: ['sh','sz']
start : str
the start code,default:None
only works when exchanges is ['sh','sz']
end : str
the end code,default:None
only works when exchanges is ['sh','sz']
mode : str
whether to parse more security info,{'simple','es'},default:'simple'
start_date : Timestamp str or Timestamp
the filter for start list date,default:None
codes : list
the exact codes to query,default:None
Returns
-------
DataFrame
the security list
"""
if security_type == 'stock':
df = pd.DataFrame()
df_usa = pd.DataFrame()
for exchange in exchanges:
the_path = files_contract.get_security_list_path(security_type, exchange)
if os.path.exists(the_path):
if exchange == 'sh' or exchange == 'sz':
if mode == 'simple':
df1 = pd.read_csv(the_path,
converters={'code': str})
else:
df1 = pd.read_csv(the_path,
converters={'code': str,
'sinaIndustry': convert_to_list_if_need,
'sinaConcept': convert_to_list_if_need,
'sinaArea': convert_to_list_if_need})
df = df.append(df1, ignore_index=True)
elif exchange == 'nasdaq':
df_usa = pd.read_csv(the_path, dtype=str)
elif security_type == 'index':
df = pd.DataFrame(CHINA_STOCK_INDEX)
df_usa = pd.DataFrame()
if 'nasdaq' in exchanges:
df_usa = pd.DataFrame(USA_STOCK_INDEX)
if df.size > 0:
if start:
df = df[df["code"] <= end]
if end:
df = df[df["code"] >= start]
if start_date:
df['listDate'] = pd.to_datetime(df['listDate'])
df = df[df['listDate'] >= pd.Timestamp(start_date)]
df = df.set_index(df['code'], drop=False)
if df_usa.size > 0:
df_usa = df_usa.set_index(df_usa['code'], drop=False)
if codes:
df_usa = df_usa.loc[codes]
df = df.append(df_usa, ignore_index=True)
return df
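# --- Added illustrative usage (not part of the original source) ---
# With security_type='index' no csv files are needed: the list is built from the
# CHINA_STOCK_INDEX constant (plus USA_STOCK_INDEX when 'nasdaq' is in exchanges),
# so this is a convenient smoke test even without downloaded data.
if __name__ == "__main__":
    print(get_security_list(security_type='index').head())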
def _get_security_item(code=None, id=None, the_type='stock'):
"""
get the security item.
Parameters
----------
code : str
the security code,default: None
id : str
the security id,default: None
the_type : str
the security type
Returns
-------
DataFrame
the security item
"""
df = get_security_list(security_type=the_type)
if id:
df = df.set_index(df['id'])
return df.loc[id,]
if code:
df = df.set_index(df['code'])
return df.loc[code,]
def to_security_item(security_item):
if type(security_item) == str:
if 'stock' in security_item:
security_item = _get_security_item(id=security_item, the_type='stock')
elif 'index' in security_item:
security_item = _get_security_item(id=security_item, the_type='index')
else:
security_item = _get_security_item(code=security_item)
return security_item
# tick
def get_ticks(security_item, the_date=None, start=None, end=None):
"""
get the ticks.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
the_date : TimeStamp str or TimeStamp
get the tick for the exact date
start : TimeStamp str or TimeStamp
start date
end: TimeStamp str or TimeStamp
end date
Yields
-------
DataFrame
"""
security_item = to_security_item(security_item)
if the_date:
tick_path = files_contract.get_tick_path(security_item, the_date)
yield _parse_tick(tick_path, security_item)
else:
tick_dir = files_contract.get_tick_dir(security_item)
if start or end:
if not start:
start = security_item['listDate']
if not end:
end = datetime.datetime.today()
tick_paths = [os.path.join(tick_dir, f) for f in
os.listdir(tick_dir) if
get_file_name(f) in pd.date_range(start=start, end=end)]
else:
tick_paths = [os.path.join(tick_dir, f) for f in
os.listdir(tick_dir)]
for tick_path in sorted(tick_paths):
yield _parse_tick(tick_path, security_item)
def _parse_tick(tick_path, security_item):
if os.path.isfile(tick_path):
df = pd.read_csv(tick_path)
df['timestamp'] = get_file_name(tick_path) + " " + df['timestamp']
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
df['code'] = security_item['code']
df['securityId'] = security_item['id']
return df
def get_available_tick_dates(security_item):
dir = files_contract.get_tick_dir(security_item)
return [get_file_name(f) for f in os.listdir(dir)]
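# Illustrative usage sketch (added; not part of the original fooltrader source)
# for the get_ticks generator documented above. The security id and dates are
# assumptions for demonstration only.
def _example_iterate_ticks():
    sizes = []
    for tick_df in get_ticks('stock_sh_600000', start='2017-01-01', end='2017-01-31'):
        if tick_df is not None:
            sizes.append(len(tick_df))
    return sizes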
# kdata
def get_kdata(security_item, the_date=None, start_date=None, end_date=None, fuquan='bfq', dtype=None, source='163',
level='day'):
"""
get kdata.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
the_date : TimeStamp str or TimeStamp
get the kdata for the exact date
start_date : TimeStamp str or TimeStamp
start date
end_date : TimeStamp str or TimeStamp
end date
fuquan : str
{"qfq","hfq","bfq"},default:"bfq"
dtype : type
the data type for the csv column,default: None
source : str
the data source,{'163','sina'},default: '163'
level : str or int
the kdata level,{1,5,15,30,60,'day','week','month'},default : 'day'
Returns
-------
DataFrame
"""
security_item = to_security_item(security_item)
    # Data from the 163 source is already merged and carries the adjustment factor; it all lives under the 'bfq' directory, so we only read from one place and convert as needed
if source == '163':
the_path = files_contract.get_kdata_path(security_item, source=source, fuquan='bfq')
else:
the_path = files_contract.get_kdata_path(security_item, source=source, fuquan=fuquan)
if os.path.isfile(the_path):
if not dtype:
dtype = {"code": str, 'timestamp': str}
df = pd.read_csv(the_path, dtype=dtype)
df.timestamp = df.timestamp.apply(lambda x: to_time_str(x))
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
if the_date:
if the_date in df.index:
return df.loc[the_date]
else:
return pd.DataFrame()
if not start_date:
if security_item['type'] == 'stock':
if type(security_item['listDate']) != str and np.isnan(security_item['listDate']):
start_date = '2002-01-01'
else:
start_date = security_item['listDate']
else:
start_date = datetime.datetime.today() - datetime.timedelta(days=30)
if not end_date:
end_date = datetime.datetime.today()
if start_date and end_date:
df = df.loc[start_date:end_date]
#
if source == '163' and security_item['type'] == 'stock':
if fuquan == 'bfq':
return df
if 'factor' in df.columns:
current_factor = df.tail(1).factor.iat[0]
            # hfq (backward-adjusted) prices are stable over time: just multiply by the cumulative factor
df.close *= df.factor
df.open *= df.factor
df.high *= df.factor
df.low *= df.factor
if fuquan == 'qfq':
                # qfq (forward-adjusted) prices are computed back from the latest factor
df.close /= current_factor
df.open /= current_factor
df.high /= current_factor
df.low /= current_factor
return df
return pd.DataFrame()
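# Illustrative usage sketch (added; not part of the original fooltrader source):
# fetching forward-adjusted (qfq) daily bars through the adjustment logic above.
# The security id and the date range are assumptions for demonstration only.
def _example_get_kdata_qfq():
    return get_kdata('stock_sh_600000', start_date='2017-01-01',
                     end_date='2017-12-31', fuquan='qfq', source='163')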
def get_latest_download_trading_date(security_item, return_next=True, source='163'):
df = get_kdata(security_item, source=source)
if len(df) == 0:
return pd.Timestamp(security_item['listDate'])
if return_next:
return df.index[-1] + pd.DateOffset(1)
else:
return df.index[-1]
def get_trading_dates(security_item, dtype='list', ignore_today=False, source='163', fuquan='bfq'):
df = get_kdata(security_item, source=source, fuquan=fuquan)
    if dtype == 'list' and len(df.index) > 0:
dates = df.index.strftime('%Y-%m-%d').tolist()
if ignore_today:
dates = [the_date for the_date in dates if the_date != datetime.datetime.today().strftime('%Y-%m-%d')]
return dates
return dates
return df.index
def kdata_exist(security_item, year, quarter, fuquan=None, source='163'):
df = get_kdata(security_item, fuquan=fuquan, source=source)
if "{}Q{}".format(year, quarter) in df.index:
return True
return False
# TODO:use join
def merge_to_current_kdata(security_item, df, fuquan='bfq'):
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
df1 = get_kdata(security_item, source='sina', fuquan=fuquan, dtype=str)
df1 = df1.append(df)
df1 = df1.drop_duplicates(subset='timestamp', keep='last')
df1 = df1.sort_index()
the_path = files_contract.get_kdata_path(security_item, source='sina', fuquan=fuquan)
df1.to_csv(the_path, index=False)
def time_index_df(df):
df = df.set_index(df['timestamp'])
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
#
# This script is intended to add new providers and update existing providers in the datastore from an Excel input file.
#
# Pass the PROD, TEST or DEV base url as the first commandline argument and the input file as the second
# argument. A third argument identifying the output file is optional. If it is not provided then the output file
# will be the timestamp appended with 'new-providers.xlsx'. For example:
#
# $ python3 new-providers.py https://********.cloudfunctions.net "input-file.xlsx" "output-file.xlsx"
#
# An attempt is made to create a new record for each of the entries in the input file.
# If the provider already exists then the record is updated with the values from the input file
# but the code value is not changed so that the provider's link remains valid. In all cases the results
# and the links for the providers are appended to the output Excel file.
#
# The script will try to use the credentials of a locally configured service account so it
# is important to provide the key file as an environment variable e.g.
#
# $ export GOOGLE_APPLICATION_CREDENTIALS="ppe-inventory-dev.json"
#
from google.cloud import datastore
import uuid
import sys
import logging
import pandas as pd
import urllib.parse
from email_validator import validate_email, EmailNotValidError
import datetime
baseUrl = ''
if len(sys.argv) > 2:
baseUrl = sys.argv[1]
input_file = sys.argv[2]
else:
print('Missing arguments i.e. first is the base url for the target environment and second is the input file.')
sys.exit(1)
now = datetime.datetime.now()
logfile = f'{now} new-providers.log'
logging.basicConfig(level=logging.INFO, filename=logfile)
print(f'Writing logs to "{logfile}" file ...')
logging.info(f'Base url is {baseUrl}')
logging.info(f'Input file is {input_file}')
if len(sys.argv) > 3:
output_file = sys.argv[3]
else:
output_file = f'{now} new-providers.xlsx'
sheet_in = pd.read_excel(input_file)
sheet_out = pd.ExcelWriter(output_file)
df = pd.DataFrame(
columns=['provider', 'code', 'link', 'site', 'acute', 'location', 'postcode', 'service_type', 'borough',
'contact_name_1', 'contact_name_2', 'email_1', 'email_2', 'email_3', 'telephone',
'pcn_network', 'practice_code', 'parent', 'parent_link', 'comment'])
logging.info(sheet_in.head)
logging.info(sheet_in.columns.ravel())
# Instantiates a client
datastore_client = datastore.Client()
# Helper functions to clean up input values
def get_clean_value(value):
output = ''
if | pd.notnull(value) | pandas.notnull |
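# Illustrative helper sketch (an assumption, not part of the original script):
# one way email cells read from the spreadsheet could be validated with the
# email_validator package imported above before writing to the datastore.
def _is_valid_email(value):
    if pd.isnull(value):
        return False
    try:
        validate_email(str(value).strip())
        return True
    except EmailNotValidError:
        return False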
"""Module to run a basic decision tree model
Author(s):
<NAME> (<EMAIL>)
"""
import pandas as pd
import numpy as np
import logging
from sklearn import preprocessing
from primrose.base.transformer import AbstractTransformer
class ExplicitCategoricalTransform(AbstractTransformer):
DEFAULT_NUMERIC = -9999
def __init__(self, categoricals):
"""initialize the ExplicitCategoricalTransform
Args:
categoricals: dictionary containing for each column to be transformed:
- transformations: list of strings to be executed on the data ('x' represents the current categorical variable)
- rename: if present, rename the current categorical variable to that name
- to_numeric: if true, attempt to apply to_numeric after previous transformations
"""
self.categoricals = categoricals
def fit(self, data):
pass
@staticmethod
def _process_transformations(data, input_data, categorical, x):
"""transform a column
Args:
data (dataframe): dataframe
            input_data (dict): JSON-style categorical config for this variable
            categorical (str): variable name
x (str): transformation string
Returns:
data (dataframe)
"""
if "transformations" in input_data.keys():
logging.info(
"Applying key {} to variable {}".format("transformations", categorical)
)
for transformation in input_data["transformations"]:
exec(transformation.format(x=x))
@staticmethod
def _process_rename(data, input_data, categorical):
"""rename a field
Args:
data (dataframe): dataframe
input configuration (JSON): JSON categorical config for this variable
categorical (str): varible name
Returns:
(tuple): tuple containing:
data (dataframe): dataframe
name (str): original name (if not "to_numeric": True), new_name otherwise
"""
if "rename" in input_data.keys():
logging.info("Applying key {} to variable {}".format("rename", categorical))
data = data.rename({categorical: input_data["rename"]}, axis="columns")
return data, input_data["rename"]
return data, categorical
@staticmethod
def _process_numeric(data, input_data, name):
"""convert column to numeric
Args:
data (dataframe): dataframe
input configuration (JSON): JSON categorical config for this variable
name (str): field name
Returns:
            data with the column converted to numeric
"""
if input_data.get("to_numeric", False):
logging.info("Applying key {} to variable {}".format("to_numeric", name))
# if there are errors converting to numerical values, we need to sub in a reasonable value
if sum( | pd.to_numeric(data[name], errors="coerce") | pandas.to_numeric |
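# Illustrative configuration sketch (an assumption, not taken from the primrose
# documentation): the shape of the `categoricals` dict described in the
# ExplicitCategoricalTransform constructor docstring above. The column name and
# the '{x}' placeholder expansion are invented for demonstration.
_example_categoricals = {
    "color": {
        "transformations": ["{x} = {x}.fillna('unknown')"],
        "rename": "color_clean",
        "to_numeric": False,
    }
}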
import pandas as pd
import glob
from datetime import datetime
location = "C:/Users/user/Python/Python-Rikard/Projects/excelfiler/*.xlsx"
excel_files = glob.glob(location)
pd.set_option("display.max_rows",183)
print(excel_files)
df1 = pd.DataFrame()
for file in excel_files:
df2 = pd.read_excel(file, index_col=0)
df1 = | pd.concat([df1, df2]) | pandas.concat |
#!/usr/local/bin/python3.9
# SPAWNING THREADS FOR COMPUTATION:
# well, 1 thread; if you do anything computationally expensive, tk will hang... so spawn a thread for the work, etc.
############################################################################################
# NEXT Stream
# flesh out the stats frame to show some nice visuals
############################################################################################
import tkinter as tk
from tkinter import Button, Entry
from tkinter.ttk import Notebook, Label, Frame
from PIL import Image, ImageTk
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
dataFile = '../clocktime.csv'
df = pd.read_csv(dataFile)
today = datetime.today().strftime('%m/%d/%y')
def clickSubmitRecord():
"""DOCUMENTATION"""
if recordDate_var.get() in df.values:
row = df[df['date'] == recordDate_var.get()].index[0]
# PRODUCTIVE TIME
df.loc[row, 'Lsec'] = df.loc[row, 'Lsec'] + int(recordProdSeconds_var.get())
if df.loc[row, 'Lsec'] > 60:
df.loc[row, 'Lmin'] = df.loc[row, 'Lmin'] + df.loc[row, 'Lsec'] // 60
df.loc[row, 'Lsec'] = df.loc[row, 'Lsec'] % 60
df.loc[row, 'Lmin'] = df.loc[row, 'Lmin'] + int(recordProdMinutes_var.get())
if df.loc[row, 'Lmin'] > 60:
df.loc[row, 'Lhour'] = df.loc[row, 'Lhour'] + df.loc[row, 'Lmin'] // 60
df.loc[row, 'Lmin'] = df.loc[row, 'Lmin'] % 60
df.loc[row, 'Lhour'] = df.loc[row, 'Lhour'] + int(recordProdHours_var.get())
# NON PRODUCTIVE TIME
df.loc[row, 'Rsec'] = df.loc[row, 'Rsec'] + int(recordNonProdSeconds_var.get())
if df.loc[row, 'Rsec'] > 60:
df.loc[row, 'Rmin'] = df.loc[row, 'Rmin'] + df.loc[row, 'Rsec'] // 60
df.loc[row, 'Rsec'] = df.loc[row, 'Rsec'] % 60
df.loc[row, 'Rmin'] = df.loc[row, 'Rmin'] + int(recordNonProdMinutes_var.get())
if df.loc[row, 'Rmin'] > 60:
df.loc[row, 'Rhour'] = df.loc[row, 'Rhour'] + df.loc[row, 'Rmin'] // 60
df.loc[row, 'Rmin'] = df.loc[row, 'Rmin'] % 60
df.loc[row, 'Rhour'] = df.loc[row, 'Rhour'] + int(recordNonProdHours_var.get())
# The final overwrite of the CSV
df.to_csv('../clocktime.csv', index=False)
elif recordDate_var.get() not in df.values:
newRecord = {
'date': recordDate_var.get(),
'Lhour': int(recordProdHours_var.get()),
'Lmin': int(recordProdMinutes_var.get()),
'Lsec': int(recordProdSeconds_var.get()),
'Rhour': int(recordNonProdHours_var.get()),
'Rmin': int(recordNonProdMinutes_var.get()),
'Rsec': int(recordNonProdSeconds_var.get())
}
newdf = df.append(newRecord, ignore_index=True)
# sort the data frame chronologically
        newdf = newdf.sort_values('date')
# The final overwrite of the CSV
newdf.to_csv(dataFile, index=False)
# Clear the entry fields after a successful writing to the data file
inputProdSeconds.delete(0, 'end')
inputProdMinutes.delete(0, 'end')
inputProdHours.delete(0, 'end')
inputNonProdSeconds.delete(0, 'end')
inputNonProdMinutes.delete(0, 'end')
inputNonProdHours.delete(0, 'end')
#subfrEntryFields.pack_forget()
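# Worked example (added note) of the carry-over logic above: 50 stored seconds
# plus a new 75-second entry gives 125 seconds, i.e. 2 carried minutes and 5
# remaining seconds.
assert (125 // 60, 125 % 60) == (2, 5)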
# Establish the Application instance
# Name the window's title
# Determine starting size and coordinates
# Enable tab control for the application
root = tk.Tk()
root.title("Productivity Data Tool")
root.geometry("850x900+200+200")
tabControl = Notebook(root)
# Create global variables for the applciation to use when adding a new record to the data file
# These are basically empty containers until we assign a value to them
recordDate_var = tk.StringVar()
recordProdHours_var = tk.StringVar()
recordProdMinutes_var = tk.StringVar()
recordProdSeconds_var = tk.StringVar()
recordNonProdHours_var = tk.StringVar()
recordNonProdMinutes_var = tk.StringVar()
recordNonProdSeconds_var = tk.StringVar()
# This is where we build the different menu tabs
# These are menu tabs
splashScreen = Frame(tabControl)
recordMgmt = Frame(tabControl)
statCharts = Frame(tabControl)
# SPLASH SCREEN FRAME
row1 = Label(splashScreen, text=" ")
row1.config(font=("Norse", 300))
row1.pack(fill=tk.X)
lblWelcome = Label(splashScreen,
text="Welcome To The Productivity App!")
lblWelcome.config(font=("Norse", 40))
lblWelcome.pack()
# Load chess timer image
load = Image.open("timer.png")
load = load.resize((50, 50), Image.ANTIALIAS)
render = ImageTk.PhotoImage(load)
img = Label(splashScreen, image=render)
img.image = render
img.pack()
# RECORD MANAGEMENT FRAME
## SUB FRAME for the [Add] button entry fields
subfrEntryFields = Frame(recordMgmt)
### SUB SUB FRAME for the date entry input fields
subsubfrEntryFieldDate = Frame(subfrEntryFields)
entryFieldDateSpacer = Label(subsubfrEntryFieldDate, text=" ")
entryFieldDateSpacer.config(font=("Norse", 16))
entryFieldDateSpacer.pack()
lblDate = Label(subsubfrEntryFieldDate, text="Date")
lblDate.config(font=("Norse", 16))
lblDate.pack()
inputDate = Entry(subsubfrEntryFieldDate,
textvariable=recordDate_var,
font=("Norse", 16, "italic"))
inputDate.insert(0, today)
inputDate.pack(side=tk.LEFT)
### Render
subsubfrEntryFieldDate.pack(side=tk.TOP)
### Spacing and labels for input fields
productiveTimeSpacer = Label(subfrEntryFields, text=" ")
productiveTimeSpacer.config(font=("Norse", 16))
productiveTimeSpacer.pack()
prodLabel = Label(subfrEntryFields, text="Productive Time")
prodLabel.config(font=("Norse", 30))
prodLabel.pack(side=tk.TOP)
prodDataLabels = Label(subfrEntryFields, text="Hours\t\t\tMinutes\t\t\tSeconds")
prodDataLabels.config(font=("Norse", 16))
prodDataLabels.pack(side=tk.TOP)
### SUB SUB FRAME for Productivity Data Entry Fields
subsubfrEntryFieldsProductivity = Frame(subfrEntryFields)
### Productive Hours
inputProdHours = Entry(subsubfrEntryFieldsProductivity,
textvariable=recordProdHours_var,
font=("Norse", 16, "italic"))
inputProdHours.pack(side=tk.LEFT)
### Productive Minutes
inputProdMinutes = Entry(subsubfrEntryFieldsProductivity,
textvariable=recordProdMinutes_var,
font=("Norse", 16, "italic"))
inputProdMinutes.pack(side=tk.LEFT)
### Productive Seconds
inputProdSeconds = Entry(subsubfrEntryFieldsProductivity,
textvariable=recordProdSeconds_var,
font=("Norse", 16, "italic"))
inputProdSeconds.pack(side=tk.LEFT)
### Render
subsubfrEntryFieldsProductivity.pack(side=tk.TOP)
### Spacing and labels for input fields
nonProductiveTimeSpacer = Label(subfrEntryFields, text=" ")
nonProductiveTimeSpacer.config(font=("Norse", 16))
nonProductiveTimeSpacer.pack()
nonProdLabel = Label(subfrEntryFields, text="Non-Productive Time")
nonProdLabel.config(font=("Norse", 30))
nonProdLabel.pack(side=tk.TOP)
nonProdDataLabels = Label(subfrEntryFields, text="Hours\t\t\tMinutes\t\t\tSeconds")
nonProdDataLabels.config(font=("Norse", 16))
nonProdDataLabels.pack(side=tk.TOP)
### SUB SUB FRAME for Non-Productivity Data Entry Fields
subsubfrEntryFieldsNonProductivity = Frame(subfrEntryFields)
### non-Productive Hours
inputNonProdHours = Entry(subsubfrEntryFieldsNonProductivity,
textvariable=recordNonProdHours_var,
font=("Norse", 16, "italic"))
inputNonProdHours.pack(side=tk.LEFT)
### non-Productive Minutes
inputNonProdMinutes = Entry(subsubfrEntryFieldsNonProductivity,
textvariable=recordNonProdMinutes_var,
font=("Norse", 16, "italic"))
inputNonProdMinutes.pack(side=tk.LEFT)
### non-Productive Seconds
inputNonProdSeconds = Entry(subsubfrEntryFieldsNonProductivity,
textvariable=recordNonProdSeconds_var,
font=("Norse", 16, "italic"))
inputNonProdSeconds.pack(side=tk.LEFT)
### Render
subsubfrEntryFieldsNonProductivity.pack(side=tk.TOP)
submitButtonTimeSpacer = Label(subfrEntryFields, text=" ")
submitButtonTimeSpacer.config(font=("Norse", 16))
submitButtonTimeSpacer.pack()
# the submit button for the new entries
submitRecord = Button(subfrEntryFields, text="Submit",
command=clickSubmitRecord)
submitRecord.config(font=("Norse", 20))
submitRecord.pack(side=tk.TOP)
subfrEntryFields.pack(side=tk.TOP)
# END hidden record entry field frame
# STATS FRAME
## SUB FRAME for stats visuals
subfrStats = Frame(statCharts)
df_stats = | pd.read_csv(dataFile) | pandas.read_csv |
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from numpy.testing import assert_almost_equal
from supervised.preprocessing.loo_encoder import LooEncoder
class LabelEncoderTest(unittest.TestCase):
def test_fit(self):
# training data
d = {"col1": ["a", "a", "c"], "col2": ["w", "e", "d"], "y": [1, 2, 0]}
df = pd.DataFrame(data=d)
le = LooEncoder(cols=["col1"])
le.fit(df[["col1", "col2"]], df["y"])
self.assertTrue(le.enc is not None)
self.assertTrue(le.enc._dim == 2)
assert_almost_equal(le.enc._mean, 1.0)
self.assertTrue("col1" in le.enc.mapping)
self.assertTrue("col2" not in le.enc.mapping)
def test_transform(self):
# training data
d = {"col1": ["a", "a", "c"]}
y = [1, 1, 0]
df = pd.DataFrame(data=d)
# fit encoder
le = LooEncoder(cols=["col1"])
le.fit(df, y)
t1 = le.transform(df)
# test data
d_test = {"col1": ["c", "c", "a"]}
df_test = pd.DataFrame(data=d_test)
# transform
t2 = le.transform(df_test)
assert_almost_equal(t1["col1"][0], t2["col1"][2])
assert_almost_equal(t1["col1"][2], t2["col1"][1])
def test_transform_with_new_and_missing_values(self):
# training data
d = {"col1": ["a", "a", "c"]}
y = [1, 1, 1]
df = pd.DataFrame(data=d)
# fit encoder
le = LooEncoder(cols=["col1"])
le.fit(df, y)
# test data
d_test = {"col1": ["c", "a", "d", "f", np.nan]}
df_test = pd.DataFrame(data=d_test)
# transform
t = le.transform(df_test)
assert_almost_equal(t["col1"][2], 1)
assert_almost_equal(t["col1"][3], 1)
assert_almost_equal(t["col1"][4], 1)
def test_to_and_from_json(self):
# training data
d = {"col1": ["a", "a", "c"]}
y = [1, 1, 1]
df = pd.DataFrame(data=d)
# fit encoder
le = LooEncoder()
le.fit(df, y)
# new encoder
new_le = LooEncoder()
new_le.from_json(le.to_json())
# test data
d_test = {"col1": ["c", "c", "a", "e"]}
df_test = | pd.DataFrame(data=d_test) | pandas.DataFrame |
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, recall_score, confusion_matrix, classification_report
import subprocess
import joblib
# Get multiple outputs in the same cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Ignore all warnings
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# %%
# Reading the dataset
dc = pd.read_csv("Churn_Modelling.csv")
dc.head(5)
# %%
# Dimension of the dataset
dc.shape
# %%
dc.describe(exclude= ['O']) # Describe all numerical columns
dc.describe(include = ['O']) # Describe all non-numerical/categorical columns
# %%
# Checking number of unique customers in the dataset
dc.shape[0], dc.CustomerId.nunique()
# %%
# churn value Distribution
dc["Exited"].value_counts()
# %%
dc.groupby(['Surname']).agg({'RowNumber':'count', 'Exited':'mean'}
).reset_index().sort_values(by='RowNumber', ascending=False).head()
# %%
dc.groupby(['Geography']).agg({'RowNumber':'count', 'Exited':'mean'}
).reset_index().sort_values(by='RowNumber', ascending=False)
# %%
sns.set(style="whitegrid")
sns.boxplot(y=dc['CreditScore'])
# %%
sns.boxplot(y=dc['Age'])
# %%
sns.violinplot(y = dc.Tenure)
# %%
sns.violinplot(y = dc['Balance'])
# %%
sns.set(style = 'ticks')
sns.distplot(dc.NumOfProducts, hist=True, kde=False)
# %%
# When dealing with numerical characteristics, one of the most useful statistics to examine is the data distribution.
# we can use Kernel-Density-Estimation plot for that purpose.
sns.kdeplot(dc.EstimatedSalary)
# %%
# Separating out different columns into various categories as defined above
target_var = ['Exited']
cols_to_remove = ['RowNumber', 'CustomerId']
# numerical columns
num_feats = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary']
# categorical columns
cat_feats = ['Surname', 'Geography', 'Gender', 'HasCrCard', 'IsActiveMember']
# %%
y = dc[target_var].values
dc.drop(cols_to_remove, axis=1, inplace=True)
# %%
# Keeping aside a test/holdout set
dc_train_val, dc_test, y_train_val, y_test = train_test_split(dc, y.ravel(), test_size = 0.1, random_state = 42)
# Splitting into train and validation set
dc_train, dc_val, y_train, y_val = train_test_split(dc_train_val, y_train_val, test_size = 0.12, random_state = 42)
dc_train.shape, dc_val.shape, dc_test.shape, y_train.shape, y_val.shape, y_test.shape
np.mean(y_train), np.mean(y_val), np.mean(y_test)
# %%
# Label encoding with the sklearn LabelEncoder
le = LabelEncoder()
# Label encoding of Gender variable
dc_train['Gender'] = le.fit_transform(dc_train['Gender'])
le_gender_mapping = dict(zip(le.classes_, le.transform(le.classes_)))
le_gender_mapping
# %%
# Encoding Gender feature for validation and test set
dc_val['Gender'] = dc_val.Gender.map(le_gender_mapping)
dc_test['Gender'] = dc_test.Gender.map(le_gender_mapping)
# Filling missing/NaN values created due to new categorical levels
dc_val['Gender'].fillna(-1, inplace=True)
dc_test['Gender'].fillna(-1, inplace=True)
# %%
dc_train.Gender.unique(), dc_val.Gender.unique(), dc_test.Gender.unique()
# %%
# With the sklearn method(LabelEncoder())
le_ohe = LabelEncoder()
ohe = OneHotEncoder(handle_unknown = 'ignore', sparse=False)
enc_train = le_ohe.fit_transform(dc_train.Geography).reshape(dc_train.shape[0],1)
ohe_train = ohe.fit_transform(enc_train)
ohe_train
# %%
# mapping between classes
le_ohe_geography_mapping = dict(zip(le_ohe.classes_, le_ohe.transform(le_ohe.classes_)))
le_ohe_geography_mapping
# %%
# Encoding Geography feature for validation and test set
enc_val = dc_val.Geography.map(le_ohe_geography_mapping).ravel().reshape(-1,1)
enc_test = dc_test.Geography.map(le_ohe_geography_mapping).ravel().reshape(-1,1)
# Filling missing/NaN values created due to new categorical levels
enc_val[np.isnan(enc_val)] = 9999
enc_test[np.isnan(enc_test)] = 9999
# %%
ohe_val = ohe.transform(enc_val)
ohe_test = ohe.transform(enc_test)
# %%
# Show what happens when a new value is inputted into the OHE
ohe.transform(np.array([[9999]]))
# %%
cols = ['country_' + str(x) for x in le_ohe_geography_mapping.keys()]
cols
# %%
# Adding to the respective dataframes
dc_train = pd.concat([dc_train.reset_index(), pd.DataFrame(ohe_train, columns = cols)], axis = 1).drop(['index'], axis=1)
dc_val = pd.concat([dc_val.reset_index(), pd.DataFrame(ohe_val, columns = cols)], axis = 1).drop(['index'], axis=1)
dc_test = pd.concat([dc_test.reset_index(), pd.DataFrame(ohe_test, columns = cols)], axis = 1).drop(['index'], axis=1)
print("Training set")
dc_train.head()
print("\n\nValidation set")
dc_val.head()
print("\n\nTest set")
dc_test.head()
# %%
dc_train.drop(['Geography'], axis=1, inplace=True)
dc_val.drop(['Geography'], axis=1, inplace=True)
dc_test.drop(['Geography'], axis=1, inplace=True)
# %%
means = dc_train.groupby(['Surname']).Exited.mean()
means.head()
means.tail()
# %%
global_mean = y_train.mean()
global_mean
# %%
# Creating new encoded features for surname - Target (mean) encoding
dc_train['Surname_mean_churn'] = dc_train.Surname.map(means)
dc_train['Surname_mean_churn'].fillna(global_mean, inplace=True)
# %%
freqs = dc_train.groupby(['Surname']).size()
freqs.head()
# %%
dc_train['Surname_freq'] = dc_train.Surname.map(freqs)
dc_train['Surname_freq'].fillna(0, inplace=True)
# %%
dc_train['Surname_enc'] = ((dc_train.Surname_freq * dc_train.Surname_mean_churn) - dc_train.Exited)/(dc_train.Surname_freq - 1)
# Fill NaNs occuring due to category frequency being 1 or less
dc_train['Surname_enc'].fillna((((dc_train.shape[0] * global_mean) - dc_train.Exited) / (dc_train.shape[0] - 1)), inplace=True)
dc_train.head(5)
# %%
# Replacing by category means and new category levels by global mean
dc_val['Surname_enc'] = dc_val.Surname.map(means)
dc_val['Surname_enc'].fillna(global_mean, inplace=True)
dc_test['Surname_enc'] = dc_test.Surname.map(means)
dc_test['Surname_enc'].fillna(global_mean, inplace=True)
# Show that using LOO Target encoding decorrelates features
dc_train[['Surname_mean_churn', 'Surname_enc', 'Exited']].corr()
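# %%
# Added sanity check (not part of the original notebook): the leave-one-out
# encoding above removes each row's own label from its group mean. For a
# surname seen 3 times with churn labels [1, 0, 1] (group mean 2/3), the first
# row is encoded as (3 * 2/3 - 1) / (3 - 1) = 0.5, the mean of the other rows.
_freq, _mean, _own = 3, 2 / 3, 1
assert abs((_freq * _mean - _own) / (_freq - 1) - 0.5) < 1e-9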
# %%
dc_train.drop(['Surname_mean_churn'], axis=1, inplace=True)
dc_train.drop(['Surname_freq'], axis=1, inplace=True)
dc_train.drop(['Surname'], axis=1, inplace=True)
dc_val.drop(['Surname'], axis=1, inplace=True)
dc_test.drop(['Surname'], axis=1, inplace=True)
dc_train.head()
# %%
corr = dc_train.corr()
sns.heatmap(corr, cmap = 'coolwarm')
# %%
sns.boxplot(x="Exited", y="Age", data=dc_train, palette="Set3")
# %%
sns.violinplot(x="Exited", y="Balance", data=dc_train, palette="Set3")
# %%
cat_vars_bv = ['Gender', 'IsActiveMember', 'country_Germany', 'country_France']
for col in cat_vars_bv:
dc_train.groupby([col]).Exited.mean()
print()
# %%
# Compute the mean churn rate grouped by number of products on the training data
col = 'NumOfProducts'
dc_train.groupby([col]).Exited.mean()
# unique "NumOfProducts" on training data
dc_train[col].value_counts()
# %%
eps = 1e-6
dc_train['bal_per_product'] = dc_train.Balance/(dc_train.NumOfProducts + eps)
dc_train['bal_by_est_salary'] = dc_train.Balance/(dc_train.EstimatedSalary + eps)
dc_train['tenure_age_ratio'] = dc_train.Tenure/(dc_train.Age + eps)
dc_train['age_surname_mean_churn'] = np.sqrt(dc_train.Age) * dc_train.Surname_enc
# %%
new_cols = ['bal_per_product', 'bal_by_est_salary', 'tenure_age_ratio', 'age_surname_mean_churn']
# Ensuring that the new column doesn't have any missing values
dc_train[new_cols].isnull().sum()
# %%
# Linear association of new columns with target variables to judge importance
sns.heatmap(dc_train[new_cols + ['Exited']].corr(), annot=True)
# %%
dc_val['bal_per_product'] = dc_val.Balance/(dc_val.NumOfProducts + eps)
dc_val['bal_by_est_salary'] = dc_val.Balance/(dc_val.EstimatedSalary + eps)
dc_val['tenure_age_ratio'] = dc_val.Tenure/(dc_val.Age + eps)
dc_val['age_surname_mean_churn'] = np.sqrt(dc_val.Age) * dc_val.Surname_enc
dc_test['bal_per_product'] = dc_test.Balance/(dc_test.NumOfProducts + eps)
dc_test['bal_by_est_salary'] = dc_test.Balance/(dc_test.EstimatedSalary + eps)
dc_test['tenure_age_ratio'] = dc_test.Tenure/(dc_test.Age + eps)
dc_test['age_surname_mean_churn'] = np.sqrt(dc_test.Age) * dc_test.Surname_enc
# %%
# initialize the standard scaler
sc = StandardScaler()
cont_vars = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary', 'Surname_enc', 'bal_per_product'
, 'bal_by_est_salary', 'tenure_age_ratio', 'age_surname_mean_churn']
cat_vars = ['Gender', 'HasCrCard', 'IsActiveMember', 'country_France', 'country_Germany', 'country_Spain']
# Scaling only continuous columns
cols_to_scale = cont_vars
sc_X_train = sc.fit_transform(dc_train[cols_to_scale])
# Converting from array to dataframe and naming the respective features/columns
sc_X_train = | pd.DataFrame(data=sc_X_train, columns=cols_to_scale) | pandas.DataFrame |
# pylint: disable=W0612,E1101
from datetime import datetime
import os
import operator
import unittest
import numpy as np
from pandas.core.api import DataFrame, Index, notnull
from pandas.core.datetools import bday
from pandas.core.frame import group_agg
from pandas.core.panel import WidePanel, LongPanel, pivot
import pandas.core.panel as panelmod
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.core.panel as panelm
import pandas.util.testing as common
class PanelTests(object):
panel = None
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
class SafeForLongAndSparse(object):
def test_repr(self):
foo = repr(self.panel)
def test_iter(self):
common.equalContents(list(self.panel), self.panel.items)
def _check_statistic(self, frame, name, alternative):
f = getattr(frame, name)
for i, ax in enumerate(['items', 'major', 'minor']):
result = f(axis=i)
assert_frame_equal(result, frame.apply(alternative, axis=ax))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_statistic(self.panel, 'count', f)
def test_sum(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.sum()
self._check_statistic(self.panel, 'sum', f)
def test_prod(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return np.prod(nona)
self._check_statistic(self.panel, 'prod', f)
def test_mean(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].mean()
self._check_statistic(self.panel, 'mean', f)
def test_median(self):
def f(x):
x = np.asarray(x)
return np.median(x[notnull(x)])
self._check_statistic(self.panel, 'median', f)
def test_min(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.min()
self._check_statistic(self.panel, 'min', f)
def test_max(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.max()
self._check_statistic(self.panel, 'max', f)
def test_var(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.var(ddof=1)
self._check_statistic(self.panel, 'var', f)
def test_std(self):
def f(x):
x = np.asarray(x)
nona = x[ | notnull(x) | pandas.core.api.notnull |
# Test different feature scaling methods
# Need another function to select the method outright
# logging setup
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s: %(asctime)s\n%(message)s')
file_handler = logging.FileHandler('logs/features.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# import libraries
import numpy as np
import pandas as pd
from sklearn import preprocessing as pp
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from tabulate import tabulate
def scale(X_t, X_v, scaler, X_te = pd.DataFrame()):
cols = X_t.columns
scaler_fit = scaler.fit(X_t)
X_t_s = scaler_fit.transform(X_t)
X_v_s = scaler_fit.transform(X_v)
    if X_te.empty:
return pd.DataFrame(X_t_s, columns=cols), pd.DataFrame(X_v_s, columns=cols)
else:
X_te_s = scaler_fit.transform(X_te)
return | pd.DataFrame(X_t_s, columns=cols) | pandas.DataFrame |
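# Illustrative usage sketch (added; column names and values are assumptions):
# the scaler is fit on the training frame only, and the same fitted transform
# is then applied to the validation frame.
def _example_scale():
    X_tr = pd.DataFrame({'a': [0.0, 1.0, 2.0], 'b': [10.0, 20.0, 30.0]})
    X_va = pd.DataFrame({'a': [1.5], 'b': [25.0]})
    return scale(X_tr, X_va, pp.StandardScaler())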
import nose
import os
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.tools.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(tm.TestCase):
_multiprocess_can_split_ = True
def read_data(self, name, dedupe=False):
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setUp(self):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
pd.merge_asof(left, right, on='a')
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_basic2(self):
expected = self.read_data('asof2.csv')
trades = self.read_data('trades2.csv')
quotes = self.read_data('quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
from __future__ import unicode_literals
import csv
import os
import webbrowser
import pandas as pd
import tweepy as tp
from credentials import twitter_api_key, twitter_api_secret_key
from entities import Account, Entity, EntityGroup
tokens_cache_file = os.path.join(
os.path.dirname(__file__), "twitter_tokens.csv")
def connect_to_twitter():
tokens = | pd.read_csv(tokens_cache_file) | pandas.read_csv |
import math
import glob
import numpy as np
import pandas as pd
from skimage.io import imread
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_decode(mask_rle, shape=(101, 101)):
'''
    mask_rle: run-length as string formatted (start length)
shape: (height,width) of array to return
Returns numpy array, 1 - mask, 0 - background
'''
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
if str(mask_rle) != str(np.nan):
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape).T # Needed to align to RLE direction
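# Quick sanity check (added sketch): decoding the RLE string '2 3' on a 3x3
# mask marks 1-based pixels 2-4 in column-major order, i.e. rows 1-2 of
# column 0 and row 0 of column 1 after the transpose.
assert rle_decode('2 3', shape=(3, 3)).sum() == 3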
submissions = glob.glob('../subm/best_resunet_tgs_slt_cycle*.csv')
print('Found {} submissions to merge'.format(len(submissions)))
folds = []
for subm in submissions:
#print('reading {}'.format(subm))
folds.append( | pd.read_csv(subm) | pandas.read_csv |
from __future__ import division  # use Python 3 style true division for mixed int/float calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = | pd.Series([], dtype="float") | pandas.Series |
import smalltrain as st
from dateutil.parser import parse as parse_datetime
from datetime import timezone
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from datetime import datetime
import time
import pandas as pd
import numpy as np
import math
import random
import csv
import os
import sys
from multiprocessing import Process, Manager
import multiprocessing as mp
from ggutils.gg_data_base import GGDataBase
from ggutils.gg_hash import GGHash
# TRAIN_DATA_SET_FILE_PATH = 'data/train_data_set_item_cnt_normed.csv'
# TRAIN_DATA_SET_MERGE_TEST_FILE_PATH = 'data/train_data_set_item_cnt_normed_merge_test.csv'
DT_COL_NAME = None
DEFAULT_DECREASE_RESOLUTION_RATIO = 2
import json
class ExtendedJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if hasattr(obj, 'to_dict'):
return obj.to_dict()
if isinstance(obj, (datetime)):
return obj.isoformat()
if isinstance(obj, (np.int32, np.int64)):
return str(obj)
if isinstance(obj, (np.float32, np.float64)):
return str(obj)
return json.JSONEncoder.default(self, obj)
def list_to_hash(arg_list):
json_str = json.dumps(arg_list, cls=ExtendedJSONEncoder)
# print('json_str of arg_list:{}'.format(json_str))
import hashlib
data_hash_value = hashlib.sha256(json_str.encode()).hexdigest()
# print('data_hash_value:{}'.format(data_hash_value))
return data_hash_value
# cache_db_host = 'localhost'
# use_cache = False
witout_check_cache = False
class GGData:
def __init__(self, name, use_db='GGDataBase', db_host='localhost', refresh=False, dtype=np.ndarray):
PREFIX = '[TSData]'
self.key_delimiter = '/'
if use_db is None: use_db = 'Default'
self.use_db = use_db
self.dtype = dtype
self.name = name
self.group_key = 'g{}{}'.format(self.key_delimiter, name)
if use_db == 'GGDataBase':
self._db = GGDataBase.Instance()
self._db.set_db(setting_file_path='/usr/local/etc/vendor/gg/redis_connection_setting.json', debug_mode=False)
if refresh:
print('refresh with delete group_key:{}'.format(self.group_key))
keys = self.get_keys()
if keys is not None:
for key in keys:
print('refresh with delete key:{}'.format(key))
self._db.delete(key)
self._db.delete(self.group_key)
elif use_db == 'Default':
# Default simple k-v dictionary
self._db = {}
else:
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
def construct_data_ins(self, name, use_db='GGDataBase', db_host='localhost', refresh=False, dtype=np.ndarray):
'''
A Factory method to construct an instance of this class
:param name:
:param use_db:
:param db_host:
:param refresh:
:param dtype:
:return:
'''
return GGData(name, use_db, db_host, refresh, dtype)
def get(self, key=None):
_iterable, key_or_list = self.is_key_iterable(key)
if key is None or (_iterable and len(key_or_list) == 0):
raise Exception('Invalid usage of get with empty key:{}. Use get_all_values.'.format(key))
else:
_iterable, key_or_list = self.is_key_iterable(key)
if self.use_db in ['GGDataBase']:
if _iterable:
return np.asarray([self.cast_dtype(self._db.read(self.create_key_with_name(k))) for k in key_or_list])
else:
return self.cast_dtype(self._db.read(self.create_key_with_name(key)))
elif self.use_db == 'Default':
if _iterable:
return np.asarray([self._db[self.create_key_with_name(k)] for k in key_or_list])
else:
return np.asarray(self._db[self.create_key_with_name(key)])
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
def cast_dtype(self, value):
        if self.dtype is np.ndarray and not isinstance(value, np.ndarray): return np.asarray(value)
        if self.dtype is pd.DataFrame and not isinstance(value, pd.DataFrame):
if isinstance(value, np.ndarray): return pd.DataFrame(value)
raise ValueError('invalid dtype:{}'.format(self.dtype))
# do nothing
return value
def set(self, key, value):
if self.use_db in ['GGDataBase']:
key_with_name = self.create_key_with_name(key)
self._db.update(key_with_name, value)
# also set key to group_key
keys = self._db.read(self.group_key)
if keys is None:
keys = [key_with_name]
else:
keys.append(key_with_name)
self._db.update(self.group_key, keys)
return
elif self.use_db == 'Default':
self._db[self.create_key_with_name(key)] = value
return
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
def get_keys(self):
if self.use_db in ['GGDataBase']:
if self.group_key is not None:
# keys = self._db.read_range(self.group_key)
keys = self._db.read(self.group_key)
return keys
raise ValueError('self.group_key is None')
# try to read with name
pattern = '{}*'.format(self.name)
print('try to read with pattern:{}'.format(pattern))
keys = self._db.keys(pattern=pattern)
print('read keys:{} with pattern:{}'.format(keys, pattern))
return keys
elif self.use_db == 'Default':
keys = [k for k in self._db.keys() if self.name in k]
return keys
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
def get_all_values(self):
all_key_with_name = self.get_keys()
if self.use_db in ['GGDataBase']:
return np.asarray([self.cast_dtype(self._db.read(k)) for k in all_key_with_name])
elif self.use_db == 'Default':
return np.asarray([self._db[k] for k in all_key_with_name])
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
def get_size(self):
if self.use_db in ['Default', 'GGDataBase']:
keys = self.get_keys()
return len(keys)
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
def shape(self, index=None):
if index is None:
# get all shape
ret_size = [self.get_size()]
if self.use_db in ['Default', 'GGDataBase']:
for key in self.get_keys():
ret_size.extend(self.get(key).shape[0:]) # TODO only get first key-value's shape
return tuple(ret_size)
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
elif index == 0:
return self.get_size()
elif index > 1:
if self.use_db in ['Default', 'GGDataBase']:
for key in self._db.keys():
return self.get(key).shape[index-1] # TODO only get first key-value's shape
raise Exception('Invalid self.use_db:{}'.format(self.use_db))
else:
raise Exception('Invalid index:{}'.format(index))
def create_key_with_name(self, key):
_iterable, key_or_list = self.is_key_iterable(key)
def add_name_to_key_if_not_contain(name, key_delimiter, key):
if key is None or len(str(key)) < len(name): return '{}{}{}'.format(name, key_delimiter, key)
return key if str(key).find('{}{}'.format(name, key_delimiter)) == 0 else '{}{}{}'.format(name, key_delimiter, key)
if _iterable:
return [add_name_to_key_if_not_contain(self.name, self.key_delimiter, key) for key in key_or_list]
else:
return add_name_to_key_if_not_contain(self.name, self.key_delimiter, key)
def remove_name_from_key_with_name(self, key_with_name):
_iterable, key_with_name_or_list = self.is_key_iterable(key_with_name)
def remove_name_from_key_if_contain(name, key_delimiter, key):
if key is None or len(str(key)) < len(name): return key
return key[len(name)+len(key_delimiter):] if str(key).find('{}{}'.format(name, key_delimiter)) == 0 else key
if _iterable:
# removed_key = key_with_name_or_list
# for i in range(len(removed_key)):
# if removed_key[i].find('{}{}'.format(self.name, self.key_delimiter)) == 0: removed_key[i] = removed_key[i][len(self.name)+len(self.key_delimiter):]
# return removed_key
return [remove_name_from_key_if_contain(self.name, self.key_delimiter, key_with_name) for key_with_name in key_with_name_or_list]
else:
return remove_name_from_key_if_contain(self.name, self.key_delimiter, key_with_name)
def is_key_iterable(self, key):
if key is None: return False, key
if isinstance(key, list): return True, key
if isinstance(key, np.ndarray) and len(key) > 1: return True, list(key)
if isinstance(key, range): return True, list(key)
return False, key
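# Illustrative usage sketch (added; not part of the original smalltrain source):
# a round trip through GGData with the in-memory 'Default' backend, which does
# not require a Redis/GGDataBase connection.
def _example_ggdata_roundtrip():
    store = GGData('demo', use_db='Default')
    store.set('k0', np.asarray([1.0, 2.0, 3.0]))
    # keys are stored internally as 'demo/k0' but can be read back by short key
    return store.get('k0'), store.get_keys()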
class GGDataSet:
# Abstract method
def __init__(self, debug_mode=False,
prepare_data_mode=False, prediction_mode=False, hparams=None):
PREFIX = '[GGDataSet]'
self.hparams = hparams
print('{}TODO init with hparams:{}'.format(PREFIX, hparams))
self.debug_mode = debug_mode
self.prepare_data_mode = prepare_data_mode
self.model_type = 'CLASSIFICATION'
if hparams and 'model_type' in hparams.keys():
print('{}Use model_type in hparams:{}'.format(PREFIX, hparams['model_type']))
self.model_type = hparams['model_type']
else:
print('{}TODO Use ts_start with default value:{}'.format(PREFIX, self.model_type))
self.prediction_mode = prediction_mode
print('{}init with prediction_mode:{}'.format(PREFIX, prediction_mode))
self.multiprocessing = st.Hyperparameters.DEFAULT_DICT['multiprocessing']
if hparams and 'multiprocessing' in hparams.keys():
print('{}Use multiprocessing in hparams:{}'.format(PREFIX, hparams['multiprocessing']))
self.multiprocessing = hparams['multiprocessing']
else:
print('{}TODO Use multiprocessing with default value:{}'.format(PREFIX, self.multiprocessing))
self.max_threads = st.Hyperparameters.DEFAULT_DICT['max_threads']
if hparams and 'max_threads' in hparams.keys():
print('{}Use max_threads in hparams:{}'.format(PREFIX, hparams['max_threads']))
self.max_threads = hparams['max_threads']
else:
print('{}TODO Use max_threads with default value:{}'.format(PREFIX, self.max_threads))
print('{}multiprocessing: {}'.format(PREFIX, self.multiprocessing))
print('{}cpu_count: {}, max_threads: {}'.format(PREFIX, mp.cpu_count(), self.max_threads))
if self.multiprocessing and self.max_threads > 1:
self.thread_dict = Manager().dict()
# about mask_rate
self.mask_rate = None
if hparams and 'mask_rate' in hparams.keys():
print('{}Use mask_rate in hparams:{}'.format(PREFIX, hparams['mask_rate']))
self.mask_rate = hparams['mask_rate']
if self.mask_rate is not None:
try:
self.mask_rate = float(self.mask_rate)
except ValueError:
print('{}mask_rate is not float type. reset with None'.format(PREFIX))
self.mask_rate = None
# (For compatibility with ver0.1.1 ```col_index_to_mask``` and ver0.1.2 ```ch_index_to_mask``` )
self.col_index_to_mask = None
if hparams and 'col_index_to_mask' in hparams.keys():
print('{}Use col_index_to_mask in hparams:{}'.format(PREFIX, hparams['col_index_to_mask']))
self.col_index_to_mask = hparams['col_index_to_mask']
# check both mask_rate and col_index_to_mask
if self.mask_rate is None or self.col_index_to_mask is None:
print('{}Set both mask_rate and col_index_to_mask None because one of them is None'.format(PREFIX))
self.mask_rate = None
self.col_index_to_mask = None
# set ch_index_to_mask
self.ch_index_to_mask = self.col_index_to_mask # (For compatibility with ver0.1.1 ```col_index_to_mask``` and ver0.1.2 ```ch_index_to_mask``` )
if hparams and 'ch_index_to_mask' in hparams.keys():
print('{}Use ch_index_to_mask in hparams:{}'.format(PREFIX, hparams['ch_index_to_mask']))
self.ch_index_to_mask = hparams['ch_index_to_mask']
# check both mask_rate and ch_index_to_mask
if self.mask_rate is None or self.ch_index_to_mask is None:
print('{}Set both mask_rate and ch_index_to_mask None because one of them is None'.format(PREFIX))
self.mask_rate = None
self.ch_index_to_mask = None
# about skipping invalid data
# about skip_invalid_data
self.skip_invalid_data = None
if hparams and 'skip_invalid_data' in hparams.keys():
print('{}Use skip_invalid_data in hparams:{}'.format(PREFIX, hparams['skip_invalid_data']))
self.skip_invalid_data = hparams['skip_invalid_data']
self.skip_invalid_data = (self.skip_invalid_data is not None and self.skip_invalid_data)
# about skip_invalid_data
self.valid_data_range = None
if hparams and 'valid_data_range' in hparams.keys():
print('{}Use valid_data_range in hparams:{}'.format(PREFIX, hparams['valid_data_range']))
self.valid_data_range = hparams['valid_data_range']
# about multi_resolution_channels
self.multi_resolution_channels = 0
if hparams and 'multi_resolution_channels' in hparams.keys():
print('{}Use multi_resolution_channels in hparams:{}'.format(PREFIX, hparams['multi_resolution_channels']))
self.multi_resolution_channels = hparams['multi_resolution_channels']
else:
print('{}TODO Use multi_resolution_channels with default value:{}'.format(PREFIX, self.multi_resolution_channels))
# set decrease_resolution_ratio or decrease_resolution_ratio_list
self.decrease_resolution_ratio_list = None
if self.multi_resolution_channels > 0:
# 1. decrease_resolution_ratio
self.decrease_resolution_ratio = DEFAULT_DECREASE_RESOLUTION_RATIO
if hparams and 'decrease_resolution_ratio' in hparams.keys():
print('{}Use decrease_resolution_ratio in hparams:{}'.format(PREFIX, hparams['decrease_resolution_ratio']))
self.decrease_resolution_ratio = hparams['decrease_resolution_ratio']
else:
print('{}TODO Use decrease_resolution_ratio with default value:{}'.format(PREFIX, self.decrease_resolution_ratio))
# 2. decrease_resolution_ratio_list
if hparams and 'decrease_resolution_ratio_list' in hparams.keys():
print('{}Use decrease_resolution_ratio_list in hparams:{}'.format(PREFIX, hparams['decrease_resolution_ratio_list']))
self.decrease_resolution_ratio_list = hparams['decrease_resolution_ratio_list']
if self.decrease_resolution_ratio_list is None:
print('{}TODO decrease_resolution_ratio_list is set with decrease_resolution_ratio:{} and multi_resolution_channels:{}'.format(PREFIX, self.decrease_resolution_ratio, self.multi_resolution_channels))
self.decrease_resolution_ratio_list = [int(math.pow(self.decrease_resolution_ratio, extend_level)) for extend_level in range(1, self.multi_resolution_channels + 1)]
print('{}DONE decrease_resolution_ratio_list is set {}'.format(PREFIX, self.decrease_resolution_ratio_list))
if hparams and 'input_data_names' in hparams.keys():
print('{}Use input_data_names in hparams:{}'.format(PREFIX, hparams['input_data_names']))
self.input_data_names = hparams['input_data_names']
else:
print('{}Error no input_data_names'.format(PREFIX))
exit(1)
self.col_size = len(self.input_data_names)
# about channels to be extended with multi_resolution_channels
self.input_data_names_to_be_extended = None
if hparams and 'input_data_names_to_be_extended' in hparams.keys():
print('{}Use input_data_names_to_be_extended in hparams:{}'.format(PREFIX, hparams['input_data_names_to_be_extended']))
self.input_data_names_to_be_extended = hparams['input_data_names_to_be_extended']
if self.input_data_names_to_be_extended and self.input_data_names_to_be_extended is not None:
self.col_size += len(self.input_data_names_to_be_extended) * self.multi_resolution_channels
elif self.multi_resolution_channels > 0:
self.input_data_names_to_be_extended = self.input_data_names
print('{}Use input_data_names_to_be_extended with all input_data_names:{}'.format(PREFIX, self.input_data_names))
self.col_size = len(self.input_data_names) *(1 + self.multi_resolution_channels)
else:
print('{}No input_data_names_to_be_extended'.format(PREFIX))
print('self.col_size:{}'.format(self.col_size))
if hparams and 'output_data_names' in hparams.keys():
print('{}Use output_data_names in hparams:{}'.format(PREFIX, hparams['output_data_names']))
self.output_data_names = hparams['output_data_names']
else:
print('{}Error no output_data_names'.format(PREFIX))
exit(1)
# Whether Has to complement the value before ts starts or not(Default:True)
self.has_to_complement_before = True
if hparams and 'has_to_complement_before' in hparams.keys():
print('{}Use has_to_complement_before in hparams:{}'.format(PREFIX, hparams['has_to_complement_before']))
self.has_to_complement_before = hparams['has_to_complement_before']
if self.has_to_complement_before is None:
self.has_to_complement_before = True
print('{}Use has_to_complement_before with default value:{}'.format(PREFIX, self.has_to_complement_before))
# S (For compatibility with ver0.1.1 ```complement_ts``` and ver0.1.2 ```complement_input_data``` )
self.complement_ts = None
if hparams and 'complement_ts' in hparams.keys():
print('{}Use complement_ts in hparams:{}'.format(PREFIX, hparams['complement_ts']))
self.complement_ts = hparams['complement_ts']
else:
print('{}Use complement_ts with default value:{}'.format(PREFIX, self.complement_ts))
# E (For compatibility with ver0.1.1 ```complement_ts``` and ver0.1.2 ```complement_input_data``` )
self.complement_input_data = self.complement_ts # (For compatibility with ver0.1.1 ```complement_ts``` and ver0.1.2 ```complement_input_data``` )
if hparams and 'complement_input_data' in hparams.keys():
print('{}Use complement_input_data in hparams:{}'.format(PREFIX, hparams['complement_input_data']))
self.complement_input_data = hparams['complement_input_data']
else:
print('{}Use complement_input_data with default value:{}'.format(PREFIX, self.complement_input_data))
# S (For compatibility with ver0.1.1 ```complement_ts``` and ver0.1.2 ```complement_input_data``` )
if self.complement_input_data is None:
self.complement_input_data = self.complement_ts
# E (For compatibility with ver0.1.1 ```complement_ts``` and ver0.1.2 ```complement_input_data``` )
self.data_dir_path = '/var/data/'
if hparams and 'data_dir_path' in hparams.keys():
print('{}Use data_dir_path in hparams:{}'.format(PREFIX, hparams['data_dir_path']))
self.data_dir_path = hparams['data_dir_path']
else:
print('{}Use data_dir_path with default value:{}'.format(PREFIX, self.data_dir_path))
self.data_set_def_path = None
if hparams and 'data_set_def_path' in hparams.keys():
print('{}Use data_set_def_path in hparams:{}'.format(PREFIX, hparams['data_set_def_path']))
self.data_set_def_path = hparams['data_set_def_path']
try:
# read df_data_set_def and check
self.df_data_set_def = | pd.read_csv(self.data_set_def_path) | pandas.read_csv |
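# Worked example (added note) for the decrease_resolution_ratio_list derivation
# in GGDataSet.__init__ above: with decrease_resolution_ratio = 2 and
# multi_resolution_channels = 3, the derived list is [2, 4, 8].
assert [2 ** level for level in range(1, 3 + 1)] == [2, 4, 8]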
from unityagents import UnityEnvironment
import numpy as np
import matplotlib.pyplot as plt
from agents.ddpg_agent import MADDPGCollective
import csv
import sys
import pandas as pd
import torch
TARGET_SCORE = 0.5
USE_COLLECTIVE_TRAINING = True
def plot_scores(scores, episode_solved, rolling_window=100):
"""Plot scores and optional rolling mean using specified window."""
sums = []
for s in scores:
sums.append(np.sum(s))
rolling_mean = | pd.Series(sums) | pandas.Series |
import unittest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal
from pdblp import pdblp
import os
IP_PORT = 8194
class TestBCon(unittest.TestCase):
def setUp(self):
self.con = pdblp.BCon(port=IP_PORT, timeout=5000)
self.con.start()
cdir = os.path.dirname(__file__)
self.path = os.path.join(cdir, 'data/')
def tearDown(self):
pass
def pivot_and_assert(self, df, df_exp, with_date=False):
# as shown below, since the raw data returned from bbg is an array
        # with unknown ordering, there is no guarantee that the `position` will
# always be the same so pivoting prior to comparison is necessary
#
# fieldData = {
# INDX_MWEIGHT[] = {
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "BON8"
# Percentage Weight = 2.410000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "C N8"
# Percentage Weight = 6.560000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "CLN8"
# Percentage Weight = 7.620000
# }
# }
# }
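        # pivoting each `name` into its own column makes the comparison independent of
        # the arbitrary `position` ordering illustrated above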
name_cols = list(df_exp.name.unique())
sort_cols = list(df_exp.name.unique())
index_cols = ["name", "position", "field", "ticker"]
if with_date:
sort_cols.append("date")
index_cols.append("date")
df = (df.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
df_exp = (df_exp.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
# deal with mixed types resulting in str from csv read
for name in name_cols:
try:
df_exp.loc[:, name] = df_exp.loc[:, name].astype(float)
except ValueError:
pass
for name in name_cols:
try:
df.loc[:, name] = df.loc[:, name].astype(float)
except ValueError:
pass
if with_date:
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"],
format="%Y%m%d")
df_exp.loc[:, "date"] = pd.to_datetime(df_exp.loc[:, "date"],
format="%Y%m%d")
assert_frame_equal(df, df_exp)
def test_bdh_one_ticker_one_field_pivoted(self):
df = self.con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630')
midx = pd.MultiIndex(levels=[["SPY US Equity"], ["PX_LAST"]],
labels=[[0], [0]], names=["ticker", "field"])
df_expect = pd.DataFrame(
index=pd.date_range("2015-06-29", "2015-06-30"),
columns=midx,
data=[205.42, 205.85]
)
df_expect.index.names = ["date"]
assert_frame_equal(df, df_expect)
def test_bdh_one_ticker_one_field_longdata(self):
df = self.con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630',
longdata=True)
idx = pd.Index(["date", "ticker", "field", "value"])
data = [["2015-06-29", "2015-06-30"],
["SPY US Equity", "SPY US Equity"], ["PX_LAST", "PX_LAST"],
[205.42, 205.85]]
df_expect = pd.DataFrame(data=data, index=idx).transpose()
df_expect.loc[:, "date"] = pd.to_datetime(df_expect.loc[:, "date"])
df_expect.loc[:, "value"] = np.float64(df_expect.loc[:, "value"])
assert_frame_equal(df, df_expect)
def test_bdh_one_ticker_two_field_pivoted(self):
cols = ['PX_LAST', 'VOLUME']
df = self.con.bdh('SPY US Equity', cols, '20150629', '20150630')
midx = pd.MultiIndex(
levels=[["SPY US Equity"], cols],
labels=[[0, 0], [0, 1]], names=["ticker", "field"]
)
df_expect = pd.DataFrame(
index=pd.date_range("2015-06-29", "2015-06-30"),
columns=midx,
data=[[205.42, 202621332], [205.85, 182925106]]
)
df_expect = df_expect.astype(np.float64)
df_expect.index.names = ["date"]
assert_frame_equal(df, df_expect)
def test_bdh_one_ticker_two_field_longdata(self):
cols = ['PX_LAST', 'VOLUME']
df = self.con.bdh('SPY US Equity', cols, '20150629', '20150630',
longdata=True)
idx = pd.Index(["date", "ticker", "field", "value"])
data = [["2015-06-29", "2015-06-29", "2015-06-30", "2015-06-30"],
["SPY US Equity", "SPY US Equity", "SPY US Equity", "SPY US Equity"], # NOQA
["PX_LAST", "VOLUME", "PX_LAST", "VOLUME"],
[205.42, 202621332, 205.85, 182925106]]
df_expect = pd.DataFrame(data=data, index=idx).transpose()
df_expect.loc[:, "date"] = pd.to_datetime(df_expect.loc[:, "date"])
df_expect.loc[:, "value"] = np.float64(df_expect.loc[:, "value"])
assert_frame_equal(df, df_expect)
def test_bdh_value_errors(self):
bad_col = "not_a_fld"
self.assertRaises(ValueError, self.con.bdh, "SPY US Equity", bad_col,
"20150630", "20150630")
bad_ticker = "not_a_ticker"
self.assertRaises(ValueError, self.con.bdh, bad_ticker, "PX_LAST",
"20150630", "20150630")
def test_bdib(self):
# BBG has limited history for the IntradayBarRequest service so this
# needs to be periodically updated
df = self.con.bdib('SPY US Equity', '2018-02-09T10:00:00',
'2018-02-09T10:20:01', event_type="BID",
interval=10)
idx = pd.DatetimeIndex(["2018-02-09T10:00:00", "2018-02-09T10:10:00",
"2018-02-09T10:20:00"])
data = [[260.85, 260.90, 260.50, 260.58, 8038, 938],
[260.58, 260.72, 260.34, 260.64, 11795, 1460],
[260.64, 260.78, 260.64, 260.77, 964, 116]]
cols = ["open", "high", "low", "close", "volume", "numEvents"]
df_expect = pd.DataFrame(data=data, index=idx, columns=cols)
assert_frame_equal(df, df_expect)
# REF TESTS
def test_ref_one_ticker_one_field(self):
df = self.con.ref('AUD Curncy', 'NAME')
df_expect = pd.DataFrame(
columns=["ticker", "field", "value"],
data=[["AUD Curncy", "NAME", "Australian Dollar Spot"]]
)
assert_frame_equal(df, df_expect)
def test_ref_one_ticker_one_field_override(self):
df = self.con.ref('AUD Curncy', 'SETTLE_DT',
[("REFERENCE_DATE", "20161010")])
df_expect = pd.DataFrame(
columns=["ticker", "field", "value"],
data=[["AUD Curncy", "SETTLE_DT",
pd.datetime(2016, 10, 12).date()]]
)
assert_frame_equal(df, df_expect)
def test_ref_invalid_field(self):
self.assertRaises(ValueError, self.con.ref,
"EI862261 Corp", "not_a_field")
def test_ref_not_applicable_field(self):
# test both cases described in
# https://github.com/matthewgilbert/pdblp/issues/6
df = self.con.ref("BCOM Index", ["INDX_GWEIGHT"])
df_expect = pd.DataFrame(
[["BCOM Index", "INDX_GWEIGHT", np.NaN]],
columns=['ticker', 'field', 'value']
)
assert_frame_equal(df, df_expect)
df = self.con.ref("BCOM Index", ["INDX_MWEIGHT_PX2"])
df_expect = pd.DataFrame(
[["BCOM Index", "INDX_MWEIGHT_PX2", np.NaN]],
columns=['ticker', 'field', 'value']
)
assert_frame_equal(df, df_expect)
def test_ref_invalid_security(self):
self.assertRaises(ValueError, self.con.ref, "NOT_A_TICKER", "MATURITY")
def test_ref_applicable_with_not_applicable_field(self):
df = self.con.ref("BVIS0587 Index", ["MATURITY", "NAME"])
df_exp = pd.DataFrame(
[["BVIS0587 Index", "MATURITY", np.NaN],
["BVIS0587 Index", "NAME", "CAD Canada Govt BVAL Curve"]],
columns=["ticker", "field", "value"])
assert_frame_equal(df, df_exp)
def test_ref_mixed_data_error(self):
# calling ref which returns singleton and array data throws error
self.assertRaises(ValueError, self.con.ref, 'CL1 Comdty', 'FUT_CHAIN')
# BULKREF TESTS
def test_bulkref_one_ticker_one_field(self):
df = self.con.bulkref('BCOM Index', 'INDX_MWEIGHT',
ovrds=[("END_DATE_OVERRIDE", "20150530")])
df_expected = pd.read_csv(
os.path.join(self.path, "bulkref_20150530.csv")
)
self.pivot_and_assert(df, df_expected)
def test_bulkref_two_ticker_one_field(self):
df = self.con.bulkref(['BCOM Index', 'OEX Index'], 'INDX_MWEIGHT',
ovrds=[("END_DATE_OVERRIDE", "20150530")])
df_expected = pd.read_csv(
os.path.join(self.path, "bulkref_two_fields_20150530.csv")
)
self.pivot_and_assert(df, df_expected)
def test_bulkref_singleton_error(self):
# calling bulkref which returns singleton throws error
self.assertRaises(ValueError, self.con.bulkref, 'CL1 Comdty',
'FUT_CUR_GEN_TICKER')
def test_bulkref_null_scalar_sub_element(self):
# related to https://github.com/matthewgilbert/pdblp/issues/32#issuecomment-385555289 # NOQA
# smoke test to check parse correctly
ovrds = [("DVD_START_DT", "19860101"), ("DVD_END_DT", "19870101")]
self.con.bulkref("101 HK EQUITY", "DVD_HIST", ovrds=ovrds)
def test_bulkref_empty_field(self):
df = self.con.bulkref(["88428LAA0 Corp"], ["INDEX_LIST"])
df_exp = pd.DataFrame(
[["88428LAA0 Corp", "INDEX_LIST", np.NaN, np.NaN, np.NaN]],
columns=["ticker", "field", "name", "value", "position"]
)
        assert_frame_equal(df, df_exp)
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 14:31:25 2020
@author: <NAME>
"""
import os
os.chdir("..")
import numpy as np
import pandas as pd
import statsmodels.api as sm
from pandasql import sqldf
#rolling regression for tickers
#to be used to get daily residual for each observation given past 100 obs and sp500
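#roll_reg fits OLS of the ticker's ln return on the index ln return over the 100 rows
#before row k and returns the out-of-sample residual at row k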
def roll_reg(x, k):
    x = x.sort_values(by=['date'])
temp_df_100=x.iloc[k-100:k]
temp_df_pred_row=x.iloc[k:k+1]
results= sm.OLS(temp_df_100['ln_return_price'], sm.add_constant(temp_df_100[['ln_return_index']])).fit()
q=float(temp_df_pred_row['ln_return_index'])
temp_pred = results.predict([1,q])
rolling_resid_return=float(temp_df_pred_row.iloc[0,4])-float(temp_pred)
return rolling_resid_return
master_finance_df= pd.read_csv("constructed\\capstone\\capstone_equity_ticker_daily.csv")
master_finance_df= master_finance_df.loc[:, ~master_finance_df.columns.str.contains('^Unnamed')]
master_finance_df.dtypes
#Does the following: calculation ln return price; ln return index; makes daily residuals
#using roll_reg function above
stack_df= pd.DataFrame(columns=['ticker', 'date', 'ln_return_price', \
'ln_return_index'])
for x in master_finance_df.ticker.unique():
temp_df=master_finance_df[master_finance_df.ticker ==x]
temp_df=temp_df[['price','ticker','date','sp500']]
temp_df['ln_return_price']=float(0)
temp_df['ln_return_index']=float(0)
temp_df['roll_resid']=float(0)
    temp_df = temp_df.sort_values(by=['date'])
for i in range(1, len(temp_df)):
try:
temp_price_ret=np.log(temp_df.iloc[i][0]/temp_df.iloc[i-1][0])
temp_index_ret=np.log(temp_df.iloc[i][3]/temp_df.iloc[i-1][3])
temp_df.iloc[i,4]=temp_price_ret
temp_df.iloc[i,5]=temp_index_ret
except:
temp_df.iloc[i,4]=0
temp_df.iloc[i,5]=0
for p in range(100, len(temp_df)-100):
temp_resid=roll_reg(temp_df, p)
temp_df.iloc[p,6]=temp_resid
temp_df=temp_df[['ticker','date', 'ln_return_price', 'ln_return_index', 'roll_resid']]
stack_df=stack_df.append(temp_df)
#stack_df.to_csv("constructed\\stack_backup.csv", sep=',')
#stack_df=pd.read_csv("constructed\\stack_backup.csv", sep=',')
stack_df= stack_df.loc[:, ~stack_df.columns.str.contains('^Unnamed')]
stack_df.dtypes
master_finance_df = pd.merge(master_finance_df, stack_df, how='inner',
                             on=['date', 'ticker'], validate='one_to_one')
import dash
from dash import dcc
import dash_bootstrap_components as dbc
from dash import html
from dash.dependencies import Input, Output, State
import pandas as pd
import random
import re
#######################
# Helper functions
#######################
# convert a dataframe into a list of html.Tr rows (a header row followed by one row
# per record) for rendering inside an html.Table
def make_table(df):
# table header
rows = [html.Tr([html.Th(col) for col in list(df.columns)])]
# loop through each unique filename and create a list of the Html objects to make that row
for r in range(len(df.index)):
row = [html.Th(df.iloc[r,c]) for c in range(len(df.columns))]
rows.append(html.Tr(row))
return rows
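# make_table returns a list of html.Tr components; the layout below wraps them in an
# html.Table, e.g. html.Table(make_table(pd.DataFrame({})), id='roster-table', className='table')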
def get_auto_picks(start_pick,end_pick,pl,n_teams,roster):
randweights = [0]*25+[1]*9+[2]*5+[3]*3+[4]*2+[5]*2+[6]+[7]+[8]+[9]
for pick_number in range(start_pick,end_pick):
# determine team needs
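        # snake-draft ordering: the team sequence concatenated with its reverse, indexed by
        # pick_number % (2 * n_teams), flips the pick direction every other round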
team = (teamnames[:n_teams+1]+teamnames[n_teams:0:-1])[pick_number % (2*n_teams)]
pln = remove_unneeded_players(pl, roster, team)
# use randomness to determine which player will be selected
pick_no = randweights[random.randrange(0,49)]
pick_idx = pln.sort_values('Rank',ascending=True).index[pick_no]
pos= pl.loc[pick_idx,'Position(s)']
# update players table
pl.loc[pick_idx,'Available'] = False
pl.loc[pick_idx,'Rd'] = (pick_number-1) // n_teams + 1
pl.loc[pick_idx,'Pick'] = (pick_number-1) % n_teams + 1
pl.loc[pick_idx,'Slot'] = determine_slot(pos,roster,pl.loc[pl.Team == team])
pl.loc[pick_idx,'Team'] = team
return pl
def determine_slot(pos, ros, teampl):
m = ros.merge(teampl,on='Slot',how='left')
# add alternative positions
altpos = (['MI'] if '2B' in pos or 'SS' in pos else []) + (
['CI'] if '1B' in pos or '3B' in pos else []) + ['UT','BE']
for p in pos.split(', ') + altpos:
for a in m.loc[m.Player.isna()].sort_values('Num')['Slot']:
if p == re.sub('\d$','',a):
return a
else:
return '-'
def remove_unneeded_players(pl,roster,team):
# Remove the players from pl that team doesn't need based on roster
teampl = pl.loc[pl.Team == team]
teamros = roster.merge(teampl,on = 'Slot',how='left')
needs = list(teamros.loc[teamros.Player.isna(),'Slot'].str.replace('\d+$','',regex=True))
# handle MI and CI
if 'MI' in needs:
needs = needs + ['SS','2B']
if 'CI' in needs:
needs = needs + ['1B','3B']
# filter players that don't match roster needs
if ('BE' not in needs) and ('UT' not in needs):
return pl.loc[pl['Position(s)'].str.match('|'.join(needs)) & pl['Available']]
else:
return pl.loc[pl['Available']]
#######################
# Initial Data Prep
#######################
players = pd.read_csv('players.csv')
players['Team'], players['Slot'], players['Rd'], players['Pick'] = (pd.NA, pd.NA, pd.NA, pd.NA)
teamnames = 'AABCDEFGHIJKLMNOPQRSTUVWXYZ'
#######################
# Dash app layout
#######################
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
# header for the app
header = [dbc.Row(html.H1('Draft Simulator')),
dbc.Row(html.Div(' ',style = {'height': "35px"}))
]
startsection = [
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(id='n-p-dropdown',options=list(range(5,16)),value=9),
html.Div(children='# of Pitchers')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-of-dropdown',options=list(range(3,8)),value=3),
html.Div(children='# of Outfielders')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-c-dropdown',options=list(range(1,4)),value=1),
html.Div(children='# of Catchers')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-ci-dropdown',options=list(range(0,6)),value=1),
html.Div(children='# of Corner IF')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-mi-dropdown',options=list(range(0,6)),value=1),
html.Div(children='# of Middle IF')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-ut-dropdown',options=list(range(0,21)),value=2),
html.Div(children='# of Utility Players')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-be-dropdown',options=list(range(0,21)),value=2),
html.Div(children='# of Bench Players')
],style = {'width':'15%'}), md=6)
],id = 'start-row-1'),
dbc.Row(html.Div(' ',style = {'height': "25px"})),
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(id='n-teams-dropdown',options=list(range(2,25)),value=10),
html.Div(children='Select number of teams')
],style = {'width':'75%'}), md=2),
dbc.Col(
html.Div([
dcc.Dropdown(id='position-dropdown'),
html.Div(children='Select your draft position')
],style = {'width':'75%'}), md=2),
dbc.Col(html.Button('Begin!',id='begin-button',style={'width': '25%'}),md=8)
],id = 'start-row-2')
]
# put the table of the sorted data in the left half of the screen
draftpanel = [
html.Div([
html.Div([
html.H3('Select Player'),
dbc.Row([
dbc.Col([
dcc.Dropdown(options = players.Rank.astype(str)+'. '+players.Name+' ('+players['Position(s)']+')'
,id = 'pick-dropdown'),
html.Button('Draft Player', id='draft-button', n_clicks=0)],md=5),
dbc.Col([
html.Table(make_table(pd.DataFrame({})),id='bat-proj-table',className='table'),
html.Table(make_table(pd.DataFrame({})),id='pit-proj-table',className='table')],md=7)
]),
html.Div(' ',style={'height':'20px'})
],id = 'draft-div'),
html.H3('Team Roster'),
dcc.Dropdown(id='team-roster-dropdown',options=['My-Team'], value = 'My-Team'),
html.Table(make_table(pd.DataFrame({})),id='roster-table',className='table')
],id='draft-panel',style={"width": "90%"})
]
pickspanel = [
html.Div([
html.H3('Last Picks'),
html.Table(make_table(pd.DataFrame({})),id='last-picks-table',className='table'),
html.Div(players.to_json(),id='players',style={'display': 'none'}),
html.Div(0,id='n-teams',style={'display': 'none'}),
html.Div(0,id='position',style={'display': 'none'}),
html.Div(0,id='pick-number',style={'display': 'none'}),
html.Div(0,id='roster',style={'display': 'none'})
],style = {"width": "90%"})
]
projpanel = [
html.Div([
html.H3('Projected Standings'),
dcc.RadioItems(['Stats','Ranks'],'Stats',id='proj-type-radioitems',style = {'width':'200%'}),
html.Table(make_table(pd.DataFrame({})),id='proj-standings-table',className='table')
])
]
# lay out the app based on the above panel definitions
app.layout = dbc.Container([
html.Div(header),
html.Div(startsection,id ='start-section'),
html.Div(dbc.Row([dbc.Col(draftpanel, md=5),
dbc.Col(projpanel, md=5),
dbc.Col(pickspanel, md=2)])
,id = 'main-section',style = {'display':'none'})
],fluid=True)
# #######################
# # Reactive callbacks
# #######################
@app.callback(
Output('roster','children'),
[Input('n-of-dropdown','value'),
Input('n-p-dropdown','value'),
Input('n-c-dropdown','value'),
Input('n-mi-dropdown','value'),
Input('n-ci-dropdown','value'),
Input('n-ut-dropdown','value'),
Input('n-be-dropdown','value'),
Input('begin-button','n_clicks')]
)
def update_roster(n_of,n_p,n_c,n_mi,n_ci,n_ut,n_be,n_clicks):
slots = (['C'+str(i+1) for i in range(n_c)] +
['1B','2B','3B','SS'] +
['OF'+str(i+1) for i in range(n_of)] +
['MI'+str(i+1) for i in range(n_mi)] +
['CI'+str(i+1) for i in range(n_ci)] +
['P'+str(i+1) for i in range(n_p)] +
['UT'+str(i+1) for i in range(n_ut)] +
['BE'+str(i+1) for i in range(n_be)])
roster = pd.DataFrame({'Slot': slots,'Num': list(range(len(slots)))})
return roster.to_json()
@app.callback(
Output('position-dropdown', 'options'),
[Input('n-teams-dropdown', 'value')]
)
def update_position_dropdown(num_teams):
return list(range(1,num_teams+1))
@app.callback(
[Output('pick-dropdown','options')],
[Input('players','children'),
Input('roster','children')]
)
def update_pick_options(players_json,roster_json):
pl = pd.read_json(players_json)
roster = pd.read_json(roster_json)
pln = remove_unneeded_players(pl, roster, 'My-Team')
return [list(pln.Rank.astype(str)+'. '+pln.Player+' ('+pln['Position(s)']+')')]
@app.callback(
Output('last-picks-table', 'children'),
[Input('players','children')],
[State('n-teams','children')]
)
def update_last_picks_table(players_json,n_teams):
pl = pd.read_json(players_json)
last_picks = pl.loc[~pl.Team.isna()]
last_picks['Pick'] = (last_picks['Rd']-1)*n_teams + last_picks['Pick']
last_picks.loc[last_picks.Team == 'My-Team','Team'] = 'Me'
return make_table(last_picks.sort_values('Pick',ascending = False)
[['Pick','Team','Player']].iloc[0:3*n_teams])
@app.callback(
Output('roster-table', 'children'),
[Input('players','children'),
Input('team-roster-dropdown','value')],
[State('roster','children')]
)
def update_roster_table(players_json,teamchoice,roster_json):
ros = pd.read_json(roster_json)
pl = pd.read_json(players_json)
pl['AVG'] = (pl['H']/pl['AB']).round(3)
pl['ERA'] = (9*pl['ER']/pl['IP']).round(2)
pl['WHIP'] = ((pl['BB']+pl['H.P'])/pl['IP']).round(2)
teampl = pl.loc[pl.Team == teamchoice]
retcols = ['Slot','Player','Rd','AB','R','HR','RBI','SB','AVG',
'IP', 'ERA', 'W', 'SO', 'SV', 'WHIP']
ret = ros.merge(teampl,on='Slot',how='left').sort_values('Num')
return make_table(ret[retcols])
@app.callback(
Output('bat-proj-table', 'children'),
[Input('pick-dropdown','value')],
[State('players','children')]
)
def update_bat_proj_table(pick,players_json):
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pl['AVG'] = (pl['H']/pl['AB']).round(3)
if pl.loc[pick_idx,['AB']].count() > 0:
return make_table(pl.loc[[pick_idx],['AB', 'R', 'HR', 'RBI', 'SB','AVG']])
else:
return make_table(pd.DataFrame({}))
@app.callback(
Output('pit-proj-table', 'children'),
[Input('pick-dropdown','value')],
[State('players','children')]
)
def update_pit_proj_table(pick,players_json):
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pl['WHIP'] = ((pl['BB']+pl['H.P'])/pl['IP']).round(2)
pl['ERA'] = (9*pl['ER']/pl['IP']).round(2)
if pl.loc[pick_idx,['IP']].count() > 0:
return make_table(pl.loc[[pick_idx],['IP', 'ERA', 'W', 'SO', 'SV', 'WHIP']])
else:
return make_table(pd.DataFrame({}))
@app.callback(
Output('proj-standings-table','children'),
[Input('players','children'),
Input('proj-type-radioitems','value')]
)
def update_proj_standings(players_json,proj_type):
df = pd.read_json(players_json)
dfg=df.groupby('Team')[['AB', 'H', 'R', 'HR', 'RBI', 'SB', 'IP', 'ER', 'W',
'SO', 'SV', 'H.P','BB']].sum().reset_index().sort_values('Team')
dfg['AVG'] = (dfg['H']/dfg['AB']).round(3)
dfg['ERA'] = (9*dfg['ER']/dfg['IP']).round(2)
dfg['WHIP'] = ((dfg['BB']+dfg['H.P'])/dfg['IP']).round(2)
ranks = {'Team':dfg.Team}
for m in ['R', 'HR', 'RBI', 'SB','AVG', 'W','SO', 'SV']:
ranks.update({m: dfg[m].rank(ascending=False)})
for m in ['ERA','WHIP']:
ranks.update({m: dfg[m].rank()})
    rdf = pd.DataFrame(ranks, index=dfg.index)
from us_geo_helper import USGeoHelper
import math
import pandas as pd
import unittest
class GeoTests(unittest.TestCase):
def assertLatLong(self, ll1, ll2):
self.assertAlmostEqual(ll1[0], ll2[0], places=2)
self.assertAlmostEqual(ll1[1], ll2[1], places=2)
class SingleTests(GeoTests):
def test_zips(self):
u = USGeoHelper()
self.assertLatLong(u.zipToCoord(12065), (42.852, -73.786))
# 10009 in NYC; 1. lots of people 2. land_mi2 < 1
self.assertGreater(u.zipInfo(10009)[0], 50000)
self.assertLess(u.zipInfo(10009)[2], 1)
# not a real zip code
with self.assertRaises(KeyError):
u.zipToCoord(90200)
def test_cities(self):
u = USGeoHelper()
self.assertLatLong(u.stateCityToCoord("NY", "New York"), (40.664, -73.939))
# LA has at least 1 million people
self.assertGreater(u.stateCityInfo("ca", "los angeles")[0], 1e6)
# Carmel NY not in df; check online lookup
self.assertLatLong(u.stateCityToCoord("Ny", "carmel"), (41.430, -73.680))
def test_inv(self):
u = USGeoHelper()
self.assertEqual(u.coordToZip(42.852, -73.786), 12065)
self.assertEqual(u.coordToStateCity(40.664, -73.939), ("ny", "new york"))
class DataFrameTests(GeoTests):
def test_zipToStateCity(self):
u = USGeoHelper()
og = pd.DataFrame([[85719, 0], [94043, None], [1234, 2], [90200, 3], [None, 4]])
og.columns = ["zip", "n"]
df = u.zipToStateCityDf(og, "zip", False)
df.set_index("zip", inplace=True)
self.assertEqual(df.shape[0], 2)
self.assertEqual(df.loc[85719, "state"], "az")
self.assertEqual(df.loc[85719, "city"], "south tucson")
self.assertEqual(df.loc[94043, "city"], "mountain view")
# when using online, nothing gets left out except the None
df = u.zipToStateCityDf(og, "zip", True)
self.assertEqual(df.shape[0], 4)
# cutoff too strict; nothing makes it past
df = u.zipToStateCityDf(og, "zip", True, 0.0001)
self.assertEqual(df.shape[0], 0)
def test_cleanZips(self):
u = USGeoHelper()
        og = pd.DataFrame([[85719, 0], [94043, None], [1234, 2], [90200, 3], [None, 4]])
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import numpy as np
import pandas as pd
from datetime import datetime, timedelta, time
import calendar
import seaborn as sns
from hypnospy import Wearable
from hypnospy import Experiment
import warnings
class Viewer(object):
"""
Class used for plotting sleep, activity and HR signals from the Wearable.data df.
"""
def __init__(self, input: {Wearable, Experiment}):
if input is None:
raise ValueError("Invalid value for input.")
elif type(input) is Wearable:
self.wearables = [input]
elif type(input) is Experiment:
self.wearables = input.get_all_wearables()
sns.set_context("talk", font_scale=1.3, rc={"axes.linewidth": 2, 'image.cmap': 'plasma', })
plt.rcParams['font.size'] = 18
plt.rcParams['image.cmap'] = 'plasma'
plt.rcParams['axes.linewidth'] = 2
plt.rc('font', family='serif')
@staticmethod
def __get_details(alphas, colors, edgecolors, labels, part, index,
default_alpha=1.0, default_color="black", default_edgecolor=None, default_label="label"):
alpha, color, edgecolor, label = default_alpha, default_color, default_edgecolor, default_label
if alphas is not None and part in alphas:
alpha = alphas[part]
if isinstance(alpha, list):
alpha = alpha[index]
if colors is not None and part in colors:
color = colors[part]
if isinstance(color, list):
color = color[index]
if edgecolors is not None and part in edgecolors:
edgecolor = edgecolors[part]
if isinstance(edgecolor, list):
edgecolor = edgecolor[index]
if labels is not None and part in labels:
label = labels[part]
if isinstance(label, list):
label = label[index]
return alpha, color, edgecolor, label
@staticmethod
def get_day_label(df):
s = ""
startdate = df.index[0]
enddate = df.index[-1]
if startdate.day == enddate.day:
s = "%d - %s\n %s" % (
startdate.day, calendar.month_name[startdate.month][:3], calendar.day_name[startdate.dayofweek])
else:
if startdate.month == enddate.month:
s = "%d/%d - %s\n %s/%s" % (
startdate.day, enddate.day, calendar.month_name[startdate.month][:3],
calendar.day_name[startdate.dayofweek][:3], calendar.day_name[enddate.dayofweek][:3])
else:
s = "%d - %s/%d - %s\n %s/%s" % (
startdate.day, calendar.month_name[startdate.month][:3], enddate.day,
calendar.month_name[enddate.month][:3],
calendar.day_name[startdate.dayofweek][:3], calendar.day_name[enddate.dayofweek][:3])
return s
def view_signals(self, signal_categories: list = ["activity", "hr", "pa_intensity", "sleep"],
other_signals: list = [], signal_as_area: list = [], resample_to: str = None,
sleep_cols: list = [], select_days: list = None, zoom: list = ["00:00:00", "23:59:59"],
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
text: list = []
):
# Many days, one day per panel
for wearable in self.wearables:
Viewer.view_signals_wearable(wearable, signal_categories, other_signals, signal_as_area, resample_to,
sleep_cols, select_days, zoom, alphas, colors, edgecolors, labels, text)
@staticmethod
def view_signals_wearable(wearable: Wearable, signal_categories: list, other_signals: list, signal_as_area: list,
resample_to: str, sleep_cols: list, select_days: list, zoom: list,
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
text: list = []):
# Convert zoom to datatime object:
assert len(zoom) == 2
zoom_start = datetime.strptime(zoom[0], '%H:%M:%S')
zoom_end = datetime.strptime(zoom[1], '%H:%M:%S')
textstr = 'day: validation id \n'
cols = []
for signal in signal_categories:
if signal == "activity":
cols.append(wearable.get_activity_col())
elif signal == "hr":
if wearable.get_hr_col():
cols.append(wearable.get_hr_col())
else:
raise KeyError("HR is not available for PID %s" % wearable.get_pid())
elif signal == "pa_intensity":
if hasattr(wearable, 'pa_cutoffs') and hasattr(wearable, 'pa_names'):
for pa in wearable.pa_names:
if pa in wearable.data.keys():
cols.append(pa)
else:
raise ValueError("PA Intensity levels not available for PID %s" % (wearable.get_pid()))
elif signal == "sleep":
for sleep_col in sleep_cols:
if sleep_col not in wearable.data.keys():
raise ValueError("Could not find sleep_col (%s). Aborting." % sleep_col)
cols.append(sleep_col)
elif signal == "diary" and wearable.diary_onset in wearable.data.keys() and \
wearable.diary_offset in wearable.data.keys():
cols.append(wearable.diary_onset)
cols.append(wearable.diary_offset)
else:
cols.append(signal)
if len(cols) == 0:
raise ValueError("Aborting: Empty list of signals to show.")
if wearable.data.empty:
warnings.warn("Aborting: Dataframe for PID %s is empty." % wearable.get_pid())
return
cols.append(wearable.time_col)
for col in set(other_signals + signal_as_area):
cols.append(col)
if "validation" in text:
df_plot = wearable.data[cols + ['hyp_invalid'] ].set_index(wearable.time_col)
else:
df_plot = wearable.data[cols].set_index(wearable.time_col)
if resample_to is not None:
df_plot = df_plot.resample(resample_to).mean()
# Add column for experiment day. It will be resampled using the the mean
cols.append(wearable.experiment_day_col)
changed_experiment_hour = False
if not Viewer.__is_default_zoom(zoom_start, zoom_end) and zoom_start.hour != wearable.hour_start_experiment:
changed_experiment_hour = True
saved_start_hour = wearable.hour_start_experiment
wearable.change_start_hour_for_experiment_day(zoom_start.hour)
if resample_to is not None:
df_plot[wearable.experiment_day_col] = wearable.data[
[wearable.time_col, wearable.experiment_day_col]].set_index(wearable.time_col).resample(resample_to).median()
else:
df_plot[wearable.experiment_day_col] = wearable.data[
[wearable.time_col, wearable.experiment_day_col]].set_index(wearable.time_col)[wearable.experiment_day_col]
if changed_experiment_hour:
wearable.change_start_hour_for_experiment_day(saved_start_hour)
# Daily version
# dfs_per_day = [pd.DataFrame(group[1]) for group in df_plot.groupby(df_plot.index.day)]
# Based on the experiment day gives us the correct chronological order of the days
if select_days is not None:
df_plot = df_plot[df_plot[wearable.experiment_day_col].isin(select_days)]
if df_plot.empty:
raise ValueError("Invalid day selection: no remaining data to show.")
dfs_per_group = [pd.DataFrame(group[1]) for group in df_plot.groupby(wearable.experiment_day_col)]
fig, ax1 = plt.subplots(len(dfs_per_group), 1, figsize=(14, 8))
if len(dfs_per_group) == 1:
ax1 = [ax1]
for idx in range(len(dfs_per_group)):
maxy = 2
df_panel = dfs_per_group[idx]
if "activity" in signal_categories:
alpha, color, edgecolor, label = Viewer.__get_details(alphas, colors, edgecolors, labels, "activity",
None, default_label="Activity")
maxy = max(maxy, df_panel[wearable.get_activity_col()].max())
ax1[idx].plot(df_panel.index, df_panel[wearable.get_activity_col()], label=label, linewidth=2,
color=color, alpha=alpha)
if "pa_intensity" in signal_categories:
#TODO: colors should not be limited to only these four
pa_predefined_colors = ["palegoldenrod", "honeydew", "palegreen", "forestgreen"]
for i in range(len(wearable.pa_names)):
pa_filter = df_panel[wearable.pa_names[i]]
for j in range(len(wearable.pa_names)):
if i != j:
pa_filter &= (~df_panel[wearable.pa_names[j]])
ax1[idx].fill_between(df_panel.index, 0, maxy, where=pa_filter, label=wearable.pa_names[i],
alpha=alpha, facecolor=pa_predefined_colors[i], edgecolor=pa_predefined_colors[i])
if "sleep" in signal_categories:
facecolors = ['royalblue', 'green', 'orange']
endy = 0
alpha = 1
addition = (maxy / len(sleep_cols)) if len(sleep_cols) > 0 else maxy
for i, sleep_col in enumerate(sleep_cols):
starty = endy
endy = endy + addition
sleeping = df_panel[sleep_col] # TODO: get a method instead of an attribute
ax1[idx].fill_between(df_panel.index, starty, endy, where=sleeping, facecolor=facecolors[i],
alpha=0.7, label=sleep_col)
if "diary" in signal_categories and wearable.diary_onset in df_panel.keys() and wearable.diary_offset in df_panel.keys():
diary_event = df_panel[
(df_panel[wearable.diary_onset] == True) | (df_panel[wearable.diary_offset] == True)].index
ax1[idx].vlines(x=diary_event, ymin=0, ymax=maxy, facecolor='black', alpha=alpha, label='Diary',
linestyles="dashed")
if "validation" in text and "hyp_invalid" in df_panel.keys():
textstr = textstr + str(idx) + ": " + str(df_panel['hyp_invalid'].unique()[0]) + '\n'
for i, col in enumerate(other_signals):
# colors = ["orange", "violet", "pink", "gray"] # Change to paramters
ax1[idx].plot(df_panel.index, df_panel[col], label=col, linewidth=1, color=colors[i], alpha=alpha)
endy = 0
addition = 0 if len(signal_as_area) == 0 else (maxy / len(signal_as_area))
for i, col in enumerate(signal_as_area):
alpha, color, edgecolor, label = Viewer.__get_details(alphas, colors, edgecolors, labels, "area", i,
default_label=col, default_color="blue")
starty = endy
endy = endy + addition
ax1[idx].fill_between(df_panel.index, starty, endy, where=df_panel[col], facecolor=color,
alpha=alpha, label=label)
# configure time limits (y-axis) for plot.
ax1[idx].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=True, rotation=0)
ax1[idx].set_facecolor('snow')
# If the user has not specified a zoom...
if Viewer.__is_default_zoom(zoom_start, zoom_end):
new_start_datetime = df_panel.index[0] - timedelta(
hours=(df_panel.index[0].hour - wearable.hour_start_experiment) % 24,
minutes=df_panel.index[0].minute, seconds=df_panel.index[0].second),
new_end_datetime = df_panel.index[0] - timedelta(
hours=(df_panel.index[0].hour - wearable.hour_start_experiment) % 24,
minutes=df_panel.index[0].minute, seconds=df_panel.index[0].second) + timedelta(minutes=1439)
else:
new_start_date = df_panel.index[0].date()
new_start_datetime = datetime(new_start_date.year, new_start_date.month, new_start_date.day,
zoom_start.hour, zoom_start.minute, zoom_start.second)
new_end_date = df_panel.index[-1].date()
new_end_datetime = datetime(new_end_date.year, new_end_date.month, new_end_date.day, zoom_end.hour,
zoom_end.minute, zoom_end.second)
if new_end_datetime < new_start_datetime:
print("Changing it here")
new_end_datetime = datetime(new_end_date.year, new_end_date.month, new_end_date.day + 1,
int(zoom_end.hour), int(zoom_end.minute), int(zoom_end.second))
new_start_datetime = pd.to_datetime(new_start_datetime)
new_end_datetime = pd.to_datetime(new_end_datetime)
ax1[idx].set_xlim(new_start_datetime, new_end_datetime)
y_label = Viewer.get_day_label(df_panel)
ax1[idx].set_ylabel("%s" % y_label, rotation=0, horizontalalignment="right", verticalalignment="center")
ax1[idx].set_xticks([])
ax1[idx].set_yticks([])
# create a twin of the axis that shares the x-axis
if "hr" in signal_categories:
alpha, color, edgecolor, label = Viewer.__get_details(alphas, colors, edgecolors, labels, "hr", None,
default_label="HR", default_color="red")
ax2 = ax1[idx].twinx()
ax2.plot(df_panel.index, df_panel[wearable.get_hr_col()], label=label, color=color)
ax2.set_ylim(df_panel[wearable.get_hr_col()].min() - 5, df_panel[wearable.get_hr_col()].max() + 5)
ax2.set_xticks([])
ax2.set_yticks([])
ax1[0].set_title("PID = %s" % wearable.get_pid(), fontsize=16)
ax1[-1].set_xlabel('Time')
ax1[-1].xaxis.set_minor_locator(dates.HourLocator(interval=4)) # every 4 hours
ax1[-1].xaxis.set_minor_formatter(dates.DateFormatter('%H:%M')) # hours and minutes
handles, labels = ax1[-1].get_legend_handles_labels()
# handles2, labels2 = ax2.get_legend_handles_labels()
# fig.legend(handles + handles2, labels + labels2, loc='lower center', ncol=4)
# return fig
# ax.figure.savefig('%s_signals.pdf' % (self.get_pid()))
# fig.suptitle("%s" % self.get_pid(), fontsize=16)
fig.legend(handles, labels, loc='lower center', ncol=len(cols), fontsize=14, shadow=True)
# place a text box in upper left in axes coords
if "validation" in text and "hyp_invalid" in wearable.data.columns:
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
fig.text(0.93, 0.87, textstr, fontsize=14,
verticalalignment='top', bbox=props)
fig.savefig('%s_signals.pdf' % (wearable.get_pid()), dpi=300, transparent=True, bbox_inches='tight')
plt.show()
plt.close()
def view_signals_ml_format(self, signal_categories: list = ["activity", "sleep"],
other_signals: list = [], signal_as_area: list = [],
sleep_cols: list = [], select_days: list = None, zoom: list = ["00:00:00", "23:59:59"],
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
text: list = []
):
# Many days, one day per panel
for wearable in self.wearables:
Viewer.view_signals_wearable_ml_format(wearable, signal_categories, other_signals, signal_as_area,
sleep_cols, select_days, zoom, alphas, colors, edgecolors, labels, text)
@staticmethod
def view_signals_wearable_ml_format(wearable: Wearable, signal_categories: list, other_signals: list, signal_as_area: list,
sleep_cols: list, select_days: list, zoom: list,
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
text: list = []):
# Convert zoom to datatime object:
assert len(zoom) == 2
zoom_start = datetime.strptime(zoom[0], '%H:%M:%S')
zoom_end = datetime.strptime(zoom[1], '%H:%M:%S')
textstr = 'day: validation id \n'
cols = []
for signal in signal_categories:
if signal == "activity":
cols.append(wearable.get_activity_col())
elif signal == "sleep":
for sleep_col in sleep_cols:
if sleep_col not in wearable.data.keys():
raise ValueError("Could not find sleep_col (%s). Aborting." % sleep_col)
cols.append(sleep_col)
else:
cols.append(signal)
if len(cols) == 0:
raise ValueError("Aborting: Empty list of signals to show.")
if wearable.data.empty:
warnings.warn("Aborting: Dataframe for PID %s is empty." % wearable.get_pid())
return
cols.append(wearable.time_col)
for col in set(other_signals + signal_as_area):
cols.append(col)
if "validation" in text:
df_plot = wearable.data[cols + ['hyp_invalid'] ].set_index(wearable.time_col)
else:
df_plot = wearable.data[cols].set_index(wearable.time_col)
# Add column for experiment day. It will be resampled using the the mean
cols.append(wearable.experiment_day_col)
changed_experiment_hour = False
if not Viewer.__is_default_zoom(zoom_start, zoom_end) and zoom_start.hour != wearable.hour_start_experiment:
changed_experiment_hour = True
saved_start_hour = wearable.hour_start_experiment
wearable.change_start_hour_for_experiment_day(zoom_start.hour)
df_plot[wearable.experiment_day_col] = wearable.data[
[wearable.time_col, wearable.experiment_day_col]].set_index(wearable.time_col)[wearable.experiment_day_col]
if select_days is not None:
df_plot = df_plot[df_plot[wearable.experiment_day_col].isin(select_days)]
if df_plot.empty:
raise ValueError("Invalid day selection: no remaining data to show.")
dfs_per_group = [pd.DataFrame(group[1]) for group in df_plot.groupby(wearable.experiment_day_col)]
max_sequence_length = [len(g) for g in dfs_per_group]
max_sequence_length = max(max_sequence_length)
fig, ax1 = plt.subplots(len(dfs_per_group), 1, figsize=(14, 8))
if len(dfs_per_group) == 1:
ax1 = [ax1]
for idx in range(len(dfs_per_group)):
maxy = 2
df_panel = dfs_per_group[idx]
padding_values = np.zeros(max_sequence_length - len(df_panel))
if "activity" in signal_categories:
y = df_panel[wearable.get_activity_col()]
alpha, color, edgecolor, label = Viewer.__get_details(alphas, colors, edgecolors, labels, "activity",
None, default_label="Activity")
maxy = max(maxy, df_panel[wearable.get_activity_col()].max())
ax1[idx].plot(df_panel.index, y, label=label, linewidth=2,
color=color, alpha=alpha)
if "sleep" in signal_categories:
facecolors = ['royalblue', 'green', 'orange']
endy = 0
alpha = 1
addition = (maxy / len(sleep_cols)) if len(sleep_cols) > 0 else maxy
for i, sleep_col in enumerate(sleep_cols):
starty = endy
endy = endy + addition
sleeping = df_panel[sleep_col] # TODO: get a method instead of an attribute
ax1[idx].fill_between(df_panel.index, starty, endy, where=sleeping, facecolor=facecolors[i],
alpha=0.7, label=sleep_col)
if "validation" in text and "hyp_invalid" in df_panel.keys():
textstr = textstr + str(idx) + ": " + str(df_panel['hyp_invalid'].unique()[0]) + '\n'
for i, col in enumerate(other_signals):
# colors = ["orange", "violet", "pink", "gray"] # Change to paramters
ax1[idx].plot(df_panel.index, df_panel[col], label=col, linewidth=1, color=colors[i], alpha=alpha)
endy = 0
addition = 0 if len(signal_as_area) == 0 else (maxy / len(signal_as_area))
for i, col in enumerate(signal_as_area):
alpha, color, edgecolor, label = Viewer.__get_details(alphas, colors, edgecolors, labels, "area", i,
default_label=col, default_color="blue")
starty = endy
endy = endy + addition
ax1[idx].fill_between(df_panel.index, starty, endy, where=df_panel[col], facecolor=color,
alpha=alpha, label=label)
ax1[idx].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=True, rotation=0)
ax1[idx].tick_params(axis='x', which='major', labelsize='small')
ax1[idx].set_facecolor('snow')
new_start_datetime = df_panel.index[0]
freq = wearable.get_frequency_in_secs()
new_end_datetime = new_start_datetime + pd.DateOffset(seconds=freq*max_sequence_length)
ax1[idx].set_xlim(new_start_datetime, new_end_datetime)
y_label = idx
ax1[idx].set_ylabel("%s" % y_label, rotation=0, horizontalalignment="right", verticalalignment="center")
ax1[idx].xaxis.set_major_locator(dates.DayLocator(interval=1))
ax1[idx].xaxis.set_major_formatter(dates.DateFormatter('%m-%d'))
ax1[idx].xaxis.set_minor_locator(dates.HourLocator(interval=4)) # every 4 hours
ax1[idx].xaxis.set_minor_formatter(dates.DateFormatter('%H:%M')) # hours and minutes
ax1[idx].set_yticks([])
ax1[0].set_title("PID = %s" % wearable.get_pid(), fontsize=16)
ax1[-1].set_xlabel('Epochs')
handles, labels = ax1[-1].get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center', ncol=len(cols), fontsize=14, shadow=True)
# place a text box in upper left in axes coords
if "validation" in text and "hyp_invalid" in wearable.data.columns:
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
fig.text(0.93, 0.87, textstr, fontsize=14,
verticalalignment='top', bbox=props)
fig.savefig('%s_signals_ml_format.pdf' % (wearable.get_pid()), dpi=300, transparent=True, bbox_inches='tight')
plt.subplots_adjust(hspace=1.0)
plt.show()
plt.close()
# gets mean index of awake and sleep sequences
@staticmethod
def get_rolling_mean(df_plot):
# get last index of awake and sleep sequences
df_plot['index_percentage'] = df_plot.reset_index().index / df_plot.reset_index().index.max()
df = df_plot.reset_index().groupby(['ml_sequence', "sleep_period_annotation"]).apply(lambda x: x.index[-1])
# append row with value 0 to df to take care of NaN in first row
ind = pd.MultiIndex.from_arrays([[-1], [False]])
t0 = pd.Series(0, index=ind)
df = t0.append(df)
# for every sequence, (first index + last index) / 2
m = df.rolling(2).mean()
df = pd.DataFrame(df, columns=['index'])
df['mean'] = m
# remove the added -1 index
df = df.loc[0:]
df['mean'] = df['mean'].astype(int)
return df_plot.iloc[df['mean']]['index_percentage']
@staticmethod
def view_ml_format_in_one_row(wearable: Wearable,
signal_categories: list,
sleep_cols: list,
alphas: dict = None,
colors: dict = None,
edgecolors: dict = None,
labels: dict = None):
# Convert zoom to datatime object:
textstr = 'day: validation id \n'
cols = []
for signal in signal_categories:
if signal == "activity":
cols.append(wearable.get_activity_col())
elif signal == "sleep":
for sleep_col in sleep_cols:
if sleep_col not in wearable.data.keys():
raise ValueError("Could not find sleep_col (%s). Aborting." % sleep_col)
cols.append(sleep_col)
else:
cols.append(signal)
if len(cols) == 0:
raise ValueError("Aborting: Empty list of signals to show.")
if wearable.data.empty:
warnings.warn("Aborting: Dataframe for PID %s is empty." % wearable.get_pid())
return
cols.append(wearable.time_col)
df_plot = wearable.data[cols].set_index(wearable.time_col)
### Add column for experiment day. It will be resampled using the the mean
cols.append(wearable.experiment_day_col)
changed_experiment_hour = False
df_plot[wearable.experiment_day_col] = wearable.data[
[wearable.time_col, wearable.experiment_day_col]].set_index(wearable.time_col)[wearable.experiment_day_col]
### Init fig plot
fig, ax1 = plt.subplots(1, 1, figsize=(21, 3))
maxy = 2
### Plot Activity
if "activity" in signal_categories:
y = df_plot[wearable.get_activity_col()]
alpha, color, edgecolor, label = Viewer.__get_details(alphas, colors, edgecolors, labels, "activity",
None, default_label="Activity")
maxy = max(maxy, df_plot[wearable.get_activity_col()].max())
ax1.plot(df_plot.index, y, label=label, linewidth=2,
color=color, alpha=alpha)
### Plot Sleep
if "sleep" in signal_categories:
facecolors = ['royalblue', 'green', 'orange']
endy = 0
alpha = 1
addition = (maxy / len(sleep_cols)) if len(sleep_cols) > 0 else maxy
for i, sleep_col in enumerate(sleep_cols):
starty = endy
endy = endy + addition
sleeping = df_plot[sleep_col] # TODO: get a method instead of an attribute
ax1.fill_between(df_plot.index, starty, endy, where=~sleeping, facecolor='red',
alpha=0.3, label=sleep_col, edgecolor='red')
ax1.fill_between(df_plot.index, starty, endy, where=sleeping, facecolor=facecolors[i],
alpha=0.3, label=sleep_col, edgecolor='purple')
# X-tick label
labels = []
for day in np.unique(df_plot[wearable.experiment_day_col]):
labels.append('Active ' + str(day + 1))
labels.append('Sleep ' + str(day + 1))
# remove last sleep
labels = labels[:-1]
# get indices at the middle of awake and sleep sequences
mean_indices = Viewer.get_rolling_mean(df_plot)
for label, awake_sleep_index in zip(labels, mean_indices):
ax1.text(awake_sleep_index, -0.1, label, fontsize=14,
verticalalignment='center',
horizontalalignment='center',
transform=ax1.transAxes)
### X-tick params
ax1.tick_params(axis='x', which='both', bottom=True, top=False, labelbottom=True, rotation=0,
labelsize='medium', pad=20)
ax1.tick_params(axis='x', which='major', bottom=False, labelbottom=False)
ax1.tick_params(axis='y', which='major')
ax1.set_facecolor('snow')
new_start_datetime = df_plot.index[0]
new_end_datetime = df_plot.index[-1]
ax1.set_xlim(new_start_datetime, new_end_datetime)
ax1.set_ylim(df_plot[wearable.get_activity_col()].min() - 5, df_plot[wearable.get_activity_col()].max() + 5)
y_label = 'Activity'
ax1.set_ylabel("%s" % y_label, rotation=0, horizontalalignment="right", verticalalignment="center")
ax1.xaxis.set_minor_locator(dates.HourLocator(byhour=[15])) # every 4 hours
ax1.xaxis.set_minor_formatter(dates.DateFormatter('%H:%M')) # hours and minutes
ax1.set_title("PID = %s" % wearable.get_pid(), fontsize=16)
ax1.set_xlabel('Time')
print(ax1.get_xticks())
plt.subplots_adjust(hspace=1.0)
plt.show()
return ax1, plt
@staticmethod
def __is_default_zoom(zoom_start, zoom_end):
return zoom_start.time() == time(0, 0, 0) and zoom_end.time() == time(23, 59, 59)
def view_signals_multipanel(self, signals: list,
select_day,
signals_as_area: list = [],
dashes_across: list = [],
resample_to: str = None, zoom: list = ["00:00:00", "23:59:59"],
alphas: dict = None, colors: dict = None, edgecolors: dict = None, labels: dict = None,
):
# One single day -- multiple panels
for wearable in self.wearables:
# Convert zoom to datatime object:
assert len(zoom) == 2
zoom_start = datetime.strptime(zoom[0], '%H:%M:%S')
zoom_end = datetime.strptime(zoom[1], '%H:%M:%S')
changed_experiment_hour = False
if not Viewer.__is_default_zoom(zoom_start, zoom_end) and zoom_start.hour != wearable.hour_start_experiment:
changed_experiment_hour = True
saved_start_hour = wearable.hour_start_experiment
wearable.change_start_hour_for_experiment_day(zoom_start.hour)
df_plot = wearable.data[wearable.data[wearable.experiment_day_col] == select_day]
if df_plot.empty:
raise ValueError("Invalid day selection: no remaining data to show. Possible days are:",
df_plot[wearable.experiment_day_col].unique)
nplots = len(signals) + len(signals_as_area)
cols = list(set(signals + signals_as_area + dashes_across)) + [wearable.time_col]
df_plot = df_plot[cols].set_index(wearable.time_col)
if resample_to is not None:
df_plot = df_plot.resample(resample_to).mean()
if changed_experiment_hour:
wearable.change_start_hour_for_experiment_day(saved_start_hour)
fig, ax = plt.subplots(len(signals) + len(signals_as_area), 1, figsize=(14, 8))
if len(signals) == 1:
ax = [ax]
for idx in range(len(signals)):
signal = signals[idx]
maxy = 2
alpha, color, edgecolor, label = self.__get_details(alphas, colors, edgecolors, labels, "signal", idx)
maxy = max(maxy, df_plot[signal].max())
ax[idx].plot(df_plot.index, df_plot[signal], label=label, linewidth=2, color=color, alpha=alpha)
ax[idx].set_xticks([])
ax[idx].set_yticks([])
ax[idx].set_ylabel("%s" % label, rotation=0, horizontalalignment="right", verticalalignment="center")
plot_idx = len(signals)
for idx in range(len(signals_as_area)):
signal = signals_as_area[idx]
alpha, color, edgecolor, label = self.__get_details(alphas, colors, edgecolors, labels, "area", idx)
idx = idx + plot_idx # shifts idx to the point to the correct panel
maxy = max(maxy, df_plot[signal].max())
ax[idx].fill_between(df_plot.index, 0, maxy, where=df_plot[signal], facecolor=color, alpha=alpha,
label=label, edgecolor=edgecolor)
ax[idx].set_xticks([])
ax[idx].set_yticks([])
ax[idx].set_ylabel("%s" % label, rotation=0, horizontalalignment="right", verticalalignment="center")
for idx in range(nplots):
for dashes in dashes_across:
alpha, color, edgecolor, label = self.__get_details(alphas, colors, edgecolors, labels, "dashes_across", idx)
block = df_plot[df_plot[dashes] == True]
dash_event = [block.index[0], block.index[-1]]
ax[idx].vlines(x=dash_event, ymin=0, ymax=maxy, colors=color, alpha=alpha, label=label,
linestyles="dashed")
# Default options, we use hour_start_experiment
if Viewer.__is_default_zoom(zoom_start, zoom_end):
new_start_datetime = df_plot.index[0] - timedelta(
hours=(df_plot.index[0].hour - wearable.hour_start_experiment) % 24,
minutes=df_plot.index[0].minute, seconds=df_plot.index[0].second),
new_end_datetime = df_plot.index[0] - timedelta(
hours=(df_plot.index[0].hour - wearable.hour_start_experiment) % 24,
minutes=df_plot.index[0].minute, seconds=df_plot.index[0].second) + timedelta(minutes=1439)
else:
new_start_date = df_plot.index[0].date()
new_start_datetime = datetime(new_start_date.year, new_start_date.month, new_start_date.day,
zoom_start.hour, zoom_start.minute, zoom_start.second)
new_end_date = df_plot.index[-1].date()
new_end_datetime = datetime(new_end_date.year, new_end_date.month, new_end_date.day, zoom_end.hour,
zoom_end.minute, zoom_end.second)
if new_end_datetime < new_start_datetime:
print("Changing it here")
new_end_datetime = datetime(new_end_date.year, new_end_date.month, new_end_date.day + 1,
zoom_end.hour, zoom_end.minute, zoom_end.second)
new_start_datetime = pd.to_datetime(new_start_datetime)
            new_end_datetime = pd.to_datetime(new_end_datetime)
import numpy as np
import os.path
import pandas as pd
import sys
#find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
# print(sys.path)
# print(os.path)
class LeslieInputs(ModelSharedInputs):
"""
Input class for Leslie.
"""
def __init__(self):
"""Class representing the inputs for Leslie"""
super(LeslieInputs, self).__init__()
self.init_pop_size = pd.Series([], dtype="float")
self.stages = pd.Series([], dtype="float")
self.l_m = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
class LeslieOutputs(object):
"""
Output class for Leslie.
"""
def __init__(self):
"""Class representing the outputs for Leslie"""
super(LeslieOutputs, self).__init__()
self.out_pop_matrix = pd.Series(name="out_pop_matrix")
self.out_fecundity = pd.Series(name="out_fecundity")
self.out_growth = pd.Series(name="out_growth")
self.out_survival = pd.Series(name="out_survival")
self.out_eigdom = pd.Series(name="out_eigdom")
self.out_eigleft = pd.Series(name="out_eigleft")
        self.out_eigright = pd.Series(name="out_eigright")
from pytorch_lightning.core.step_result import TrainResult
import pandas as pd
import torch
import math
import numpy as np
from src.utils import simple_accuracy
from copy import deepcopy
from torch.optim.lr_scheduler import LambdaLR
class WeightEMA(object):
def __init__(self, model, ema_model, alpha=0.999):
self.model = model
self.ema_model = ema_model
self.ema_model.eval()
self.alpha = alpha
self.ema_has_module = hasattr(self.ema_model, 'module')
# Fix EMA. https://github.com/valencebond/FixMatch_pytorch thank you!
self.param_keys = [k for k, _ in self.ema_model.named_parameters()]
self.buffer_keys = [k for k, _ in self.ema_model.named_buffers()]
for p in self.ema_model.parameters():
p.requires_grad_(False)
def step(self):
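        # Parameter-wise exponential moving average:
        #   ema_param <- alpha * ema_param + (1 - alpha) * model_param
        # buffers (e.g. BatchNorm running statistics) are copied over as-is further down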
needs_module = hasattr(self.model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = self.model.state_dict()
esd = self.ema_model.state_dict()
for k in self.param_keys:
if needs_module:
j = 'module.' + k
else:
j = k
model_v = msd[j].detach()
ema_v = esd[k]
esd[k].copy_(ema_v * self.alpha + (1. - self.alpha) * model_v)
for k in self.buffer_keys:
if needs_module:
j = 'module.' + k
else:
j = k
esd[k].copy_(msd[j])
class UnlabelledStatisticsLogger:
def __init__(self, level='image', save_frequency=500, artifacts_path=None, name='unlabelled'):
self.level = level
self.batch_dfs = []
self.save_frequency = save_frequency
self.artifacts_path = artifacts_path
self.logging_df = pd.DataFrame()
self.name = name
self.strategies = set()
def log_statistics(self,
u_scores: torch.tensor,
u_targets: torch.tensor,
u_pseudo_targets: torch.tensor,
u_ids: torch.tensor,
current_epoch: int,
strategy_name=None,
current_globalstep: int = None):
if self.level == 'batch':
raise NotImplementedError()
# Needs to be rewriten to consider u_scores
# certain_ul_targets = u_targets[thresholding_mask == 1.0].cpu().numpy()
# all_ul_targets = u_targets.cpu().numpy()
# result.log('certain_ul_acc', certain_ul_acc, on_epoch=False, on_step=True, sync_dist=True)
# result.log('all_ul_acc', all_ul_acc, on_epoch=False, on_step=True, sync_dist=True)
# result.log('max_probs', u_scores.mean(), on_epoch=False, on_step=True, sync_dist=True)
# result.log('n_certain', thresholding_mask.sum(), on_epoch=False, on_step=True, sync_dist=True)
elif self.level == 'image':
batch_df = pd.DataFrame(index=range(len(u_ids)))
batch_df['image_id'] = u_ids.tolist()
batch_df['score'] = u_scores.tolist()
batch_df['correctness'] = (u_pseudo_targets == u_targets).tolist()
batch_df['epoch'] = current_epoch
if current_globalstep is not None:
batch_df['datastep'] = current_globalstep
if strategy_name is not None:
batch_df['strategy'] = strategy_name
self.strategies.add(strategy_name)
self.batch_dfs.append(batch_df)
def on_epoch_end(self, current_epoch):
if self.level:
for batch_df in self.batch_dfs:
self.logging_df = self.logging_df.append(batch_df, ignore_index=True)
self.batch_dfs = []
if self.level == 'image' and current_epoch % self.save_frequency == 0:
epochs_range = self.logging_df['epoch'].min(), self.logging_df['epoch'].max()
csv_path = f'{self.artifacts_path}/{self.name}_epochs_{epochs_range[0]:05d}_{epochs_range[1]:05d}.csv'
self.logging_df.to_csv(csv_path, index=False)
self.logging_df = pd.DataFrame()
def get_optimal_threshold(self, from_datasteps, accuracy=0.95, strategy_name=None):
if strategy_name is not None:
logging_df = pd.concat(self.batch_dfs[-from_datasteps * len(self.strategies):], ignore_index=True)
logging_df = logging_df[logging_df.strategy == strategy_name]
else:
            logging_df = pd.concat(self.batch_dfs[-from_datasteps:], ignore_index=True)
import numpy as np
"""
This Monte Carlo algorithm approximates the "true" value of the parameter(s) of
interest using a random walk of normally distributed steps whose mean is either 0
or the last accepted step in the walk for that parameter.
"""
truth=5
tss = []
for j in range(50):
ts = []
stepsizes = [.01,.05,.1,.5,1,5,10]
index=0
while len(ts) < len(stepsizes):
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
stepsize=stepsizes[index]
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
if score1 <=.5:
ts.append(t)
index+=1
tss.append(ts)
tss=np.array(tss)
stepsize = stepsizes[np.argmin(np.mean(tss,axis=0))]
truth = 5
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
dat = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv")
pd.set_option("display.max_columns",500)
dat.tail()
covars = ['age','anaemia','creatinine_phosphokinase',
'diabetes','ejection_fraction','high_blood_pressure',
'platelets','serum_creatinine','serum_sodium',
'sex','smoking','time']
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
Yodds = Y/(1-Y)
Yodds = np.where(Yodds==np.inf,1e16,1e-16)
Ylogodds = np.log(Yodds)
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
X['int']=1
random.seed(42)
index = np.array(random.choices([1,2,3,4,5],k=len(X)))
xv = X[index==5].copy()
yv = Ylogodds[index==5].copy()
xt = X[index!=5].copy()
yt = Ylogodds[index!=5].copy()
coefs = np.linalg.pinv(xt.T@xt)@(xt.T@yt)
predtlogodds = xt@coefs
predvlogodds = xv@coefs
predt=np.exp(predtlogodds)/(1+np.exp(predtlogodds))
predt=np.where(predt>.5,1,0)
predv=np.exp(predvlogodds)/(1+np.exp(predvlogodds))
predv=np.where(predv>.5,1,0)
act_t = np.exp(yt)/(1+np.exp(yt))
act_t=np.where(act_t>.5,1,0)
act_v = np.exp(yv)/(1+np.exp(yv))
act_v=np.where(act_v>.5,1,0)
logregt_acc=sum(np.where(predt==act_t,1,0))/len(predt)
logregv_acc = sum(np.where(predv==act_v,1,0))/len(predv)
print("logreg training acc:",logregt_acc,"val acc:",logregv_acc)
from sklearn.linear_model import LogisticRegression
xv = X[index==5].copy()
yv = Y[index==5].copy()
xt = X[index!=5].copy()
yt = Y[index!=5].copy()
lr = LogisticRegression(fit_intercept=False,solver = 'newton-cg',penalty='l2')
lr.fit(xt,yt)
sum(np.where(lr.predict(xt)==yt,1,0))/len(yt)
sum(np.where(lr.predict(xv)==yv,1,0))/len(yv)
#BASE KNN: choose k by cross-validated precision (recall is computed but unused below)
from sklearn.neighbors import KNeighborsClassifier
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=4
k=16
def model_precision(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
initscores.append(score)
score=np.mean(initscores)
return score
def model_recall(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[ | pd.Series(index) | pandas.Series |
# importing required modules
import pandas as pd
import numpy as np
import requests
import datetime
import re
import io
def clean_up_dates(x):
cleaned_date = x
    m = re.match(r'(\d{4})-(\d{4})', str(x))
if m:
cleaned_date = m.groups()[-1]
else:
        n = re.match(r'(\d{4})Q\d{1}', str(x))
if n:
cleaned_date = n.groups()[-1]
else:
            o = re.match(r'(\d{4})M\d{2}', str(x))
if o:
cleaned_date = o.groups()[-1]
return cleaned_date
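# A few illustrative inputs and the values clean_up_dates would return, based on
# the regexes above (the example values are hypothetical, not from the data):
#   '2010-2015' -> '2015'   (year ranges keep the final year)
#   '2019Q3'    -> '2019'   (quarterly codes keep the year)
#   '2020M07'   -> '2020'   (monthly codes keep the year)
#   2018        -> 2018     (anything else passes through unchanged)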
def amida_action(x):
action = "Updated. No action required."
end = x['end_year-check']
start = x['start_year-check']
if (not end) & (not start):
action = "Not updated (both start and end dates)"
elif end & (not start):
action = "Not updated (start date only)"
elif (not end) & (start):
action = "Not updated (end date only)"
if (x['Site'] == 'gv') & (action != "Updated. No action required."):
action = "[Gov dataset - For Luis' action] " + action
return action
def main():
date_today = datetime.date.today().isoformat()
# getting user input for data files
ans_key = input("""What's the filename of the CSV file which contains the ideal start and end year dates for each dataset?
Note that this code assumes that this CSV has the following columns:
['slug', 'title', 'source_name', 'source_link', 'legal_text',
'legal_link', 'id', 'Dataset / sub-dataset', 'Site', 'Data Coverage',
'Start Year', 'Latest Year']""")
# Load Data
df_answer_key = | pd.read_csv(ans_key + '.csv', encoding='latin-1') | pandas.read_csv |
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import pandas as pd
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
'''
The day of the week (Tuesday)
The temperature low and high, with a function of your own to convert into Celsius
For each element you scrape, The name of the item you targetted (ex: DailyContent--daypartDate--3MM0J)
'''
df = pd.DataFrame(columns=["Item_name","weekday","date","Description","Temperature °C"])
df = | pd.DataFrame(columns=['Weekday', 'Date', 'Description', 'Temperature (Cº)', 'Item targetted']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
sia = SIA()
NAME_COL = [1,2]
NUMER_ORD = [3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19]
CAT_ORD = [14,27,29]
BIN_PRES = [20,21,22,23,24]
COMM = [25,26]
REL_COLS = []
REL_COLS.extend(NUMER_ORD)
REL_COLS.extend(CAT_ORD)
REL_COLS.extend(BIN_PRES)
REL_COLS.extend(COMM)
REL_COLS = sorted(REL_COLS)
sheet_names = pd.ExcelFile("BaseData.xlsx").sheet_names
sheet_names_main = [sheet_name for (i,sheet_name) in enumerate(sheet_names) if i%4==2]
sheet_names_results = [sheet_name for (i,sheet_name) in enumerate(sheet_names) if i%4==3]
def main():
data4_list = []
for sheet_name in sheet_names_main:
data = pd.read_excel("BaseData.xlsx", sheet_name=sheet_name, header=0)
new_header = data.iloc[0]
data = data[1:]
data.columns = new_header
data2 = data.iloc[:,REL_COLS]
data3 = data.iloc[:,NAME_COL]
for i in NUMER_ORD:
            MIN, MAX = data.iloc[:,i].min(), data.iloc[:,i].max()  # min/max for min-max scaling
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: (x - MIN)/(MAX - MIN) )
for i in CAT_ORD:
if(i==14):
def helper_14(x):
if("somewhat" in str(x).lower()): return 0.5
elif("highly" in str(x).lower()): return 1.0
else: return 0.0
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: helper_14(x))
if(i==27):
def helper_27(x):
if("can be there" in str(x).lower()): return 1.0
elif("no chance" in str(x).lower()): return 0.0
else: return 0.5
try: data.iloc[:,i] = data.iloc[:,i].apply(lambda x: helper_27(x))
except: continue
if(i==29):
def helper_29(x):
if("low" in str(x).lower()): return 0.0
elif("high" in str(x).lower()): return 1.0
else: return 0.5
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: helper_29(x))
for i in BIN_PRES:
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: int(pd.isna(x)))
for i in COMM:
def helper_COMM(x):
if( | pd.isna(x) | pandas.isna |
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 00:27:52 2018
@author: sindu
About: Feature Selection on Genome Data"""
import pandas as pd
import numpy as np
import math
import operator
from sklearn import metrics
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn import linear_model
filename = 'GenomeTrainXY.txt'
data = pd.read_csv('GenomeTrainXY.txt', header=-1).as_matrix()
testDataFile = "GenomeTestX.txt"
testData = pd.read_csv("GenomeTestX.txt", header=-1).as_matrix()
headerinfo = data[0]
classlabelinfo = list(set(headerinfo))
clbl, clblcnt = np.unique(headerinfo, return_counts=True)
classlabelcountinfo = dict(zip(clbl, clblcnt))
n_genomesize = len(headerinfo)
k_groupsize = len(clbl)
df = pd.DataFrame(data)
dftranspose = df.transpose()
fscores = pd.DataFrame()
fscorenumval = None
fscoredenom = None
fscorenumdf = pd.DataFrame()
fscoredenomdf = pd.DataFrame()
#calculate mean of all features for a specific class label
featuremeandata = df.transpose().groupby(dftranspose[:][0]).mean()
featuremeandata = featuremeandata.loc[:, 1:]
centroidData = featuremeandata.transpose().as_matrix()
#calculate variance of all features for a specific class label
featurevardata = df.transpose().groupby(dftranspose[:][0]).var()
featurevardata = featurevardata.loc[:, 1:]
#calculate average of each of the feature
featureavg = df.mean(axis=1)
featureavgdata = pd.DataFrame(featureavg).transpose()
featureavgdata = featureavgdata.loc[:, 1:]
def getfeaturemeandata(classlblval, val):
meanrowdata = | pd.DataFrame() | pandas.DataFrame |
"""A collection of implementations of ProcessorBase"""
import logging
from typing import List
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder as SKLearnOneHotEncoder
from ds.processing.base import ProcessBase
from ds.io import AsyncFetch
log = logging.getLogger(__name__)
class DropColumns(ProcessBase):
"""Drops columns from the dataset."""
def fit(self, df: pd.DataFrame):
        self._dropped = df[[col for col in df.columns if col in self.columns]]  # keep dropped columns so inverse_transform can restore them
return df.drop(columns=self.columns, errors='ignore')
def transform(self, df: pd.DataFrame):
return self.fit(df)
def inverse_transform(self, df: pd.DataFrame):
        return pd.concat([df, self._dropped], axis=1)
class ReplaceNaN(ProcessBase):
"""Replaces all NaN in the dataset."""
def __init__(self, value: int = 0):
        self.value = value
def fit(self, df: pd.DataFrame):
return df.fillna(self.value)
def transform(self, df: pd.DataFrame):
return self.fit(df)
def inverse_transform(self, df: pd.DataFrame):
log.warning("ReplaceNaN.inverse_transform() currently doesn't support an inverse_transfrom. Original DataFrame returned")
return df
class OneHotEncode(ProcessBase):
"""OneHotEncodes categorical variables."""
def __init__(self, columns, **kwargs):
super().__init__(columns)
self.encoder = SKLearnOneHotEncoder(**kwargs, sparse=False)
def fit(self, df: pd.DataFrame):
X = self.encoder.fit_transform(df[self.columns])
self._new_col_names = self.encoder.get_feature_names(self.columns)
df = df.drop(columns=self.columns)
new_df = pd.DataFrame(X, columns=self._new_col_names, index=df.index)
df = pd.concat((df, new_df), axis=1)
return df
def transform(self, df: pd.DataFrame):
X = self.encoder.transform(df[self.columns])
df = df.drop(columns=self.columns)
new_df = pd.DataFrame(X, columns=self._new_col_names, index=df.index)
df = pd.concat((df, new_df), axis=1)
return df
def inverse_transform(self, df):
        X = self.encoder.inverse_transform(df[self._new_col_names])
df = df.drop(columns=self._new_col_names)
df[self.columns] = X
return df
class SKLearnProcessor(ProcessBase):
"""A wrapper for any SKLearn processing objects."""
def __init__(self, sklearn_class, columns: List[str]):
super().__init__(columns)
self._class = sklearn_class
def fit(self, df: pd.DataFrame):
df[self.columns] = self._class.fit_transform(df[self.columns])
return df
def transform(self, df: pd.DataFrame):
df[self.columns] = self._class.transform(df[self.columns])
return df
def inverse_transform(self, df: pd.DataFrame):
df[self.columns] = self._class.inverse_transform(df[self.columns])
return df
class DatetimeEncoder(ProcessBase):
def __init__(self, columns: List[str], min_freq: str = "month"):
super().__init__(columns)
self.min_freq = min_freq
def fit(self, df: pd.DataFrame):
for col in self.columns:
df = self.sinusoidal_position_encoding(df, col)
return df
def transform(self, df: pd.DataFrame):
return self.fit(df)
def inverse_transform(self, df: pd.DataFrame):
raise NotImplementedError
def sinusoidal_position_encoding(self, df: pd.DataFrame, col: str):
"""
Encodes the position of hour, day, month seaonality. RBF could be used in place.
"""
hour = df[col].dt.hour / 24
day = df[col].dt.day / 30.5
month = df[col].dt.month / 12
year = df[col].dt.year
if self.min_freq in ['hour']:
df[f'{col}_sin_hour'] = np.sin(2 * np.pi * hour)
df[f'{col}_cos_hour'] = np.cos(2 * np.pi * hour)
if self.min_freq in ["hour", "day"]:
df[f'{col}_sin_day'] = np.sin(2 * np.pi * day)
df[f'{col}_cos_day'] = np.cos(2 * np.pi * day)
if self.min_freq in ["hour", "day", "month"]:
df[f'{col}_sin_month'] = np.sin(2 * np.pi * month)
df[f'{col}_cos_month'] = np.cos(2 * np.pi * month)
df[f'{col}_year'] = year
df = df.drop(columns=[col])
return df
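# Illustrative check of the cyclic encoding above (comment only, hypothetical
# times): with min_freq='hour', 06:00 maps to hour=0.25 so sin=1.0, cos=0.0,
# while 18:00 maps to hour=0.75 so sin=-1.0, cos~0.0; 23:00 and 01:00 end up
# close together on the sin/cos circle, which is the point of encoding time this
# way rather than as a raw 0-23 integer.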
class Sentence2Vec(ProcessBase):
"""A huggingface sentence encoder leveraing a Transformers model."""
def __init__(self, columns: List[str], model:str = "paraphrase-MiniLM-L3-v2"):
#self.model = SentenceTransformer(model)
self.model = None
self.columns = columns
def fit(self, df):
for col in self.columns:
X = self.model.encode(df[col])
            new_df = pd.DataFrame(X, columns=[f"{col}_emb_{i}" for i in range(X.shape[1])])  # one column per embedding dimension
df = df.drop(columns=[col])
df = | pd.concat((df, new_df), axis=1) | pandas.concat |
# Project: fuelmeter-tools
# Created by:T.Morgan # Created on: 12/3/2019
import numpy as np
import pandas as pd
#import puma.stats as stats
import os
class Neighborhood:
"""
Description: A group of houses and associated metrics for that group of houses
Attributes:
name-- name of the neighborhood. Generally a prefix also associated with house names.
    houses--list of houses belonging to a neighborhood.
"""
def __init__(self,name,houses):
self.name = name
self.houses = houses #list of House objects in a neighborhood
def addHouse(self,house):
'''add a house to the neighborhood'''
self.houses.append(house)
def getTotalGallons(self,excludeHouses):
'''get the total gallons consumed in a neighborhood for a report period
:param: excludeHouses -- a list of houses that won't be included in the sum'''
tg = sum([h.report.total_gallons for h in self.houses if (h.name not in excludeHouses) & (h.report is not None)])
return tg
def getTotalArea(self,excludeHouses):
'''get the sum square footage for all houses combined in the neighborhood, except for houses in the excludeHouses list
:param: excludeHouses -- a list of houses that won't be included in the sum'''
area = sum([h.report.area for h in self.houses if (h.name not in excludeHouses) & (h.report is not None)])
return area
def applyStatsModels(self):
        '''calls statistical models run in R to model fuel consumption in relation to indoor
        and outdoor temperature and separately model fuel consumption based on location.
        Two separate R scripts are used, one for spatial modeling, one for temperature.'''
import subprocess
#R is used for modeling with gamms and glmms
self.writeNeigborhoodStatsInput() #write a combined text file for all houses
processPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','R') #path to R scripts
rscript = "model_data.r"
rsource = os.path.join(processPath, rscript)
subprocess.call(["RScript",rsource]) #images are placed in report folders
#subprocess.call(["C:\\Program Files\\R\\R-4.0.0\\bin\\x64\\RScript", "--vanilla", rsource],shell=True) # images are placed in report folders
# generate the glmm spatial pngs
rscript = "spatial_model.R"
rsource = os.path.join(processPath, rscript)
subprocess.call(["RScript", rsource])
#subprocess.call(["C:\\Program Files\\R\\R-4.0.0\\bin\\x64\\RScript", "--vanilla", rsource], shell=True)
return
def writeNeigborhoodStatsInput(self):
'''coellesces necessary data for stats models into a csv file'''
filename = 'multiStoveInput.csv'
with open(os.path.join(*[os.getcwd(),"..","..","data","text"],filename), "w+") as outfile:
outfile.write("date," + ",".join(self.houses[0].report.statsInput.columns))
outfile.write("\n")
for h in self.houses:
try:
h.report.statsInput.to_csv(os.path.join(*[os.getcwd(),"..","..","data","text"],filename), mode='a', header=False)
except Exception as e:
print(h.name)
print(e)
return
def getUsageTable(self,excludeHouses):
        '''Generates a data frame with location information and fuel consumption for each house in the neighborhood;
        houses in excludeHouses are not included in the dataframe'''
newdf = pd.DataFrame(data=None, columns=['house', 'lat','long','fuel'])
houses = [{'house':h.name,'lat':h.location[1],'long':h.location[0], 'fuel':h.report.getMeanGallonsPerMonthPerAreaByYear().mean()} for h in self.houses if (h.name not in excludeHouses) & (h.report is not None)]
for h in houses:
newdf = newdf.append(h, ignore_index=True)
return newdf
def getMeanGallonsPerFt(self,excludeHouses):
'''Generate metrics for average fuel consumption and standard deviation of the sample in fuel consumption'''
mg = np.nanmean([h.report.gallons_per_ft for h in self.houses if (h.name not in excludeHouses) & (h.report is not None)])
std= np.nanstd([h.report.gallons_per_ft for h in self.houses if (h.name not in excludeHouses) & (h.report is not None)])
return mg,std
def getMeanMonthlyGPFByYear(self,excludeHouses):
        '''Generate pandas series of mean and standard deviation in gallons per month per area
        for each year of data for all houses in the neighborhood combined'''
mg = pd.concat([h.report.getMeanGallonsPerMonthPerAreaByYear() for h in self.houses if (h.name not in excludeHouses) & (h.report is not None)])
mgy = mg.groupby(pd.Grouper(freq="Y")).mean()
std = mg.groupby( | pd.Grouper(freq="Y") | pandas.Grouper |
import pandas as pd
import numpy as np
import random
def one_hot(df):
    """
    @param df pandas DataFrame with an 'iso' column to encode
    @return a DataFrame with the one-hot encoded continent columns appended
    """
    dummies = pd.get_dummies(df['iso'], prefix='Continent', drop_first=False)
    df = pd.concat([df, dummies], axis=1)
    return df
def caida_preprocess(data_CAIDA):
# impact = label = #total hijacked ASes / #total ASes with path to prefix
data_CAIDA['impact'] = (data_CAIDA.iloc[:, 4].astype(float)) / (data_CAIDA.iloc[:, 2].astype(float))
# delete rows where impact > 1 or impact < 0
data_CAIDA = data_CAIDA.drop(data_CAIDA[(data_CAIDA.impact < 0) | (data_CAIDA.impact > 1)].index)
# change the name of the column
data_CAIDA.rename(columns={list(data_CAIDA)[2]: 'total_ASes_with_path_to_prefix'}, inplace=True)
# delete rows where total ASes with path to prefix < 1000
data_CAIDA = data_CAIDA.drop(data_CAIDA[(data_CAIDA.total_ASes_with_path_to_prefix.astype(float) < 1000.0)].index)
return data_CAIDA
data_CAIDA = pd.read_csv('impact__CAIDA20190801_sims2000_hijackType0_per_monitor_onlyRC_NEW_with_mon_ASNs.csv', sep=",", dtype='unicode')
new_data_CAIDA = caida_preprocess(data_CAIDA)
data = | pd.read_csv('../AS_improvement_scores/metric_data.csv', sep=",", dtype='unicode') | pandas.read_csv |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import warnings
import abc
import pandas as pd
import numpy as np
from fentool.pre_process.transformers import Minmax, Standard
from fentool.pre_process.encoders import Encoder
from fentool.models import Model
logger = logging.getLogger('fentool')
logger.setLevel(logging.INFO)
class Fentool(object):
""" Fentool feature engineering tool
Parameters
----------
sup_learning_type: String
Determines the type of supervised learning, should be
regression or classification
model_type: String
Sets the type of "regression" or "classification" model.
Currently only regression is implemented, the values of the model
type should be 'linreg', 'lasso', 'lassocv', 'ridge', 'ridgecv',
'rfr', 'svr'.
encoder_type: String
        Sets the type of encoding for the data sets. Currently
        one-hot encoding and ordinal encoding are available. The values
should be 'one-hot' or 'Ordinal'
input_treatment: String
Sets the type of treatment for the input(feature set)
        Currently only normalization (minmax) and
standardization (mean and standard deviation) is implemented.
The values should be 'normalize' or 'standardize'.
output_treatment: String
Set the type of treatment for the output(target).
        Currently only normalization (minmax) and
standardization (mean and standard deviation) is implemented.
The values should be 'normalize' or 'standardize'.
time_series: bool, Default=False
Flag evaluating if the problem is a time series problem.
Currently Fentool does not have support for time-series.
fillna: String
Method to remove or replace nans, nulls, etc. Should be "None",
"drop", "mean", "zeros"
test_size: Float, Default=0.3
        Sets the fraction of data held out as the test set for evaluating the model.
null_tol_ratio: Float, Default=0.8
A value that determines the maximum tolerance for
fentool to handle datasets with many null values. Must be
between 0 and 1.
null_warn_ratio: Float, Default=0.3
A value that determines the lower threshold for
        fentool to give warnings with datasets containing
many null values. Must be between 0 and 1.
"""
def __init__(self,
sup_learning_type='regression',
model_type='linreg',
encoder_type=None,
input_treatment=None,
output_treatment=None,
time_series=False,
fillna='drop',
test_size=0.3,
null_tol_ratio=0.8,
null_warn_ratio=0.3,
**kwargs):
self.sup_learning_type = sup_learning_type
self.model_type = model_type
self.encoder_type = encoder_type
self.input_treatment = input_treatment
self.output_treatment = output_treatment
self.time_series = time_series
self.fillna = fillna
self.test_size = test_size
self.null_tol_ratio = null_tol_ratio
self.null_warn_ratio = null_warn_ratio
self.target = []
self.model = []
self.df = pd.DataFrame()
self.x = | pd.DataFrame() | pandas.DataFrame |
'''Python script to benchmark different companies'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class Benchmark:
def __init__(self, companies):
print("INIT BENCHMARK")
self.mrr, self.fin_perf, self.oper_stats, self.cash_flow_stat, self.oth_metrics, self.rev_retention, self.logo_retention, self.cumulative = {}, {}, {}, {}, {}, {}, {}, {}
self.companies = companies;
for company in companies.keys():
self.mrr[company] = | pd.DataFrame(companies[company]["Rev Analysis"]["MRR by Customer"]) | pandas.DataFrame |
# coding: utf-8
# The code takes the corrected file from *sso_freeze* (hardwired by user) and performs a coordinate transformation on the X-ray emission to wrap the PSF around Jupiter
# In[1]:
#Authors: <NAME> (<EMAIL>), adapted from <NAME>'s 'gochandra' IDL script
"""All the relevant packages are imported for code below"""
import go_chandra_analysis_tools as gca_tools # import the defined functions to analyse Chandra data and perform coordinate transformations
import custom_cmap as make_me_colors # import custom color map script
import label_maker as make_me_labels # import script to label multiple subplots
import numpy as np
import pandas as pd
import scipy
from scipy import interpolate
from astropy.io import ascii
from astropy.io import fits as pyfits
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import colors
import matplotlib.gridspec as gridspec
import os
# AU to meter conversion - useful later on (probably a function built in already)
AU_2_m = 1.49598E+11
AU_2_km = 1.49598E+8
# obsIDs = ['2519', '15669', '18608', '20000', '18678']
obsIDs = ['18609', '20001']
'''obsIDs = []
dirs = os.listdir('/Users/mcentees/Desktop/Chandra')
for x in dirs:
if os.path.isdir(os.path.join('/Users/mcentees/Desktop/Chandra', x)):
obsIDs.append(x)
obsIDs.remove('ArLac')
obsIDs.remove('G21')
obsIDs.remove('Horizons_files')
obsIDs.remove('JGR_light_curves')
obsIDs.remove('18303')
obsIDs.remove('1862')
obsIDs.remove('2519')
obsIDs.remove('15669')
obsIDs.remove('18608')
obsIDs.remove('20000')
obsIDs.remove('18678')
obsIDs.remove('22146')'''
# Accounting for different filepaths of ObsIDs that originally had SAMP values and others that did not.
df = | pd.read_csv('/Users/mcentees/Desktop/Chandra/ObsIDs_with_samp.txt', header=None, delimiter='\t') | pandas.read_csv |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import functools
import itertools
from typing import Callable, Union
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
try:
import cudf
import cupy as cp
import dask_cudf
from cudf.core.column import as_column, build_column
from cudf.utils.dtypes import is_list_dtype, is_string_dtype
HAS_GPU = True
except ImportError:
HAS_GPU = False
cp = None
cudf = None
try:
# Dask >= 2021.5.1
from dask.dataframe.core import hash_object_dispatch
except ImportError:
# Dask < 2021.5.1
from dask.dataframe.utils import hash_object_dispatch
try:
import nvtx
annotate = nvtx.annotate
except ImportError:
# don't have nvtx installed - don't annotate our functions
def annotate(*args, **kwargs):
def inner1(func):
@functools.wraps(func)
def inner2(*args, **kwargs):
return func(*args, **kwargs)
return inner2
return inner1
if HAS_GPU:
DataFrameType = Union[pd.DataFrame, cudf.DataFrame]
SeriesType = Union[pd.Series, cudf.Series]
else:
DataFrameType = Union[pd.DataFrame]
SeriesType = Union[pd.Series]
class ExtData(enum.Enum):
"""Simple Enum to track external-data types"""
DATASET = 0
ARROW = 1
CUDF = 2
PANDAS = 3
DASK_CUDF = 4
DASK_PANDAS = 5
PARQUET = 6
CSV = 7
def get_lib():
return cudf if HAS_GPU else pd
def _is_dataframe_object(x):
# Simple check if object is a cudf or pandas
# DataFrame object
if not HAS_GPU:
return isinstance(x, pd.DataFrame)
return isinstance(x, (cudf.DataFrame, pd.DataFrame))
def _is_series_object(x):
# Simple check if object is a cudf or pandas
# Series object
if not HAS_GPU:
return isinstance(x, pd.Series)
return isinstance(x, (cudf.Series, pd.Series))
def _is_cpu_object(x):
# Simple check if object is a cudf or pandas
# DataFrame object
return isinstance(x, (pd.DataFrame, pd.Series))
def is_series_or_dataframe_object(maybe_series_or_df):
return _is_series_object(maybe_series_or_df) or _is_dataframe_object(maybe_series_or_df)
def _hex_to_int(s, dtype=None):
def _pd_convert_hex(x):
if pd.isnull(x):
return pd.NA
return int(x, 16)
if isinstance(s, pd.Series):
# Pandas Version
if s.dtype == "object":
s = s.apply(_pd_convert_hex)
return s.astype("Int64").astype(dtype or "Int32")
else:
# CuDF Version
if s.dtype == "object":
s = s.str.htoi()
return s.astype(dtype or np.int32)
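# Illustrative usage (hypothetical values, comment only): on the pandas path,
# hex strings are parsed with int(x, 16) and missing entries become <NA>, e.g.
#   _hex_to_int(pd.Series(["0a", "ff", None]))  ->  [10, 255, <NA>] with dtype Int32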
def _random_state(seed, like_df=None):
"""Dispatch for numpy.random.RandomState"""
if not HAS_GPU or isinstance(like_df, (pd.DataFrame, pd.Series)):
return np.random.RandomState(seed)
else:
return cp.random.RandomState(seed)
def _arange(size, like_df=None, dtype=None):
"""Dispatch for numpy.arange"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.arange(size, dtype=dtype)
else:
return cp.arange(size, dtype=dtype)
def _array(x, like_df=None, dtype=None):
"""Dispatch for numpy.array"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.array(x, dtype=dtype)
else:
return cp.array(x, dtype=dtype)
def _zeros(size, like_df=None, dtype=None):
"""Dispatch for numpy.array"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.zeros(size, dtype=dtype)
else:
return cp.zeros(size, dtype=dtype)
def _hash_series(s):
"""Row-wise Series hash"""
if not HAS_GPU or isinstance(s, pd.Series):
# Using pandas hashing, which does not produce the
# same result as cudf.Series.hash_values(). Do not
# expect hash-based data transformations to be the
        # same on CPU and GPU. TODO: Fix this (maybe use
# murmurhash3 manually on CPU).
return hash_object_dispatch(s).values
else:
if _is_list_dtype(s):
return s.list.leaves.hash_values()
else:
return s.hash_values()
def _natural_log(df):
"""Natural logarithm of all columns in a DataFrame"""
if isinstance(df, pd.DataFrame):
return pd.DataFrame(np.log(df.values), columns=df.columns, index=df.index)
else:
return df.log()
def _series_has_nulls(s):
"""Check if Series contains any null values"""
if isinstance(s, pd.Series):
return s.isnull().values.any()
else:
return s._column.has_nulls
def _is_list_dtype(ser):
"""Check if Series contains list elements"""
if not HAS_GPU or isinstance(ser, pd.Series):
if not len(ser): # pylint: disable=len-as-condition
return False
return pd.api.types.is_list_like(ser.values[0])
return is_list_dtype(ser)
def _is_string_dtype(obj):
if not HAS_GPU:
return pd.api.types.is_string_dtype(obj)
else:
return is_string_dtype(obj)
def _flatten_list_column(s):
"""Flatten elements of a list-based column"""
if isinstance(s, pd.Series):
return pd.DataFrame({s.name: itertools.chain(*s)})
else:
return cudf.DataFrame({s.name: s.list.leaves})
def _concat_columns(args: list):
"""Dispatch function to concatenate DataFrames with axis=1"""
if len(args) == 1:
return args[0]
else:
_lib = cudf if HAS_GPU and isinstance(args[0], cudf.DataFrame) else pd
return _lib.concat(
[a.reset_index(drop=True) for a in args],
axis=1,
)
return None
def _read_parquet_dispatch(df: DataFrameType) -> Callable:
return _read_dispatch(df=df, fmt="parquet")
def _read_dispatch(df: DataFrameType = None, cpu=None, collection=False, fmt="parquet") -> Callable:
"""Return the necessary read_parquet function to generate
data of a specified type.
"""
if cpu or isinstance(df, pd.DataFrame) or not HAS_GPU:
_mod = dd if collection else pd
else:
_mod = dask_cudf if collection else cudf.io
_attr = "read_csv" if fmt == "csv" else "read_parquet"
return getattr(_mod, _attr)
def _parquet_writer_dispatch(df: DataFrameType, path=None, **kwargs):
"""Return the necessary ParquetWriter class to write
data of a specified type.
If `path` is specified, an initialized `ParquetWriter`
object will be returned. To do this, the pyarrow schema
will be inferred from df, and kwargs will be used for the
ParquetWriter-initialization call.
"""
_args = []
if isinstance(df, pd.DataFrame):
_cls = pq.ParquetWriter
if path:
_args.append(pa.Table.from_pandas(df, preserve_index=False).schema)
else:
_cls = cudf.io.parquet.ParquetWriter
if not path:
return _cls
ret = _cls(path, *_args, **kwargs)
if isinstance(df, pd.DataFrame):
ret.write_table = lambda df: _cls.write_table(
ret, pa.Table.from_pandas(df, preserve_index=False)
)
return ret
def _encode_list_column(original, encoded, dtype=None):
"""Convert `encoded` to be a list column with the
same offsets as `original`
"""
if isinstance(original, pd.Series):
# Pandas version (not very efficient)
offset = 0
new_data = []
for val in original.values:
size = len(val)
new_data.append(np.array(encoded[offset : offset + size], dtype=dtype))
offset += size
return pd.Series(new_data)
else:
# CuDF version
encoded = as_column(encoded)
if dtype:
encoded = encoded.astype(dtype, copy=False)
list_dtype = cudf.core.dtypes.ListDtype(encoded.dtype if dtype is None else dtype)
return build_column(
None,
dtype=list_dtype,
size=original.size,
children=(original._column.offsets, encoded),
)
def _pull_apart_list(original):
values = _flatten_list_column(original)
if isinstance(original, pd.Series):
offsets = | pd.Series([0]) | pandas.Series |
import pandas as pd
import numpy as np
START_PULL_UPS_UPPER_ANGLE_THRESHOLD = 40
END_PULL_UPS_UPPER_ANGLE_THRESHOLD = 130
TIME_FRAME_LIST = 20
reps_position = []
count_reps = 0
in_reps = 0
precedent_pos = 0
df_reps = pd.DataFrame(columns=['x_Nose','y_Nose','x_Neck','y_Neck','x_RShoulder','y_RShoulder','x_RElbow',
'y_RElbow','x_RWrist','y_RWrist','x_LShoulder','y_LShoulder','x_LElbow','y_LElbow','x_LWrist','y_LWrist',
'x_RHip','y_RHip','x_RKnee','y_RKnee','x_RAnkle','y_RAnkle','x_LHip','y_LHip','x_LKnee','y_LKnee','x_LAnkle','y_LAnkle',
'x_REye','y_REye','x_LEye','y_LEye','x_REar','y_REar','x_LEar','y_LEar','Right_Up_Angle','Left_Up_Angle','Right_Low_Angle','Left_Low_Angle'])
def start_reps_pull_ups(right_upper_angle,left_upper_angle,y_RWrist,y_LWrist,y_RElbow,y_LElbow):
if right_upper_angle < START_PULL_UPS_UPPER_ANGLE_THRESHOLD and left_upper_angle < START_PULL_UPS_UPPER_ANGLE_THRESHOLD and y_RWrist < y_RElbow and y_LWrist < y_LElbow:
return 1
else:
return 0
def count_pull_ups_rep(pos_list,right_upper_angle,left_upper_angle):
if right_upper_angle > 80 and left_upper_angle > 80 and mean_list(pos_list) >= 0.2:
return [] ,1
else:
return pos_list,0
def mean_list(pos_list):
if len(pos_list) < TIME_FRAME_LIST :
return 0
else:
return sum(pos_list[-TIME_FRAME_LIST:])/TIME_FRAME_LIST
df_human = pd.read_csv('./keypoints/IMG_6606human_1.csv')
del df_human['Unnamed: 0']
for k in range(len(df_human[:800])):
print(k)
val_start_reps = start_reps_pull_ups(df_human['Right_Up_Angle'][k],df_human['Left_Up_Angle'][k],df_human['y_RWrist'][k],df_human['y_LWrist'][k],df_human['y_RElbow'][k],df_human['y_LElbow'][k])
reps_position.append(val_start_reps)
reps_position, val_count = count_pull_ups_rep(reps_position,df_human['Right_Up_Angle'][k], df_human['Left_Up_Angle'][k])
if val_count:
count_reps = count_reps + 1
if val_start_reps:
in_reps = 1
if in_reps:
df_reps = df_reps.append(df_human.iloc[k])
if precedent_pos == 0 and val_start_reps == 1:
if count_reps == 0:
pass
else:
if len(df_reps) <= 30:
df_reps = | pd.DataFrame(columns=['x_Nose','y_Nose','x_Neck','y_Neck','x_RShoulder','y_RShoulder','x_RElbow',
'y_RElbow','x_RWrist','y_RWrist','x_LShoulder','y_LShoulder','x_LElbow','y_LElbow','x_LWrist','y_LWrist',
'x_RHip','y_RHip','x_RKnee','y_RKnee','x_RAnkle','y_RAnkle','x_LHip','y_LHip','x_LKnee','y_LKnee','x_LAnkle','y_LAnkle',
'x_REye','y_REye','x_LEye','y_LEye','x_REar','y_REar','x_LEar','y_LEar','Right_Up_Angle','Left_Up_Angle','Right_Low_Angle','Left_Low_Angle'])
else:
print(df_reps) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Tools producing reports of fairness, bias, or model performance measures
Contributors:
camagallen <<EMAIL>>
"""
import aif360.sklearn.metrics as aif
from functools import reduce
from IPython.display import HTML
import logging
import numpy as np
import pandas as pd
from sklearn.metrics import (mean_absolute_error, mean_squared_error,
precision_score, balanced_accuracy_score,
classification_report)
from scipy import stats
from warnings import catch_warnings, simplefilter, warn, filterwarnings
# Tutorial Libraries
from . import __performance_metrics as pmtrc, __fairness_metrics as fcmtrc
from .__fairness_metrics import eq_odds_diff, eq_odds_ratio
from .__preprocessing import (standard_preprocess, stratified_preprocess,
report_labels, y_cols)
from .__validation import ValidationError
from .utils import format_errwarn, iterate_cohorts, limit_alert
# ToDo: find better solution for these warnings
filterwarnings('ignore', module='pandas')
filterwarnings('ignore', module='sklearn')
''' Mini Reports '''
def classification_performance(y_true, y_pred, target_labels=None,
sig_fig:int=4):
""" Returns a pandas dataframe of the scikit-learn classification report,
formatted for use in fairMLHealth tools
Args:
y_true (array): Target values. Must be compatible with model.predict().
y_pred (array): Prediction values. Must be compatible with
model.predict().
target_labels (list of str): Optional labels for target values.
"""
if target_labels is None:
target_labels = [f"target = {t}" for t in set(y_true)]
report = classification_report(y_true, y_pred, output_dict=True,
target_names=target_labels)
report = pd.DataFrame(report).transpose()
# Move accuracy to separate row
accuracy = report.loc['accuracy', :]
report.drop('accuracy', inplace=True)
report.loc['accuracy', 'accuracy'] = accuracy[0]
report = report.round(sig_fig)
return report
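# Minimal usage sketch (hypothetical labels, not from the original module):
#   y_true = [0, 1, 1, 0]; y_pred = [0, 1, 0, 0]
#   classification_performance(y_true, y_pred)
# returns the sklearn classification report as a DataFrame with one row per
# target label (plus macro/weighted averages) and accuracy moved to its own row.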
def regression_performance(y_true, y_pred, sig_fig:int=4):
""" Returns a pandas dataframe of the regression performance metrics,
        similar in spirit to the classification_performance report above
Args:
y_true (array): Target values. Must be compatible with model.predict().
y_pred (array): Prediction values. Must be compatible with
model.predict().
"""
report = {}
y = y_cols()['disp_names']['yt']
yh = y_cols()['disp_names']['yh']
report[f'{y} Mean'] = np.mean(y_true.values)
report[f'{yh} Mean'] = np.mean(y_pred.values)
report['MSE'] = mean_squared_error(y_true, y_pred)
report['MAE'] = mean_absolute_error(y_true, y_pred)
report['Rsqrd'] = pmtrc.r_squared(y_true, y_pred)
report = pd.DataFrame().from_dict(report, orient='index'
).rename(columns={0: 'Score'})
report = report.round(sig_fig)
return report
''' Main Reports '''
def flag(df, caption:str="", sig_fig:int=4, as_styler:bool=True):
""" Generates embedded html pandas styler table containing a highlighted
version of a model comparison dataframe
Args:
df (pandas dataframe): Model comparison dataframe (see)
caption (str, optional): Optional caption for table. Defaults to "".
as_styler (bool, optional): If True, returns a pandas Styler of the
highlighted table (to which other styles/highlights can be added).
Otherwise, returns the table as an embedded HTML object. Defaults
to False .
Returns:
Embedded html or pandas.io.formats.style.Styler
"""
return __Flagger().apply_flag(df, caption, sig_fig, as_styler)
def bias_report(X, y_true, y_pred, features:list=None, pred_type="classification",
sig_fig:int=4, flag_oor=True, **kwargs):
""" Generates a table of stratified bias metrics
Args:
X (array-like): Sample features
y_true (array-like, 1-D): Sample targets
y_pred (array-like, 1-D): Sample target predictions
features (list): columns in X to be assessed if not all columns.
Defaults to None (i.e. all columns).
pred_type (str, optional): One of "classification" or "regression".
Defaults to "classification".
flag_oor (bool): if true, will apply flagging function to highlight
fairness metrics which are considered to be outside the "fair" range
(Out Of Range). Defaults to False.
priv_grp (int): Specifies which label indicates the privileged
group. Defaults to 1.
Raises:
ValueError
Returns:
pandas Data Frame
"""
validtypes = ["classification", "regression"]
if pred_type not in validtypes:
raise ValueError(f"Summary report type must be one of {validtypes}")
if pred_type == "classification":
df = __classification_bias_report(X=X, y_true=y_true, y_pred=y_pred,
features=features, **kwargs)
elif pred_type == "regression":
df = __regression_bias_report(X=X, y_true=y_true, y_pred=y_pred,
features=features, **kwargs)
#
if flag_oor:
df = flag(df, sig_fig=sig_fig)
else:
df = df.round(sig_fig)
return df
def data_report(X, Y, features:list=None, targets:list=None, add_overview=True,
sig_fig:int=4):
"""
Generates a table of stratified data metrics
Args:
X (pandas dataframe or compatible object): sample data to be assessed
Y (pandas dataframe or compatible object): sample targets to be
assessed. Note that any observations with missing targets will be
ignored.
features (list): columns in X to be assessed if not all columns.
Defaults to None (i.e. all columns).
targets (list): columns in Y to be assessed if not all columns.
Defaults to None (i.e. all columns).
add_overview (bool): whether to add a summary row with metrics for
"ALL FEATURES" and "ALL VALUES" as a single group. Defaults to True.
Requirements:
Each feature must be discrete to run stratified analysis. If any data
are not discrete and there are more than 11 values, the reporter will
reformat those data into quantiles
Returns:
pandas Data Frame
"""
#
def entropy(x):
# use float type for x to avoid boolean interpretation issues if any
        # pd.NA (integer na) values are present
try:
_x = x.astype(float)
except ValueError: # convert strings to numeric categories
_x = pd.Categorical(x).codes
return stats.entropy(np.unique(_x, return_counts=True)[1], base=2)
def __data_dict(x, col):
''' Generates a dictionary of statistics '''
res = {'Obs.': x.shape[0]}
if not x[col].isna().all():
res[col + " Mean"] = x[col].mean()
res[col + " Median"] = x[col].median()
res[col + " Std. Dev."] = x[col].std()
else:
# Force addition of second column to ensure proper formatting
# as pandas series
for c in [col + " Mean", col + " Median", col + " Std. Dev."]:
res[c] = np.nan
return res
#
X_df = stratified_preprocess(X=X, features=features)
Y_df = stratified_preprocess(X=Y, features=targets)
if X_df.shape[0] != Y_df.shape[0]:
raise ValidationError("Number of observations mismatch between X and Y")
#
if features is None:
features = X_df.columns.tolist()
strat_feats = [f for f in features if f in X_df.columns]
limit_alert(strat_feats, item_name="features")
#
if targets is None:
targets = Y_df.columns.tolist()
strat_targs = [t for t in targets if t in Y_df.columns]
limit_alert(strat_targs, item_name="targets", limit=3,
issue="This may make the output difficult to read.")
#
res = []
# "Obs."" included in index for ease of calculation
ix_cols = ['Feature Name', 'Feature Value', 'Obs.']
for t in strat_targs:
X_df[t] = Y_df[t]
feat_subset = [f for f in strat_feats if f != t]
if not any(feat_subset):
continue
res_t = __apply_featureGroups(feat_subset, X_df, __data_dict, t)
# convert id columns to strings to work around bug in pd.concat
for m in ix_cols:
res_t[m] = res_t[m].astype(str)
res.append(res_t.set_index(ix_cols))
results = pd.concat(res, axis=1).reset_index()
#
results['Obs.'] = results['Obs.'].astype(float).astype(int)
results['Value Prevalence'] = results['Obs.']/X_df.shape[0]
n_missing = X_df[strat_feats].replace('nan', np.nan).isna().sum().reset_index()
n_missing.columns = ['Feature Name', 'Missing Values']
entropy = X_df[strat_feats].apply(axis=0, func=entropy).reset_index()
entropy.columns = ['Feature Name', 'Entropy']
results = results.merge(n_missing, how='left', on='Feature Name'
).merge(entropy, how='left', on='Feature Name')
#
if add_overview:
res = []
for i, t in enumerate(strat_targs):
res_t = pd.DataFrame(__data_dict(X_df, t), index=[0])
res.append(res_t.set_index('Obs.'))
overview = pd.concat(res, axis=1).reset_index()
N_feat = len(strat_feats)
N_missing = n_missing['Missing Values'].sum()
N_obs = X_df.shape[0]
overview['Feature Name'] = "ALL FEATURES"
overview['Feature Value'] = "ALL VALUES"
        overview['Missing Values'] = N_missing
overview['Value Prevalence'] = (N_obs*N_feat-N_missing)/(N_obs*N_feat)
rprt = pd.concat([overview, results], axis=0, ignore_index=True)
else:
rprt = results
#
rprt = sort_report(rprt)
rprt = rprt.round(sig_fig)
return rprt
def performance_report(X, y_true, y_pred, y_prob=None, features:list=None,
pred_type="classification", sig_fig:int=4,
add_overview=True):
""" Generates a table of stratified performance metrics
Args:
X (pandas dataframe or compatible object): sample data to be assessed
y_true (array-like, 1-D): Sample targets
y_pred (array-like, 1-D): Sample target predictions
y_prob (array-like, 1-D): Sample target probabilities. Defaults to None.
features (list): columns in X to be assessed if not all columns.
Defaults to None (i.e. all columns).
pred_type (str, optional): One of "classification" or "regression".
Defaults to "classification".
add_overview (bool): whether to add a summary row with metrics for
"ALL FEATURES" and "ALL VALUES" as a single group. Defaults to True.
Raises:
ValueError
Returns:
pandas DataFrame
"""
validtypes = ["classification", "regression"]
if pred_type not in validtypes:
raise ValueError(f"Summary report type must be one of {validtypes}")
if pred_type == "classification":
df = __classification_performance_report(X, y_true, y_pred, y_prob,
features, add_overview)
elif pred_type == "regression":
df = __regression_performance_report(X, y_true, y_pred,
features, add_overview)
#
df = df.round(sig_fig)
return df
def sort_report(report):
""" Sorts columns in standardized order
Args:
report (pd.DataFrame): any of the stratified reports produced by this
module
Returns:
pandas DataFrame: sorted report
"""
yname = y_cols()['disp_names']['yt']
yhname = y_cols()['disp_names']['yh']
head_names = ['Feature Name', 'Feature Value', 'Obs.',
f'{yname} Mean', f'{yhname} Mean']
head_cols = [c for c in head_names if c in report.columns]
tail_cols = sorted([c for c in report.columns if c not in head_cols])
return report[head_cols + tail_cols]
def summary_report(X, prtc_attr, y_true, y_pred, y_prob=None, flag_oor=True,
pred_type="classification", priv_grp=1, sig_fig:int=4,
**kwargs):
""" Generates a summary of fairness measures for a set of predictions
relative to their input data
Args:
X (array-like): Sample features
prtc_attr (array-like, named): Values for the protected attribute
(note: protected attribute may also be present in X)
y_true (array-like, 1-D): Sample targets
y_pred (array-like, 1-D): Sample target predictions
y_prob (array-like, 1-D): Sample target probabilities. Defaults to None.
flag_oor (bool): if true, will apply flagging function to highlight
fairness metrics which are considered to be outside the "fair" range
(Out Of Range). Defaults to False.
pred_type (str, optional): One of "classification" or "regression".
Defaults to "classification".
priv_grp (int): Specifies which label indicates the privileged
group. Defaults to 1.
Raises:
ValueError
Returns:
pandas DataFrame
"""
validtypes = ["classification", "regression"]
if pred_type not in validtypes:
raise ValueError(f"Summary report type must be one of {validtypes}")
if pred_type == "classification":
df = __classification_summary(X=X, prtc_attr=prtc_attr, y_true=y_true,
y_pred=y_pred, y_prob=y_prob,
priv_grp=priv_grp, **kwargs)
elif pred_type == "regression":
df = __regression_summary(X=X, prtc_attr=prtc_attr, y_true=y_true,
y_pred=y_pred, priv_grp=priv_grp, **kwargs)
#
if flag_oor:
df = flag(df, sig_fig=sig_fig)
else:
df = df.round(sig_fig)
return df
''' Private Functions '''
@format_errwarn
def __apply_featureGroups(features, df, func, *args):
""" Iteratively applies a function across groups of each stratified feature,
collecting errors and warnings to be displayed succinctly after processing
Args:
features (list): columns of df to be iteratively analyzed
df (pd.DataFrame): data to be analyzed
func (function): a function accepting *args and returning a dictionary
Returns:
pandas DataFrame: set of results for each feature-value
"""
#
errs = {}
warns = {}
res = []
for f in features:
# Data are expected in string format
with catch_warnings(record=True) as w:
simplefilter("always")
try:
grp = df.groupby(f)
grp_res = grp.apply(lambda x: pd.Series(func(x, *args)))
except BaseException as e:
errs[f] = e
continue
if len(w) > 0:
warns[f] = w
grp_res = grp_res.reset_index().rename(columns={f: 'Feature Value'})
grp_res.insert(0, 'Feature Name', f)
res.append(grp_res)
if len(res) == 0:
results = pd.DataFrame(columns=['Feature Name', 'Feature Value'])
else:
results = | pd.concat(res, ignore_index=True) | pandas.concat |
import os
import shutil
import time
import urllib.parse
import pandas as pd
import scrapy
from bs4 import BeautifulSoup
from DataHouse.items import LiePin
liepin_job_list = []
LIEPIN_JOB_DATA_DIR = './DataSet/liepin/'
JOB_LIST = ['数据挖掘']
SLEEP_TIME = 3
class LiePinSpider(scrapy.Spider):
name = "liepin"
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Host': 'www.liepin.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/'
'58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'
}
def start_requests(self):
        if os.path.exists(LIEPIN_JOB_DATA_DIR):
            # os.removedirs only handles empty directories; clear any previous output files too
            shutil.rmtree(LIEPIN_JOB_DATA_DIR)
        os.makedirs(LIEPIN_JOB_DATA_DIR)
urls = ['https://www.liepin.com/zhaopin/?fromSearchBtn=2°radeFlag=0&init=-1&key=' +
urllib.parse.quote('数据挖掘') + '&curPage=%s' % str(i) for i in range(100)]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)
def parse(self, response):
soup = BeautifulSoup(response.text, 'html5lib')
joblist_ul = soup.find_all(class_='sojob-list')[0]
for li in joblist_ul.find_all('li'):
if li.i.b.get_text().strip() == '企':
jobProperty = '企'
elif li.i.b.get_text().strip() == '猎':
jobProperty = '猎'
elif li.i.b.get_text().strip() == '急':
jobProperty = '急'
else:
jobProperty = '无'
title = li.div.div.h3.a.get_text().strip()
jobid = li.div.div.h3.a['href'].strip().split('/')[-1].replace('.shtml', '')
salary = li.div.div.p['title'].split('_')[0].strip()
location = li.div.div.p['title'].split('_')[1].strip()
education = li.div.div.p['title'].split('_')[2].strip()
experience = li.div.div.p['title'].split('_')[3].strip()
publishTime = li.find(class_="time-info clearfix").time.get_text().strip()
feedback = li.find(class_="time-info clearfix").span.get_text().strip()
company = li.find(class_="company-info nohover").find_all('p')[0].a.get_text().strip()
industryField = li.find(class_="company-info nohover").find_all('p')[1].span.a.get_text().strip()
tags = [_.get_text().strip() for _ in
li.find(class_="company-info nohover").find_all('p')[2].find_all('span')]
liepin = LiePin(jobid=jobid, title=title, salary=salary, location=location, education=education,
experience=experience, company=company, industryField=industryField, tags=tags,
publishTime=publishTime, feedback=feedback, jobProperty=jobProperty)
liepin_job_list.append(liepin)
print(liepin)
def parse_detail_page(job_id):
"""
get job detailed description
:param job_id:
:return:
"""
headers = {
'Host': 'www.liepin.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Ubuntu Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'
}
desciption = ''
import requests
response = requests.get('https://www.liepin.com/job/%s.shtml' % str(job_id), headers=headers)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html5lib')
for _ in soup.find_all(class_="content content-word"):
desciption += _.get_text().strip()
else:
print('ERROR!!!')
return desciption.strip()
def write_txt(content, jobid):
with open(os.path.join(LIEPIN_JOB_DATA_DIR, '%s.txt') % jobid, mode='wt', encoding='UTF-8') as f:
f.write(content)
f.flush()
f.close()
description = parse_detail_page(jobid)
write_txt(description, jobid)
time.sleep(SLEEP_TIME)
def close(spider, reason):
df = | pd.DataFrame(liepin_job_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from math import sqrt
from dramkit.gentools import power
from dramkit.gentools import isnull
from dramkit.gentools import cal_pct
from dramkit.gentools import x_div_y
from dramkit.gentools import check_l_allin_l0
from dramkit.gentools import get_update_kwargs
from dramkit.gentools import con_count_ignore
from dramkit.gentools import replace_repeat_func_iter
from dramkit.datetimetools import diff_days_date
from dramkit.logtools.utils_logger import logger_show
from dramkit.plottools.plot_common import plot_series
from dramkit.plottools.plot_common import plot_series_conlabel
#%%
def signal_merge(data, sig1_col, sig2_col, merge_type=1):
'''
    Merge two signals into a single signal
    Parameters
    ----------
    data : pandas.DataFrame
        Data to process; must contain the columns given by ``sig1_col`` and ``sig2_col``
    sig1_col, sig2_col : str
        Signal columns; a value of -1 means buy (go long), 1 means sell (go short)
    merge_type : int
        How the two signals are merged:
        - 1: either signal on its own counts as a valid trade signal
        - 2: the trade signal is derived from the combined holdings of both signals (the returned signal does not support reverse opening)
        - 3: a trade signal is produced only when both signals agree in direction (the returned signal does not support reverse opening)
    :returns: `pd.Series` - the merged signal
'''
df = data.reindex(columns=[sig1_col, sig2_col])
df.rename(columns={sig1_col: 'sig1', sig2_col: 'sig2'},
inplace=True)
if merge_type == 1:
df['sig'] = df['sig1'] + df['sig2']
df['sig'] = df['sig'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 2:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 3:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['hold'] = df['hold'].apply(lambda x: 1 if x == 2 else \
(-1 if x == -2 else 0))
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
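# Minimal usage sketch (hypothetical signal columns, for illustration only):
#   df = pd.DataFrame({'sig_ma': [0, -1, 0, 1], 'sig_macd': [-1, 0, 0, 1]})
#   signal_merge(df, 'sig_ma', 'sig_macd', merge_type=1)
# With merge_type=1 the summed signals [-1, -1, 0, 2] are clipped back to
# [-1, -1, 0, 1], i.e. either column alone is enough to trigger a trade.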
#%%
def cal_cost_add(hold_vol, hold_cost, add_vol, add_price):
'''
    | Compute the average holding cost after adding to a position
    | hold_vol is the position size before adding, hold_cost the average holding cost before adding, add_vol the added volume
'''
holdCost = hold_vol * hold_cost
totCost = holdCost + add_vol * add_price
return totCost / (hold_vol + add_vol)
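# Worked example (hypothetical numbers): holding 10 units at an average cost of
# 100 and adding 5 units at 110 gives (10*100 + 5*110) / 15 ≈ 103.33, which is
# what cal_cost_add(10, 100, 5, 110) returns.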
def get_mean_cost(trade_records, dirt_col, price_col, vol_col):
'''
    Compute the per-period holding cost from trade records
    Parameters
    ----------
    trade_records : pd.DataFrame
        Trade records; must contain the columns given by ``dirt_col`` 、 ``price_col`` and `vol_col`
    dirt_col : str
        Trade direction column; 1 means buy (long), -1 means sell (short)
    price_col : str
        Trade price column
    vol_col : str
        Trade volume column
    :returns: `pd.DataFrame` - trade_records with three added columns: 'holdVol', 'holdCost', 'meanCost'
'''
df = trade_records.copy()
ori_idx = df.index
df.index = range(0, df.shape[0])
vol_col_ = vol_col + '_'
df[vol_col_] = df[dirt_col] * df[vol_col]
df['holdVol'] = df[vol_col_].cumsum().round(4)
df.loc[df.index[0], 'holdCost'] = df[price_col].iloc[0] * df[vol_col_].iloc[0]
df.loc[df.index[0], 'meanCost'] = df[price_col].iloc[0]
for k in range(1, df.shape[0]):
holdVol_pre = df['holdVol'].iloc[k-1]
holdCost_pre = df['holdCost'].iloc[k-1]
holdVol = df['holdVol'].iloc[k]
tradeVol = df[vol_col_].iloc[k]
if tradeVol == 0:
holdCost, meanCost = holdCost_pre, df['meanCost'].iloc[k-1]
        elif holdVol == 0: # position fully closed
holdCost, meanCost = 0, 0
        elif holdVol_pre >= 0 and holdVol > holdVol_pre: # open or add to a long position
tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
holdCost = holdCost_pre + tradeVal
meanCost = holdCost / holdVol
        elif holdVol_pre >= 0 and holdVol > 0 and holdVol < holdVol_pre: # reduce a long position
meanCost = df['meanCost'].iloc[k-1]
holdCost = meanCost * holdVol
        elif holdVol_pre >= 0 and holdVol < 0: # close long and reverse into a short
meanCost = df[price_col].iloc[k]
holdCost = holdVol * meanCost
        elif holdVol_pre <= 0 and holdVol < holdVol_pre: # open or add to a short position
tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
holdCost = holdCost_pre + tradeVal
meanCost = holdCost / holdVol
        elif holdVol_pre <= 0 and holdVol < 0 and holdVol > holdVol_pre: # reduce a short position
meanCost = df['meanCost'].iloc[k-1]
holdCost = meanCost * holdVol
        elif holdVol_pre <= 0 and holdVol > 0: # close short and reverse into a long
meanCost = df[price_col].iloc[k]
holdCost = holdVol * meanCost
df.loc[df.index[k], 'holdCost'] = holdCost
df.loc[df.index[k], 'meanCost'] = meanCost
df.index = ori_idx
return df
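# Minimal usage sketch (hypothetical trade records and column names, for
# illustration only):
#   trades = pd.DataFrame({'dirt': [1, 1, -1],
#                          'price': [10.0, 12.0, 13.0],
#                          'vol': [100, 100, 50]})
#   get_mean_cost(trades, 'dirt', 'price', 'vol')
# After the two buys meanCost is (10*100 + 12*100) / 200 = 11.0; the partial
# sell of 50 leaves holdVol=150 and meanCost unchanged at 11.0 (a reduction
# does not change the average cost in the logic above).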
#%%
def cal_gain_con_futures(price_open, price_now, n, player,
fee=0.1/100, lever=100,
n_future2target=0.001):
'''
    P&L calculation for perpetual futures, e.g. Huobi BTC contracts
    Parameters
    ----------
    price_open : float
        opening price
    price_now : float
        current price
    n : int
        number of contracts
    player : str
        position side, long ('buyer') or short ('seller')
    fee : float
        fee rate
    lever : int
        leverage
    n_future2target : float
        amount of underlying per contract
    Returns
    -------
    gain_lever : float
        profit or loss amount (leveraged)
    gain_pct : float
        profit or loss percentage
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open * n_future2target / lever
price_now_ = price_now * n_future2target / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
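# Worked example (hypothetical contract, fee set to 0 to keep the arithmetic
# readable): a long of n=10 contracts opened at 10000 and marked at 10100 with
# lever=100 and n_future2target=0.001 has cost 0.1*10=1.0 and value 0.101*10=1.01,
# so cal_gain_con_futures(10000, 10100, 10, 'buy', fee=0) returns (1.0, 0.01):
# a 1% unleveraged move, paid out 100x through the leverage.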
def cal_gain_con_futures2(price_open, price_now, n, player,
fee=0.1/100, lever=100):
'''
    P&L calculation for perpetual futures, e.g. Binance ETH contracts
    Parameters
    ----------
    price_open : float
        opening price
    price_now : float
        current price
    n : int
        quantity (in units of the underlying)
    player : str
        position side, long ('buyer') or short ('seller')
    fee : float
        fee rate
    lever : int
        leverage
    Returns
    -------
    gain_lever : float
        profit or loss amount (leveraged)
    gain_pct : float
        profit or loss percentage
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open / lever
price_now_ = price_now / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
#%%
def cal_expect_return(hit_prob, gain_loss_ratio):
    '''Compute the expected return from the hit probability and the gain/loss ratio'''
return hit_prob*gain_loss_ratio - (1-hit_prob)
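# Worked example: with a 40% hit rate and a gain/loss ratio of 2,
# cal_expect_return(0.4, 2) = 0.4*2 - 0.6 = 0.2, i.e. a positive edge even
# though most trades lose.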
def cal_gain_pct_log(price_cost, price, pct_cost0=1):
'''
    | Compute the log return
    | price_cost is the cost price
    | price is the current price
    | pct_cost0 is the value returned when the cost price_cost is 0'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost == 0:
return pct_cost0
elif price_cost > 0:
return np.log(price) - np.log(price_cost)
else:
        raise ValueError('price_cost must be greater than or equal to 0!')
def cal_gain_pct(price_cost, price, pct_cost0=1):
'''
    | Compute the percentage return
    | price_cost is the cost price
    | price is the current price
    | pct_cost0 is the value returned when the cost price_cost is 0
    Note
    ----
    By default the long (rights) side's cost price_cost is taken as positive (e.g. a buy at 100 gives price_cost=100)
    and the short (obligation) side's cost as negative (e.g. a sell at 100 gives price_cost=-100)
'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost > 0:
return price / price_cost - 1
elif price_cost < 0:
return 1 - price / price_cost
else:
return pct_cost0
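# Worked example for the long side: cal_gain_pct(100, 110) returns
# 110/100 - 1 = 0.10; a zero cost falls back to pct_cost0 (1 by default).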
def cal_gain_pcts(price_series, gain_type='pct',
pct_cost0=1, logger=None):
'''
    | Compute the return at every time step of the asset value series price_series (`pd.Series`)
    | gain_type:
    |     'pct': simple percentage returns
    |     'log': log returns (cannot be used when price_series contains values <= 0)
    |     'dif': returns as successive differences (suitable for cumulative net-value series)
    | pct_cost0 is the return value to use when the cost is 0
'''
if (price_series <= 0).sum() > 0:
gain_type = 'pct'
        logger_show('Values <= 0 found; percentage returns will be used instead of log returns!',
logger, 'warning')
if gain_type == 'pct':
df = pd.DataFrame({'price_now': price_series})
df['price_cost'] = df['price_now'].shift(1)
df['pct'] = df[['price_cost', 'price_now']].apply(lambda x:
cal_gain_pct(x['price_cost'], x['price_now'],
pct_cost0=pct_cost0), axis=1)
return df['pct']
elif gain_type == 'log':
return price_series.apply(np.log).diff()
elif gain_type == 'dif':
return price_series.diff()
else:
        raise ValueError('Unrecognized `gain_type`, please check!')
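# --- Editor's hedged usage sketch (not part of the original module) ---
# 'log' and 'dif' only need numpy/pandas; the 'pct' path additionally uses the
# package's isnull() helper, which is imported elsewhere in dramkit.
if __name__ == '__main__':
    _prices = pd.Series([100.0, 101.0, 99.5, 102.0])
    print(cal_gain_pcts(_prices, gain_type='log'))  # log returns
    print(cal_gain_pcts(_prices, gain_type='dif'))  # simple differences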
#%%
def cal_beta(values_target, values_base, gain_type='pct', pct_cost0=1):
    '''
    | Compute the beta coefficient
    | values_target and values_base are the target value series and the benchmark value series
    | gain_type and pct_cost0 are the same as in :func:`dramkit.fintools.utils_gains.cal_gain_pcts`
    | References:
    | https://www.joinquant.com/help/api/help#api:风险指标
    | https://blog.csdn.net/thfyshz/article/details/83443783
    '''
values_target = pd.Series(values_target)
    values_base = pd.Series(values_base)
from arche.rules.result import Level, Message, Result
from conftest import create_named_df, create_result
import pandas as pd
import pytest
@pytest.mark.parametrize(
"source, target",
[(("summary", "details", {"err": ["1"]}), ("summary", "details", {"err": ["1"]}))],
)
def test_message_eq(source, target):
assert Message(*source) == Message(*target)
@pytest.mark.parametrize(
"source, target",
[
(
("summary", "details", {"err": ["0"]}),
("summary", "details", {"err": ["1"]}),
),
(("summary", "details"), ("summary", "other")),
],
)
def test_message_not_eq(source, target):
assert Message(*source) != Message(*target)
@pytest.mark.parametrize(
"source, target",
[
(
            pd.Series([0, 1], index=["f", "l"], name="n"),
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import copy, json
import functools
from dataclasses import dataclass, field
from typing import List, Any
import pickle
from cachetools import cached
from cachetools.keys import hashkey
from functools import partial
import logging
logger = logging.getLogger(__name__)
@dataclass
class GaRequest:
"""
.. todo::
add segments parameters
"""
pageToken:int = 0
dateRange: list = field(default_factory=lambda: ['7daysago', 'yesterday'])
metrics: list = field(default_factory=lambda: ['pageviews','users'])
dimensions:list = field(default_factory=lambda: ['deviceCategory','date'])
segments: list = field(default_factory=lambda: [])
viewId: str = ''
pageSize: str = 1000
#TODO segments
def __str__(self):
s0 = f"viewId: {self.viewId}"
s1 = f"date: {self.dateRange[0]} - {self.dateRange[1]}"
s2 = f"metrics: {','.join(self.metrics)}"
s3 = f"dimensions: {','.join(self.dimensions)}"
s4 = f"segments: {','.join(self.segments)}"
s5 = f"paging: token:{str(self.pageToken)} size:{str(self.pageSize)}"
return "\n".join([s0, s1, s2, s3, s4, s5])
def get(self):
return {
'pageToken': str(self.pageToken),
'dateRanges': [{'startDate': self.dateRange[0],
'endDate': self.dateRange[1]}],
'metrics': [{'expression': 'ga:'+x} for x in self.metrics],
'dimensions': [{'name':'ga:'+x} for x in self.dimensions],
'viewId': self.viewId,
'pageSize': self.pageSize,
}
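# --- Editor's hedged usage sketch (not part of the original module) ---
# Builds a GaRequest and shows the dict that GaData.report() later wraps into
# {"reportRequests": [...]} for the GA Reporting API v4 batchGet call.
# The viewId "12345678" is a placeholder.
if __name__ == '__main__':
    _req = GaRequest(dateRange=['2021-01-01', '2021-01-31'],
                     metrics=['sessions'],
                     dimensions=['date'],
                     viewId='12345678')
    print(_req)        # human-readable summary via __str__
    print(_req.get())  # request-body fragment sent to the reporting API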
class GaData:
def __init__(self, service_ga3, service_ga4):
self.service_ga3 = service_ga3
self.service_ga4 = service_ga4
self.mng = service_ga3.management()
self.viewId = None
def retrieve_imported_data_cat(self, accountId=None, webPropertyId=None):
"""
for only catalog info.
Google does not provide an api for downloading
"""
jsn = (self.mng.customDataSources()
.list(accountId=accountId, webPropertyId=webPropertyId).execute())
cat = json_normalize(jsn['items'])
logger.info(cat[['id', 'name']])
return cat
def get_dimension_metrics(self, accountId=None, webPropertyId=None):
res_dm = (self.mng.customDimensions()
.list(accountId=accountId, webPropertyId=webPropertyId).execute())
res_mt = (self.mng.customMetrics()
.list(accountId=accountId, webPropertyId=webPropertyId).execute())
pass
def get_my_segments(self, accountId=None):
jsn = self.mng.segments().list().execute()
ret = json_normalize(jsn['items'])
return ret.loc[:,['id', 'name', 'definition', 'updated', 'created', 'type', 'selfLink']]
def get_account_summary(self):
"""
get google analytics account summary in profile level
"""
jsn = self.mng.accountSummaries().list().execute()
wp = pd.io.json.json_normalize(
jsn['items'],
record_path='webProperties', meta=['id','name'],
meta_prefix='ac_'
).drop(['kind','profiles'], axis=1)
wp.set_index('id', inplace=True)
profiles = pd.io.json.json_normalize(
jsn['items'], record_path=['webProperties', 'profiles'],
record_prefix="profile_", meta=[['webProperties','id']]
).drop(['profile_kind'], axis=1)
profiles.set_index('webProperties.id', inplace=True)
profiles.index.name = 'id'
return pd.merge(profiles, wp, on="id")
def _is_valid_request(self, req):
#dimensions is not necessary in google API, though I put it
KEYS = ['dateRanges', 'metrics', 'dimensions']
if type(req) is not dict:
raise TypeError
if not all([x in req.keys() for x in KEYS]):
print(f'at least {",".join(KEYS)} needed')
raise ValueError
return True
@cached(cache={}, key=partial(hashkey, '_report'))
def _report(self, body:str):
body = json.loads(body)
ret = self.service_ga4.reports().batchGet(body=body).execute()
logger.debug(f"body:{body}")
return ret
def report(self,
viewId=None,
requests:list=[GaRequest],
maxreq:int=5):
"""get data: Note: request is list of dictionary, so be carefull not to pass reference"""
for req in requests:
pass
#self._is_valid_request(req) #maybe i do not need it any more
for req in requests:
req.viewId = str(viewId)
# use copy to prevent nextPageToken be change of the global var
body = {"reportRequests": copy.deepcopy([x.get() for x in requests])}
res = self._report(json.dumps(body)) # json.dumps() for caching pass immutable data
pickle.dump(res, open("log/gadata_res.pickle", 'wb'))
logger.debug(res)
##only to get first reports -> first requests
rowCount = res['reports'][0]['data']['rowCount']
if requests[0].pageToken == 0:
print(f"total rows: {rowCount}")
yield from self._changeToDataFrame(res['reports'])
if 'nextPageToken' not in res['reports'][-1]:
print(f"done: rows {rowCount}")
return
else:
#make requests object again with requests[0]
newReq = copy.deepcopy(requests[0])
newReq.pageSize = 10000
nextPageToken = int(res['reports'][-1]['nextPageToken'])
newReq.pageToken = str(nextPageToken)
requests = [newReq]
while nextPageToken + 10000 < rowCount and len(requests) < maxreq:
req = copy.deepcopy(newReq)
nextPageToken = nextPageToken + 10000
req.pageToken = str(nextPageToken)
requests.append(req)
print(f"batch get:{len(requests)}requests: \
{','.join([x.pageToken for x in requests])}")
yield from self.report(viewId, requests, maxreq=maxreq)
def _changeToDataFrame(self, reports):
for report in reports:
#print(f"row num: {len(report['data']['rows'])}")
dim_names = [x.replace("ga:","") for x in
report.get("columnHeader").get("dimensions")]
mtr_names = [x['name'].replace("ga:","") for x in
report.get("columnHeader").get("metricHeader").get("metricHeaderEntries")]
mtr_dtypes = [x['type'].replace("ga:","") for x in
report.get("columnHeader").get("metricHeader").get("metricHeaderEntries")]
            mydic = {'STRING': str, "INTEGER": int, "FLOAT": float}
mtr_dtypes = [mydic[x] for x in mtr_dtypes]
dim_ind = [x['dimensions'] for x in report['data']['rows']]
mtr_dat = np.array([x['metrics'][0]['values'] for x in report['data']['rows']])
tmp = pd.concat([
pd.DataFrame(dim_ind, columns=dim_names),
pd.DataFrame(mtr_dat.astype(int),columns=mtr_names)], axis=1)
if 'date' in tmp.columns:
tmp.index = pd.to_datetime(tmp['date'], format="%Y%m%d")
del tmp['date']
if 'dateHour' in tmp.columns:
                tmp.index = pd.to_datetime(tmp['dateHour'], format="%Y%m%d%H")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pyrates.utility.grid_search import grid_search
from copy import deepcopy
from scipy.ndimage.filters import gaussian_filter1d
from pyrates.utility.visualization import plot_timeseries, create_cmap
import seaborn as sns
import matplotlib as mpl
linewidth = 1.2
fontsize1 = 10
fontsize2 = 12
markersize1 = 60
markersize2 = 60
dpi = 200
plt.style.reload_library()
plt.style.use('seaborn-whitegrid')
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
#mpl.rc('text', usetex=True)
mpl.rcParams["font.sans-serif"] = ["Roboto"]
mpl.rcParams["font.size"] = fontsize1
mpl.rcParams["font.weight"] = "bold"
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.titlesize'] = fontsize2
mpl.rcParams['axes.titleweight'] = 'bold'
mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['axes.labelweight'] = 'bold'
mpl.rcParams['xtick.color'] = 'black'
mpl.rcParams['ytick.color'] = 'black'
mpl.rcParams['ytick.alignment'] = 'center'
mpl.rcParams['legend.fontsize'] = fontsize1
# parameter definitions
#######################
# simulation parameters
dt = 1e-4
dts = 1e-1
T = 2050.0
sim_steps = int(np.round(T/dt))
stim_offset = int(np.round(1400.0/dt))
stim_dur = int(np.round(600.0/dt))
stim_delayed = int(np.round(1700.0/dt))
stim_delayed_dur = int(np.round(300.0/dt))
stim_amp = 1.0
stim_var = 50.0
stim_freq = 14.0
ctx = np.zeros((sim_steps, 1))
ctx[stim_offset:stim_offset+stim_dur, 0] = stim_amp #np.linspace(0., -stim_amp, stim_dur)
# ctx[stim_delayed:stim_delayed+stim_delayed_dur, 0] = 60*stim_amp #np.linspace(0.0, 50*stim_amp, stim_dur)
ctx = gaussian_filter1d(ctx, stim_var, axis=0)
# stria = np.zeros((sim_steps, 1))
# stria[stim_delayed:stim_delayed+stim_delayed_dur, 0] = 0.5*stim_amp #np.linspace(0.0, 2*stim_amp, stim_dur)
# stria = gaussian_filter1d(stria, stim_var, axis=0)
# plt.figure()
# plt.plot(ctx)
# plt.plot(stria)
# plt.show()
# model parameters
k_gp = 7.6
k_p = 2.0
k_i = 1.5
k = 100.0
eta = 100.0
delta = 100.0
param_grid = {
'k_ee': [0.8*k],
'k_ae': [3.0*k],
'k_pe': [8.0*k],
'k_ep': [10.0*k],
'k_pp': [1.0*k_gp*k_p*k/k_i],
'k_ap': [1.0*k_gp*k_p*k_i*k],
'k_aa': [1.0*k_gp*k/(k_p*k_i)],
'k_pa': [1.0*k_gp*k_i*k/k_p],
'k_ps': [20.0*k],
'k_as': [20.0*k],
'eta_e': [4.0*eta],
'eta_p': [4.0*eta],
'eta_a': [1.0*eta],
'eta_s': [0.002],
'delta_e': [0.3*delta],
'delta_p': [0.9*delta],
'delta_a': [1.2*delta],
'tau_e': [13.0],
'tau_p': [25.0],
'tau_a': [20.0],
'tau_ampa_r': [0.8],
'tau_ampa_d': [3.7],
'tau_gabaa_r': [0.5],
'tau_gabaa_d': [5.0],
'tau_stn': [2.0]
}
param_grid = pd.DataFrame.from_dict(param_grid)
import numpy as np
import pandas as pd
import glob
from pmdarima.arima import ndiffs
from pandas.tseries.offsets import QuarterBegin, QuarterEnd
from .hand_select import hand_select
import pandas_datareader.data as web
import xlrd, csv
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook, InvalidFileException
def set_date_as_index(df):
df.columns = [name.lower() for name in df.columns]
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True)
return df
def make_float(df):
df = df.replace(".", np.nan)
df = df.astype(float)
return df
def read_files(paths, fillna=True):
csv_list = []
xls_list = []
for path in paths:
csv_files = glob.glob(path + "/*.csv")
xls_files = glob.glob(path + "/*.xls")
for elt in csv_files:
df = pd.read_csv(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
csv_list.append(df)
for elt in xls_files:
try:
df = pd.read_excel(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
                xls_list.append(df)
except Exception:
pass
return csv_list, xls_list
def make_stationary(df):
df = hand_select(df)
df = df.dropna()
columns = df.columns
for name in columns:
x = df[name].values
d_kpss = ndiffs(x, test='kpss')
d_adf = ndiffs(x, test='adf')
d_pp = ndiffs(x, test='pp')
d_ = max(d_kpss, d_adf, d_pp)
if d_ > 0:
new_name = name + '_diff' + str(d_)
if d_ == 1:
df[new_name] = df[name].diff()
elif d_ == 2:
df[new_name] = df[name].diff().diff()
elif d_ > 2:
raise ValueError('High order differentiation')
else:
raise Exception('Some thing is wrong')
df = df.drop(columns=[name])
return df
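# --- Editor's hedged sketch (not part of the original module) ---
# The core of make_stationary(): choose a differencing order with pmdarima's
# unit-root tests and difference that many times. This standalone fragment
# skips the project-specific hand_select() step.
if __name__ == '__main__':
    _s = pd.Series(np.cumsum(np.random.randn(200)))  # random-walk-like series
    _d = max(ndiffs(_s.values, test=t) for t in ('kpss', 'adf', 'pp'))
    _stationary = _s.copy()
    for _ in range(_d):
        _stationary = _stationary.diff()
    print('chosen differencing order:', _d)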
def open_xls_as_xlsx(filename):
# first open using xlrd
book = xlrd.open_workbook(filename)
index = 0
nrows, ncols = 0, 0
while nrows * ncols == 0:
sheet = book.sheet_by_index(index)
nrows = sheet.nrows
ncols = sheet.ncols
index += 1
# prepare a xlsx sheet
book1 = Workbook()
sheet1 = book1.active
for row in range(1, nrows):
for col in range(1, ncols):
sheet1.cell(row=row, column=col).value = sheet.cell_value(row, col)
return book1
def read_data(path, sheet=False, header='infer'):
file_format = path.split('.')[-1]
if 'msci' in path:
header = 6
if sheet is False:
# if file_format == 'csv':
# df = pd.read_csv(path, header=header)
# elif file_format == 'xls':
# df = open_xls_as_xlsx(path)
# else:
try:
df = pd.read_excel(path, header=header, engine='openpyxl')
except Exception:
try:
df = open_xls_as_xlsx(path)
except Exception as e:
try:
df = pd.read_csv(path, header=header)
except Exception as e:
raise Exception(e)
else:
try:
# excel_file = pd.ExcelFile(path)
# assert sheet in excel_file.sheet_names
# df = excel_file.parse(sheet, header=header)
df = pd.read_excel(path, header=header, engine='openpyxl', sheet_name=sheet)
except Exception:
raise Exception("Can not read sheet")
df.columns = [name.lower() for name in df.columns]
if 'year2' in df.columns:
drop_columns = ['year2']
else:
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
first_valid = df.iloc[:, 1].first_valid_index()
last_valid = df.iloc[:, 1].last_valid_index() + 1
df = df.iloc[first_valid:last_valid]
df.columns = df.columns.str.replace('.', '_')
df.columns = df.columns.str.replace(' ', '_')
df.columns = df.columns.str.replace('__', '_')
return df
def make_monthly_date(df, offset=True):
datetime = pd.to_datetime(
(
df['year'].astype(int) * 100
+ df['month'].astype(int)
).astype(str),
format='%Y%m'
)
if offset:
datetime += pd.tseries.offsets.MonthBegin(1)
else:
datetime = datetime
df['date'] = datetime
df.drop(columns=['year', 'month'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_monthly' for elt in df.columns]
return df
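# --- Editor's hedged usage sketch (not part of the original module) ---
# make_monthly_date() expects integer-like 'year' and 'month' columns; with
# offset=True each observation is stamped on the first day of the *following*
# month (the MonthBegin(1) shift above), presumably to reflect release timing.
if __name__ == '__main__':
    _demo = pd.DataFrame({'year': [2020, 2020], 'month': [1, 2], 'cpi': [1.1, 1.2]})
    print(make_monthly_date(_demo.copy(), offset=True))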
def make_quarterly_date(df, offset=True):
df['year'] = df['year'].str.lower()
df['year'] = df['year'].str.replace(r'(q\d)-(\d+)', r'\2-\1')
if offset:
# Bug that quarterbegin is March 01
df['date'] = pd.to_datetime(df['year'])\
+ pd.tseries.offsets.DateOffset(days=1)\
+ pd.tseries.offsets.QuarterBegin(1, startingMonth=1)
else:
df['date'] = pd.to_datetime(df['year'])
df.drop(columns=['year'], inplace=True)
df.set_index('date', inplace=True)
# Manually shift because of QuarterBegin bug
df.columns = [elt + '_quarterly' for elt in df.columns]
df = df.dropna()
return df
def make_daily_date(df):
datetime = pd.to_datetime(
(
df['year'].astype(int) * 10000
+ df['month'].astype(int) * 100
+ df['day'].astype(int)
).astype(str),
format='%Y%m%d'
)
df['date'] = datetime
df.drop(columns=['year', 'month', 'day'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_daily' for elt in df.columns]
return df
# If date of low frequency data is specified, assume It is announced
# before the start of the market
# If not specified, assume it is announced after the market is closed
def daily_data(df, freq, offset=True, fill_method='ffill'):
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
if freq.lower() == 'monthly':
try:
df = make_monthly_date(df, offset=offset)
except Exception:
print("set monthly date as index")
datetime = pd.to_datetime(df['date'])
df['date'] = datetime
df.set_index('date', inplace=True)
df.columns = [elt + '_monthly' for elt in df.columns]
df = make_stationary(df)
if offset:
daily_datetime = pd.date_range(
df.index[0] + pd.tseries.offsets.MonthBegin(1),
df.index[-1] + pd.tseries.offsets.MonthEnd(1),
freq='D'
)
else:
daily_datetime = pd.date_range(
df.index[0] + pd.tseries.offsets.MonthBegin(1),
df.index[-1] + pd.tseries.offsets.MonthEnd(1),
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
elif freq.lower() == 'daily':
try:
df = make_daily_date(df)
except Exception:
print("set daily date as index")
datetime = pd.to_datetime(df['date'])
df['date'] = datetime
df.set_index('date', inplace=True)
df.columns = [elt + '_daily' for elt in df.columns]
df = make_stationary(df)
daily_datetime = pd.date_range(
df.index[0],
df.index[-1],
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
elif freq.lower() == 'quarterly':
try:
df = make_quarterly_date(df)
except Exception:
print("set quarterly date as index")
datetime = pd.to_datetime(df['date'])
df['date'] = datetime
df.set_index('date', inplace=True)
df.columns = [elt + '_quarterly' for elt in df.columns]
df = make_stationary(df)
if offset:
daily_datetime = pd.date_range(
df.index[0] + QuarterBegin(1, startingMonth=1),
df.index[-1] + QuarterEnd(1, startingMonth=1),
freq='D'
)
else:
daily_datetime = pd.date_range(
df.index[0],
df.index[-1] + pd.tseries.offsets.QuarterEnd(1),
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
else:
print("Type frequency")
daily_datetime = pd.date_range(
df.index[0], df.index[-1],
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
return df
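# --- Editor's hedged sketch (not part of the original module) ---
# The essential upsampling step inside daily_data(): forward-fill a monthly
# frame onto a daily index. The full function also runs make_stationary(),
# which needs the project's hand_select(); this fragment leaves that out.
if __name__ == '__main__':
    _monthly = pd.DataFrame({'epu': [100.0, 120.0, 90.0]},
                            index=pd.date_range('2020-01-01', periods=3, freq='MS'))
    _daily_idx = pd.date_range(_monthly.index[0],
                               _monthly.index[-1] + pd.tseries.offsets.MonthEnd(1),
                               freq='D')
    print(_monthly.reindex(_daily_idx, method='ffill').head())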
def get_nonfinancial():
print('monthly epu')
monthly_epu = read_data(
'https://www.policyuncertainty.com/media/All_Country_Data.xlsx'
)
daily_epu = daily_data(monthly_epu, 'monthly')
daily_epu.columns = ['epu_' + elt for elt in daily_epu.columns]
print('daily_infectious')
daily_infectious = read_data(
'https://www.policyuncertainty.com/media/All_Infectious_EMV_Data.csv'
)
daily_infectious = daily_data(daily_infectious, 'daily')
daily_infectious.columns = [
'daily_infectious_' + elt for elt in daily_infectious.columns]
print('categorical_epu')
categorical_epu = read_data(
'https://www.policyuncertainty.com/media/Categorical_EPU_Data.xlsx'
)
categorical_epu = daily_data(categorical_epu, 'monthly')
categorical_epu.columns = [
'categorical_epu_' + elt for elt in categorical_epu.columns]
# print('eurq_data')
# eurq_data = read_data(
# '../../data/epu/EURQ_data.xlsx',
# sheet='EURQ'
# )
# eurq_data = daily_data(eurq_data, 'monthly')
# eurq_data.columns = ['eurq_data_' + elt for elt in eurq_data.columns]
# print('trade_unc')
# trade_uncertainty_data = read_data(
# 'https://www.policyuncertainty.com/media/Trade_Uncertainty_Data.xlsx'
# )
# trade_uncertainty_data = daily_data(trade_uncertainty_data, 'monthly')
# trade_uncertainty_data.columns = [
# 'trade_uncertainty_' + elt for elt in trade_uncertainty_data.columns
# ]
print('wpui')
wpui_url = (
'https://worlduncertaintyindex.com/'
'wp-content/uploads/2020/07/WPUI_Data.xlsx'
)
wpui_data = read_data(
wpui_url, sheet='F1', header=1
)
wpui_data = daily_data(wpui_data, 'quarterly')
wpui_data.columns = [
'wpui_' + elt for elt in wpui_data.columns
]
print('wui')
wui_url = (
'https://worlduncertaintyindex.com/'
'wp-content/uploads/2020/07/WUI_Data.xlsx'
)
wui_data = read_data(
wui_url, sheet='F1', header=2
)
wui_data = daily_data(wui_data, 'quarterly')
wui_data.columns = [
'wui_' + elt for elt in wui_data.columns
]
df_non_financial = pd.concat(
[
daily_epu, daily_infectious, categorical_epu,
# eurq_data, trade_uncertainty_data,
wpui_data, wui_data
], axis=1
)
print('non-financial data')
return df_non_financial
def get_financial():
print('finance data')
    sp500 = web.DataReader(
'^GSPC', 'yahoo',
start='1990-01-03', end='2020-08-31'
)
sp500.columns = [elt.lower().replace(' ', '_') for elt in sp500.columns]
try:
sp500.set_index('date', inplace=True)
except Exception:
pass
sp500.index.name = 'date'
    sp500.index = pd.DatetimeIndex(sp500.index)
#Chapter 3 - Combining data for analysis
#Combining rows of data
# Concatenate uber1, uber2, and uber3: row_concat
row_concat = pd.concat([uber1, uber2,uber3])
# Print the shape of row_concat
print(row_concat.shape)
# Print the head of row_concat
print(row_concat.head())
#Combining columns of data
# Concatenate ebola_melt and status_country column-wise: ebola_tidy
ebola_tidy = pd.concat([ebola_melt,status_country],axis=1)
# Print the shape of ebola_tidy
print(ebola_tidy.shape)
# Print the head of ebola_tidy
print(ebola_tidy.head())
#Finding files that match a pattern
# Import necessary modules
import glob
import pandas as pd
# Write the pattern: pattern
pattern = '*.csv'
# Save all file matches: csv_files
csv_files = glob.glob(pattern)
# Print the file names
print(csv_files)
# Load the second file into a DataFrame: csv2
csv2 = pd.read_csv(csv_files[1])
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
        self.assertTrue(result.equals(pandas.Series([True, False])))
# Standard imports
from os.path import dirname
# Third party imports
from teacher.utils import recognize_features_type, set_discrete_continuous, label_encode
import pandas as pd
import numpy as np
MODULE_PATH = dirname(__file__)
def generate_dataset(df, columns, class_name, discrete, name):
"""Generate the dataset suitable for LORE usage
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas DataFrame with the original data to prepare
columns : list
List of the columns used in the dataset
class_name : str
Name of the class column
discrete : list
List with all the columns to be considered to have discrete values
name : str
Name of the dataset
Returns
-------
dataset : dict
Dataset as a dictionary with the following elements:
name : Name of the dataset
df : Pandas DataFrame with the original data
columns : list of the columns of the dataframe
class_name : name of the class variable
possible_outcomes : list with all the values of the class column
type_features : dict with all the variables grouped by type
features_type : dict with the type of each feature
discrete : list with all the columns to be considered to have discrete values
continuous : list with all the columns to be considered to have continuous values
idx_features : dict with the column name of each column once arranged in a numpy array
label_encoder : label encoder for the discrete values
X : numpy array with all the columns except for the class
y : numpy array with the class column
"""
possible_outcomes = list(df[class_name].unique())
type_features, features_type = recognize_features_type(df, class_name)
discrete, continuous = set_discrete_continuous(columns, type_features, class_name, discrete, continuous=None)
columns_tmp = list(columns)
columns_tmp.remove(class_name)
idx_features = {i: col for i, col in enumerate(columns_tmp)}
# Dataset Preparation for Scikit Alorithms
df_le, label_encoder = label_encode(df, discrete)
X = df_le.loc[:, df_le.columns != class_name].values
y = df_le[class_name].values
dataset = {
'name': name,
'df': df,
'columns': list(columns),
'class_name': class_name,
'possible_outcomes': possible_outcomes,
'type_features': type_features,
'features_type': features_type,
'discrete': discrete,
'continuous': continuous,
'idx_features': idx_features,
'label_encoder': label_encoder,
'X': X,
'y': y,
}
return dataset
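# --- Editor's hedged usage sketch (not part of the original module) ---
# A toy call to generate_dataset(); the frame, column names and class values
# are invented, and running it needs the same teacher.utils helpers imported
# above to behave as in the LORE reference implementation.
if __name__ == '__main__':
    _toy = pd.DataFrame({
        'age': [25, 32, 47, 51],
        'job': ['a', 'b', 'a', 'c'],
        'approved': ['yes', 'no', 'yes', 'no'],
    })
    _ds = generate_dataset(_toy, _toy.columns, class_name='approved',
                           discrete=[], name='toy')
    print(_ds['X'].shape, _ds['possible_outcomes'])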
def load_german():
"""Loads and returns the german credit dataset
Returns
-------
dataset : dict
Returns a dataset as formatted in generate_dataset()
"""
# Read Dataset
df = pd.read_csv(MODULE_PATH + '/data/german_credit.csv', delimiter=',')
# Features Categorization
columns = df.columns
class_name = 'default'
discrete = ['installment_as_income_perc', 'present_res_since', 'credits_this_bank', 'people_under_maintenance']
return generate_dataset(df, columns, class_name, discrete, 'german_credit')
def load_adult():
"""Loads and returns the adult dataset
Returns
-------
dataset : dict
Returns a dataset as formatted in generate_dataset()
"""
# Read Dataset
df = pd.read_csv(MODULE_PATH + '/data/adult.csv', delimiter=',', skipinitialspace=True)
# Remove useless columns
del df['fnlwgt']
del df['education-num']
# Remove Missing Values
for col in df.columns:
df[col].replace('?', df[col].value_counts().index[0], inplace=True)
# Features Categorization
columns = df.columns.tolist()
columns = columns[-1:] + columns[:-1]
df = df[columns]
class_name = 'class'
discrete = []
return generate_dataset(df, columns, class_name, discrete, 'adult')
def load_compas():
# Read Dataset
    df = pd.read_csv(MODULE_PATH + '/data/compas-scores-two-years.csv', delimiter=',', skipinitialspace=True)
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 11:40:41 2019
@author: johnmount
"""
import math
import statistics
import hashlib
import numpy
import pandas
import vtreat.stats_utils
def safe_to_numeric_array(x):
# work around https://github.com/WinVector/pyvtreat/issues/7
# noinspection PyTypeChecker
return numpy.asarray(pandas.Series(x) + 0.0, dtype=float)
def can_convert_v_to_numeric(x):
"""check if non-empty vector can convert to numeric"""
try:
numpy.asarray(x + 0.0, dtype=float)
return True
except TypeError:
return False
def is_bad(x):
""" for numeric vector x, return logical vector of positions that are null, NaN, infinite"""
if can_convert_v_to_numeric(x):
x = safe_to_numeric_array(x)
return numpy.logical_or(
pandas.isnull(x), numpy.logical_or(numpy.isnan(x), numpy.isinf(x))
)
return pandas.isnull(x)
def has_range(x):
x = safe_to_numeric_array(x)
not_bad = numpy.logical_not(is_bad(x))
n_not_bad = sum(not_bad)
if n_not_bad < 2:
return False
x = x[not_bad]
return numpy.max(x) > numpy.min(x)
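# --- Editor's hedged usage sketch (not part of the original module) ---
# is_bad() flags null/NaN/inf positions of a numeric vector; has_range() checks
# whether anything still varies once those positions are dropped.
if __name__ == '__main__':
    _v = pandas.Series([1.0, None, numpy.nan, numpy.inf, 2.0])
    print(is_bad(_v))     # [False  True  True  True False]
    print(has_range(_v))  # True: 1.0 and 2.0 remain and differ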
def summarize_column(x, *, fn=numpy.mean):
"""
Summarize column to a non-missing scalar.
:param x: a vector/Series or column of numbers
:param fn: summarize function (such as numpy.mean), only passed non-bad positions
:return: scalar float summary of the non-None positions of x (otherwise 0)
"""
x = safe_to_numeric_array(x)
not_bad = numpy.logical_not(is_bad(x))
n_not_bad = sum(not_bad)
if n_not_bad < 1:
return 0.0
x = x[not_bad]
v = 0.0 + fn(x)
if pandas.isnull(v):
    return 0.0
return v
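# --- Illustrative checks (assumed inputs, not part of the original module) ---
def _example_checks():
    assert is_bad(numpy.array([1.0, numpy.nan, numpy.inf, 3.0]))[1]    # NaN flagged as bad
    assert not has_range([1.0, 1.0, 1.0])                              # constant column
    assert summarize_column([1.0, None, 3.0]) == 2.0                   # mean of usable values
    assert summarize_column([1.0, None, 3.0], fn=numpy.median) == 2.0  # alternative summary fn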
from app.helpers.plotter import plot_confusion_matrix
from app.helpers.model_exec import model_exec
from app.helpers.selection import metric_data_prep
from app.helpers.selection import get_mic_chi2_s_df
from app.helpers.model_exec import model_eval
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score as acc
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import chi2
from sklearn.naive_bayes import GaussianNB
from sklearn.feature_selection import SelectFromModel
import pandas as pd
import numpy as np
import operator
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.ion()
import os
path_ = os.getcwd()
# STEP-1: Count vectorizer data import.
data_path = path_ + r"/data_prep/final_data/growth/"
lit_df = pd.read_csv(data_path+"lit.csv")
pos_df = pd.read_csv(data_path+"pos.csv")
neg_df = pd.read_csv(data_path+"neg.csv")
unc_df = pd.read_csv(data_path+"unc.csv")
thl_df = pd.read_csv(data_path+"thala.csv")
bag_df = pd.read_csv(data_path+"bagfw.csv")
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 9 11:15:48 2018
@author: bfyang.cephei
"""
import numpy as np
import pandas as pd
#import pandas_datareader.data as web
#import tushare as ts
#import datetime
#==============================================================================
def poss_date(date):
if len(date) == 10:
return date[:4]+'-'+date[5:7]+'-'+date[8:]
elif len(date) == 8:
return date[:4]+'-0'+date[5]+'-0'+date[-1]
elif date[-2] == r'/':
return date[:4]+'-'+date[5:7]+'-0'+date[-1]
else:
return date[:4]+'-0'+date[5]+'-'+date[-2:]
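# Illustrative behaviour of poss_date (assumed inputs): each branch normalizes a
# differently padded slash-separated date to 'YYYY-MM-DD':
#   poss_date('2018/04/09') -> '2018-04-09'   (len 10: already zero-padded)
#   poss_date('2018/4/9')   -> '2018-04-09'   (len 8: pad month and day)
#   poss_date('2018/04/9')  -> '2018-04-09'   (pad the day)
#   poss_date('2018/4/09')  -> '2018-04-09'   (pad the month)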
data = pd.read_csv(r'C:\Users\bfyang.cephei\Desktop\CTA\data\daybar_20180204\futures_20180204.csv')
data['tradedate'] = data['tradedate'].apply(lambda x : poss_date(x))
data_train = data[(data['tradedate']>='2017-01-01') & (data['tradedate']<='2017-01-31')]
#==============================================================================
data_train.head()
## sklearn
from sklearn import linear_model
y_train = (data_train['close']-data_train['pre_close'])/data_train['pre_close']
y_train = np.array(y_train.fillna(0))
x_train = data_train[['swing','oi','open']]
x_train = np.array(x_train.fillna(0))
data_test = data[(data['tradedate']>='2017-02-01') & (data['tradedate']<='2017-02-31')]
y_test = (data_test['close']-data_test['pre_close'])/data_test['pre_close']
y_test = np.array(y_test.fillna(0))
x_test = data_test[['swing','oi','open']]
x_test = np.array(x_test.fillna(0))
# Create linear regression object
linear = linear_model.LinearRegression()
# Train the model using the training sets and check score
linear.fit(x_train,y_train)
linear.score(x_train, y_train)
#Equation coefficient and Intercept
print('Coefficient: \n', linear.coef_)  # beta coefficients
print('Intercept: \n', linear.intercept_)  # intercept term
#Predict Output
predicted= linear.predict(x_test)
# correlation
res1 = np.corrcoef(predicted,y_test) # correlation coefficient computed from numpy arrays
res2 = pd.Series(predicted).corr(pd.Series(y_test)) # correlation coefficient computed from pandas Series
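# --- Illustrative note (assumed toy data, not part of the original script): res1 and
# res2 above are two routes to the same Pearson correlation; np.corrcoef returns a 2x2
# matrix whose off-diagonal entry equals pd.Series.corr. ---
def _example_corr_equivalence():
    a = np.array([1.0, 2.0, 3.0, 4.0])
    b = np.array([2.0, 4.0, 5.0, 8.0])
    assert np.isclose(np.corrcoef(a, b)[0, 1], pd.Series(a).corr(pd.Series(b)))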
import os
os.chdir('D:/yh_min-mfactors')
from poss_data_format import *
from address_data import *
import pandas as pd
import statsmodels.api as sm
import numpy as np
# At each time cross-section, run a multiple regression of all stocks' returns on all of
# their factor exposures, obtaining per-factor residuals for each stock at that time
# (data volume 191*227*300) and identifying the effective factors.
# Then, for every cross-section, compute the correlation between the predicted and the
# realized returns, i.e. the IC(t) value, which yields a time series of IC values.
# Finally, run a t-test on the IC series (an illustrative sketch of this pipeline is
# appended at the end of this script).
# Step 1: read industry sector data
code_HS300 = pd.read_excel(add_gene_file + 'data_mkt.xlsx',sheetname='HS300')
stockList = list(code_HS300['code'][:])
industry = pd.read_pickle\
(add_gene_file + 'industry.pkl').drop_duplicates()
industry = industry[industry['code'].isin(stockList)]
industry.index = industry['code']
industry.drop(['code'],axis = 1,inplace = True)
industry = industry.T
industry.reset_index(inplace = True)
industry.rename(columns={'index':'date'},inplace = True)
# Step 2: read style factor data
# Factor data is available up to 2017-12-06
style_filenames = os.listdir(add_Nstyle_factors)
style_list = list(map(lambda x : x[:-4],style_filenames))
for sfilename in style_filenames:
names = locals()
names[sfilename[:-4]] = pd.read_csv(add_Nstyle_factors+sfilename)
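# --- Illustrative sketch of the IC pipeline described at the top of this script. The
# dict-of-frames layout and column handling are assumptions made for demonstration; the
# real factor frames are loaded dynamically into locals() above. ---
def _example_ic_series(returns_by_date, factors_by_date):
    """returns_by_date: {date: pd.Series of stock returns indexed by code};
    factors_by_date: {date: pd.DataFrame of factor exposures, same stock index}."""
    import scipy.stats as st
    ic = {}
    for date, y in returns_by_date.items():
        X = sm.add_constant(factors_by_date[date].loc[y.index])
        fit = sm.OLS(y, X, missing='drop').fit()
        predicted = pd.Series(fit.predict(X), index=y.index)
        ic[date] = predicted.corr(y)                    # IC(t) = corr(predicted, realized)
    ic_series = pd.Series(ic).sort_index()
    t_stat, p_value = st.ttest_1samp(ic_series.dropna(), 0.0)  # t-test on the IC series
    return ic_series, t_stat, p_value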
import time
from pathlib import Path
from typing import Tuple, Sequence
from collections import Counter
import numpy as np
import pandas as pd
from torch.utils import data
from tqdm import tqdm
from sandstone.datasets.factory import RegisterDataset
from sandstone.utils.generic import log, md5
import warnings
warnings.simplefilter("ignore")
class MIMIC_IV_Abstract_Dataset(data.Dataset):
"""Abstract class for different MIMIC-IV tasks.
Handles data loading, caching, splitting, and various generic preprocessing steps.
"""
def __init__(self, args, split_group):
super(MIMIC_IV_Abstract_Dataset, self).__init__()
self.args = args
self.split_group = split_group
cache_static_filename = get_cache_filename('static', args=args)
cache_hourly_filename = get_cache_filename('hourly', args=args)
print(f"Loading item mapping ({args.item_map_path})")
item_mapping = pd.read_csv(args.item_map_path, low_memory=False)
if Path(args.cache_dir, cache_static_filename).is_file() and Path(args.cache_dir, cache_hourly_filename).is_file():
print("Loading cached static_df and aggregated_df:", cache_static_filename, cache_hourly_filename)
static_df = pd.read_parquet(Path(args.cache_dir, cache_static_filename))
aggregated_df = pd.read_parquet(Path(args.cache_dir, cache_hourly_filename))
else:
# compute which csvs are needed
task_csv_subset = set(self.task_specific_features.keys())
features_csv_subset = set(item_mapping.origin.loc[item_mapping.origin != 'static'].dropna())
# by default, patients, chartevents, admissions and icustays are loaded
self.csv_subset = set(('patients', 'chartevents', 'admissions', 'icustays')).union(task_csv_subset).union(features_csv_subset)
raw_dataframes = load_data(args.dataset_path, subset=self.csv_subset, nrows=args.nrows, chunksize=args.chunksize, cache_dir=args.cache_dir)
static_df, aggregated_df = self.create_dataframes(args, item_mapping, **raw_dataframes)
# cache final dataframes
static_df.to_parquet(Path(args.cache_dir, cache_static_filename))
aggregated_df.to_parquet(Path(args.cache_dir, cache_hourly_filename))
print("Generating labels")
self.create_labels(static_df, aggregated_df, task=args.task, threshold=args.los_threshold)
if args.dataset == 'mimic-iv-sepsis':
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Adding onset hour to static_df")
onset = aggregated_df.groupby('hadm_id')[args.task+'_onset_hour'].mean()
static_df = static_df.merge(onset, how='left', on='hadm_id')
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
print("Filter for just feature columns")
static_cols = ['subject_id', 'hadm_id', 'intime', 'y', args.task+'_onset_hour']
cols_to_keep = ['hadm_id', 'hour']
if len(args.features) != 0:
# convert to lower case
args.features = [x.lower() for x in args.features]
if args.group_by_level2:
static_cols.extend(args.features)
cols_to_keep.extend(args.features)
else:
feature_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower().isin(args.features)]['itemid'].map(str))
static_cols.extend(feature_ids)
cols_to_keep.extend(feature_ids)
else:
static_cols.extend(list(item_mapping.itemid.map(str)))
if args.group_by_level2:
cols_to_keep.extend(list(item_mapping.LEVEL2))
else:
cols_to_keep.extend(list(item_mapping.itemid.map(str)))
if args.feature_search is not None:
args.feature_search = args.feature_search.lower()
if args.group_by_level2:
print("Search feature:", args.feature_search)
# args.feature_search is a single string here; append it (extend would add it
# character by character)
static_cols.append(args.feature_search)
cols_to_keep.append(args.feature_search)
else:
search_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower() == (args.feature_search)]['itemid'].map(str))
print("Search IDs:", search_ids)
cols_to_keep.extend(search_ids)
static_cols.extend(search_ids)
if len(args.feature_remove) != 0:
# convert to lower case
args.feature_remove = [x.lower() for x in args.feature_remove]
if args.group_by_level2:
remove_ids = args.feature_remove
else:
remove_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower().isin(args.feature_remove)]['itemid'].map(str))
for feature in remove_ids:
if feature in cols_to_keep:
print("Removed feature:", feature)
cols_to_keep.remove(feature)
if feature in static_cols:
static_cols.remove(feature)
original_cols = [c for c in cols_to_keep if c in aggregated_df.columns]
if args.impute_method == 'simple':
exist_cols = [c+'_exist' for c in original_cols if c not in ['hadm_id', 'hour']]
time_cols = [c+'_time_since' for c in original_cols if c not in ['hadm_id', 'hour']]
cols_to_keep.extend(exist_cols)
cols_to_keep.extend(time_cols)
static_df = static_df.loc[:, static_df.columns.isin(static_cols)]
aggregated_df = aggregated_df.loc[:,aggregated_df.columns.isin(cols_to_keep)]
if args.dataset == 'mimic-iv-sepsis':
print(f"Re-indexing and zero filling")
aggregated_df = reindex_timeseries(aggregated_df)
aggregated_df.fillna({x:0 for x in original_cols}, inplace=True)
if args.impute_method == 'simple':
aggregated_df.fillna({x:0 for x in exist_cols}, inplace=True)
aggregated_df.fillna({x:100 for x in time_cols}, inplace=True)
print("Static df size:", static_df.shape)
print("Static df columns:", static_df.columns)
print("Aggregated df size:", aggregated_df.shape)
print("Aggregated df columns:", aggregated_df.columns)
print("Static df stats:")
print(static_df.describe())
print("Aggregated df stats:")
print(aggregated_df.describe())
print("Binarize/One-hot encode categorical feature columns")
if 'gender' in static_df.columns:
static_df['gender'] = (static_df.gender == 'M').astype(bool)
for col in ['marital_status', 'ethnicity']:
if col in static_df.columns:
dummies = pd.get_dummies(static_df[col]).add_prefix(col+"_").astype(bool)
static_df.drop(columns=col, inplace=True)
static_df[dummies.columns] = dummies
self.assign_splits(static_df)
if args.normalize is not None:
print("Normalizing values to zero-mean and unit variance.")
if args.group_by_level2:
normalize_feats = set(args.normalize)
else:
normalize_feats = set(item_mapping.loc[item_mapping['LEVEL2'].isin(args.normalize)].itemid.unique())
static_norm_cols = list(normalize_feats.intersection(static_df.columns))
hourly_norm_cols = list(normalize_feats.intersection(aggregated_df.columns))
unused_norm_cols = normalize_feats.difference(set(static_norm_cols + hourly_norm_cols))
if len(unused_norm_cols) != 0:
print("WARNING: Couldn't find specified columns to normalize by: {}!".format(unused_norm_cols))
static_train = static_df.loc[static_df.split_group == 'train']
static_normalize_df = static_train[static_norm_cols]
hourly_normalize_df = aggregated_df.loc[aggregated_df.hadm_id.isin(static_train.hadm_id.unique()), hourly_norm_cols]
# compute stats over train data
static_mean, static_std = static_normalize_df.mean(), static_normalize_df.std()
hourly_mean, hourly_std = hourly_normalize_df.mean(), hourly_normalize_df.std()
# prevent division by zero
static_std.loc[static_std == 0] = 1
hourly_std.loc[hourly_std == 0] = 1
# apply to whole dataset
static_df[static_norm_cols] = (static_df[static_norm_cols] - static_mean) / static_std
aggregated_df[hourly_norm_cols] = (aggregated_df[hourly_norm_cols] - hourly_mean) / hourly_std
if args.flatten_timeseries:
flattened_df = flatten_timeseries(aggregated_df)
static_df = static_df.merge(flattened_df, on='hadm_id')
elif args.timeseries_moments:
moments_df = compute_timeseries_moments(aggregated_df, args.timeseries_moments)
static_df = static_df.merge(moments_df, on='hadm_id')
static_df.columns = static_df.columns.map(str)
self.static_features = [col for col in static_df.columns if col not in ['y', 'subject_id', 'hadm_id', 'intime', 'split_group', args.task+'_onset_hour']]
self.timeseries_features = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour']]
static_df = static_df.loc[static_df['split_group'] == split_group]
if not args.static_only:
# if non-flattened hourly data is used, also filter aggregated_df
aggregated_df = aggregated_df.loc[aggregated_df['hadm_id'].isin(static_df['hadm_id'].unique())]
static_df.drop(columns='split_group', inplace=True)
if args.static_only:
self.dataset = self.create_dataset(static_df)
else:
self.dataset = self.create_dataset(static_df, aggregated_df)
# Class weighting
label_dist = [d['y'] for d in self.dataset]
label_counts = Counter(label_dist)
weight_per_label = 1./ len(label_counts)
label_weights = {
label: weight_per_label/count for label, count in label_counts.items()
}
self.weights = [ label_weights[d['y']] for d in self.dataset]
log(self.get_summary_statement(self.args.task, split_group, self.args.current_test_years, self.args.onset_bucket, label_counts), args)
@property
def task(self):
raise NotImplementedError("Abstract method needs to be overridden!")
@property
def task_specific_features(self, task=None):
"""Defines some itemids/gsns/icd_codes that are needed for the task.
Returns:
a dictionary mapping origin dataset -> list of itemids.
"""
return {}
def create_dataframes(self, args, item_mapping, **raw_dataframes):
"""Preprocesses raw dataframes into static_df and aggregated_df.
Returns:
- static_df
- must include columns 'hadm_id', and 'y' for the label.
- any additional columns will be used as input features for prediction.
- timeseries_df
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def assign_splits(self, meta):
if self.args.timesplit:
# assign train_years as a list of years [2008, 2010] inclusive for instance.
train_start, train_end = map(int, self.args.train_years.split('-'))
meta['split_group'] = None
meta.loc[(meta['intime'].dt.year>=train_start) & (meta['intime'].dt.year<=train_end), 'split_group'] = 'train'
# dev will be a subset of train, of proportion split_probs[dev]
dev_prob = self.args.split_probs[1]
train_rows = meta[meta.split_group=='train'].shape[0]
dev_rows = int(dev_prob*train_rows)
meta.loc[meta[meta['split_group']=='train'].head(dev_rows).index, 'split_group'] = 'dev'
# if testing on training years, then final split is test set
if self.args.train_years == self.args.current_test_years:
test_prob = self.args.split_probs[2]
test_rows = int(test_prob*train_rows)
mask = meta.index.isin(meta[meta['split_group']=='train'].tail(test_rows).index)
else:
test_start, test_end = map(int, self.args.current_test_years.split('-'))
mask = meta['intime'].dt.year>=test_start
mask &= meta['intime'].dt.year<=test_end
# adding to the mask onset bucket
if self.args.onset_bucket is not None:
hour_start, hour_end = map(int, self.args.onset_bucket.split('-'))
mask &= meta[self.args.task+'_onset_hour'] >= hour_start
mask &= meta[self.args.task+'_onset_hour'] <= hour_end
meta.loc[mask, 'split_group'] = 'test'
else:
subject_ids = list(sorted(meta['subject_id'].unique()))
start_idx = 0
meta['split_group'] = None
for split, prob in zip(['train', 'dev', 'test'], self.args.split_probs):
end_idx = start_idx + int(len(subject_ids) * prob)
start = subject_ids[start_idx]
end = subject_ids[end_idx-1]
meta.loc[(meta['subject_id'] >= start) & (meta['subject_id'] <= end), 'split_group'] = split
start_idx = end_idx
if meta.loc[meta['subject_id']==subject_ids[end_idx-1]]['split_group'].isnull().any():
meta.loc[meta['subject_id']==subject_ids[end_idx-1], 'split_group'] = split
return meta
def create_dataset(self, static_df, aggregated_df=None):
"""Turns DataFrames into a list of samples, which are dicts containing 'pid', 'x', 'y', and
possibly 'x_timeseries' keys
"""
dataset = []
pids = static_df['subject_id'].values.astype(np.int32)
hadm_ids = static_df['hadm_id'].values.astype(np.int32)
ys = static_df['y'].values.astype(np.float32)
xs = static_df[self.static_features].values.astype(np.float32)
for i in tqdm(range(len(pids)), desc='Creating dataset', total=len(pids)):
patient_dict = {}
patient_dict['pid'] = pids[i]
patient_dict['y'] = ys[i]
patient_dict['x'] = xs[i]
if aggregated_df is not None:
patient_rows = aggregated_df.loc[aggregated_df.hadm_id == hadm_ids[i]]
assert len(patient_rows) > 0, "Found patient with no timeseries data!"
x_timeseries = patient_rows[self.timeseries_features].values.astype(np.float32)
patient_dict['x_timeseries'] = x_timeseries
dataset.append(patient_dict)
return dataset
def create_labels(self, static_df, aggregated_df, task, threshold):
"""Generates per-patient labels for the given task
Returns:
- static_df with an extra 'y' column
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def extract_timerange(self, args, aggregated_df, task):
"""Extracts a fixed no. of hours of data to predict from
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def get_summary_statement(self, task, split_group, years, hours, class_balance):
return "Created MIMIC-IV {} {} dataset for years {} and onset hours {} with the following class balance:\n{}".format(task, split_group, years, hours, class_balance)
def set_args(self, args):
args.num_classes = 2
args.input_dim = len(self.static_features)
if not args.flatten_timeseries:
args.timeseries_dim = len(self.timeseries_features)
args.timeseries_len = args.data_hours
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
@RegisterDataset("mimic-iv-sepsis")
class MIMIC_IV_Sepsis_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "Sepsis-3"
@property
def task_specific_features(self):
return {
'inputevents': [221662, 221653, 221289, 221906], # dopamine, dobutamine, epinephrine, norepinephrine
'outputevents': [226559, 226560, 226561, 226584, 226563, 226564, 226565, 226567, 226557,
226558, 227488, 227489], # for urine output
'labevents': [51265, 50885, 50912, 50821, 51301], # platelets, bilirubin, creatinine, PO2, WBC-count
'chartevents': [223835, 220739, 223900, 223901, 223849, 229314, # FiO2, GCS-Eye, GCS-Verbal, GCS-Motor, vent_mode, vent_mode (Hamilton)
223762, 223761, 220045, 220210, 224690], # temp_C, temp_F, heart rate, resp rate, resp rate (total)
'microbiologyevents': None, # all microbio samples (no id filtering happens on microbioevents, so None can be used here)
'prescriptions': None,
}
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays,
inputevents, labevents, microbiologyevents=None, prescriptions=None, outputevents=None,
diagnoses_icd=None, procedureevents=None, **extra_dfs):
# filter patients and merge data (code from before)
admissions, patients, icustays = filter_eligible_patients(admissions, patients, icustays,
args.min_patient_age, args.min_hours, args.gap_hours,
args.min_icu_stay, args.max_icu_stay)
chartevents = filter_table_patients(chartevents, patients)
labevents = filter_table_patients(labevents, patients)
inputevents = filter_table_patients(inputevents, patients)
microbiologyevents = filter_table_patients(microbiologyevents, patients)
prescriptions = filter_table_patients(prescriptions, patients)
outputevents = filter_table_patients(outputevents, patients)
diagnoses_icd = filter_table_patients(diagnoses_icd, patients)
procedureevents = filter_table_patients(procedureevents, patients)
print("Merging static data...")
static_df = patients[["subject_id", "gender", "anchor_age"]]
static_df = static_df.merge(admissions[["subject_id", "hadm_id", "admittime", "dischtime", "insurance", "admission_type", "marital_status", "ethnicity"]],
how="inner", on="subject_id")
static_df = static_df.merge(icustays[["hadm_id", "stay_id", "first_careunit", "intime", "outtime", "los"]],
how="inner", on="hadm_id")
static_df.rename(columns={"anchor_age": "age", "stay_id": "icustay_id"}, inplace=True)
print("Filter events")
chartevents_features = item_mapping.loc[item_mapping.origin == 'chartevents'].itemid.astype(int).tolist()
inputevents_features = item_mapping.loc[item_mapping.origin == 'inputevents'].itemid.astype(int).tolist()
outputevents_features = item_mapping.loc[item_mapping.origin == 'outputevents'].itemid.astype(int).tolist()
labevents_features = item_mapping.loc[item_mapping.origin == 'labevents'].itemid.astype(int).tolist()
procedureevents_features = item_mapping.loc[item_mapping.origin == 'procedureevents'].itemid.astype(int).tolist()
prescriptions_features = item_mapping.loc[item_mapping.origin == 'prescriptions'].itemid.tolist()
inputevents_features.extend(self.task_specific_features['inputevents'])
outputevents_features.extend(self.task_specific_features['outputevents'])
labevents_features.extend(self.task_specific_features['labevents'])
chartevents_features.extend(self.task_specific_features['chartevents'])
filtered_inputevents = inputevents.loc[inputevents['itemid'].isin(inputevents_features)]
filtered_outputevents = outputevents.loc[outputevents['itemid'].isin(outputevents_features)]
filtered_labevents = labevents.loc[labevents['itemid'].isin(labevents_features)]
filtered_chartevents = filter_variables(chartevents, chartevents_features)
filtered_prescriptions = prescriptions.loc[prescriptions['gsn'].isin(prescriptions_features)]
antibiotics = filter_antibiotics(prescriptions)
filtered_diagnoses = filter_diagnoses(diagnoses_icd, item_mapping)
filtered_procedures = procedureevents.loc[procedureevents['itemid'].isin(procedureevents_features)]
# standardize units
print("Standardizing units")
filtered_chartevents = standardize_units(filtered_chartevents, item_mapping)
# merge diagnoses with static_df
filtered_diagnoses['value'] = 1
pivot_diagnoses = filtered_diagnoses.pivot_table(index='hadm_id', columns='icd_code', values ='value')
static_df = static_df.merge(pivot_diagnoses, on='hadm_id', how='left')
static_df[pivot_diagnoses.columns] = static_df[pivot_diagnoses.columns].fillna(0)
print("Filter events to stay")
filtered_inputevents.rename(columns={"starttime": "charttime"}, inplace=True)
antibiotics.rename(columns={'starttime':'charttime'}, inplace=True)
filtered_procedures.rename(columns={'starttime':'charttime'}, inplace=True)
chartlab_events = pd.concat([filtered_chartevents, filtered_labevents], join='outer')
filtered_prescriptions.rename(columns={'starttime':'charttime', 'gsn':'itemid'}, inplace=True)
# Pass chartevents dataframe and inputevents through hourly aggregation
chartlab_events = filter_events_to_stay(chartlab_events, static_df)
filtered_inputevents = filter_events_to_stay(filtered_inputevents, static_df)
microbiologyevents = filter_events_to_stay(microbiologyevents, static_df)
antibiotics = filter_events_to_stay(antibiotics, static_df)
filtered_prescriptions = filter_events_to_stay(filtered_prescriptions, static_df)
filtered_outputevents = filter_events_to_stay(filtered_outputevents, static_df)
filtered_procedures = filter_events_to_stay(filtered_procedures, static_df)
if args.group_by_level2:
print("Group itemids by actual feature they represent")
item_mapping_chartlab = item_mapping.loc[item_mapping.origin == 'chartevents', ['itemid', 'LEVEL2']].astype({'itemid': int})
chartlab_events = chartlab_events.merge(item_mapping_chartlab, on='itemid', how='left')
group_mask = ~chartlab_events.LEVEL2.isna()
chartlab_events.loc[group_mask, 'itemid'] = chartlab_events.loc[group_mask, 'LEVEL2']
print("Hourly aggregation")
# fill NaN with 1 for incisions etc.
chartlab_events[['value', 'valuenum']] = chartlab_events[['value', 'valuenum']].fillna(1)
aggregated_df = hourly_aggregation(chartlab_events, static_df, filtered_inputevents, antibiotics, microbiologyevents, filtered_outputevents, filtered_procedures, filtered_prescriptions)
print("Calculate SOFA, SI and Sepsis-3")
# import vents -- can move this code into SOFA score if necessary
vents_df = pd.read_csv(args.vent_path, low_memory=False)
vents_df = pd.merge(vents_df, static_df[['subject_id', 'hadm_id', 'icustay_id']],
how='inner', left_on='stay_id', right_on='icustay_id') # filter for relevant stay & patients
vents_df['starttime'] = pd.to_datetime(vents_df.starttime)
vents_df['endtime'] = pd.to_datetime(vents_df.endtime)
vents_df = anchor_dates(vents_df, ['starttime', 'endtime'], patients)
aggregated_df = add_vents(aggregated_df, vents_df)
# Calculate SOFA scores as additional columns
aggregated_df = calculate_SOFA(aggregated_df)
# Calculate Suspicion of Infection as an additional column
aggregated_df = calculate_SI(aggregated_df)
# Calculate Sepsis from SOFA and SI
aggregated_df = calculate_sepsis(aggregated_df, task="sepsis3", consider_difference=args.sepsis_consider_sofa_difference, decrease_baseline=args.sepsis_decrease_sofa_baseline)
# Add SIRS definition as column
# XXX: commented out because of conflict with itemid grouping
#aggregated_df = calculate_SIRS(aggregated_df)
# Calculate Sepsis from SIRS and SI
#aggregated_df = calculate_sepsis(aggregated_df, task="sepsis1", consider_difference=args.sepsis_consider_sofa_difference, decrease_baseline=args.sepsis_decrease_sofa_baseline)
# print("Filtering out patients without enough data")
# # Filtering out patients without enough data:
# counts = aggregated_df['hadm_id'].value_counts()
# aggregated_df = aggregated_df[aggregated_df['hadm_id'].isin(counts[counts>(args.data_hours+args.gap_hours)].index)]
print("Computing approximate real dates...")
static_df = anchor_dates(static_df, ["admittime", "dischtime", "intime", "outtime"], patients)
if 'charttime' in aggregated_df.columns:
aggregated_df = aggregated_df.merge(static_df[['hadm_id','subject_id']], on='hadm_id')
aggregated_df = anchor_dates(aggregated_df, ['charttime'], patients)
# drop patients where any one feature has no vitals
if args.dascena_drop:
print("Dropping patients with any vital missing")
categories = ["heart rate", "respiratory rate", "temperature", "systolic blood pressure",
"diastolic blood pressure", "oxygen saturation"]
for vital in categories:
if args.group_by_level2:
if vital not in aggregated_df.columns:
continue
mask = aggregated_df.set_index("hadm_id")[vital].notnull().groupby(level=0).any()
else:
ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower() == vital]['itemid'].map(str))
valid_ids = [i for i in ids if i in aggregated_df.columns]
if len(valid_ids) == 0:
continue
mask = aggregated_df.set_index("hadm_id")[valid_ids].notnull().groupby(level=0).any().any(axis=1)
aggregated_df = aggregated_df.set_index("hadm_id")[mask].reset_index()
# Impute
print("Imputing NaNs")
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("- Ratio of Nans:", aggregated_df.isna().sum().sum() / total_values)
ignore_cols = ['hadm_id', 'charttime', 'hour', 'subject_id'] + list(aggregated_df.select_dtypes(include="bool").columns)
impute_cols = [col for col in aggregated_df.columns if col not in ignore_cols]
aggregated_df = impute_timeseries(aggregated_df, method=args.impute_method, feature_cols=impute_cols)
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("After imputation:")
print("- Ratio of zeroes:", (aggregated_df == 0).sum().sum() / total_values)
return static_df, aggregated_df
def create_labels(self, static_df, aggregated_df, task='sepsis3', threshold=None):
# generate per-patient sepsis3 label
sepsis_hadm_ids = aggregated_df.hadm_id[aggregated_df[task] == True].unique()
static_df['y'] = False
static_df.loc[static_df.hadm_id.isin(sepsis_hadm_ids), 'y'] = True
def extract_timerange(self, args, aggregated_df, task='sepsis3'):
sepsis_onset_hour = aggregated_df[aggregated_df[task+'_onset']][['hadm_id', 'hour']]
sepsis_onset_hour.rename(columns={'hour': task+'_onset_hour'}, inplace=True)
aggregated_df = extract_data_prior_to_event(aggregated_df, sepsis_onset_hour, key='hadm_id', events_hour_column=task+'_onset_hour',
gap_hours=args.gap_hours, data_hours=args.data_hours, case_control=args.case_control, dascena_control=args.dascena_control)
return aggregated_df
@RegisterDataset("mimic-iv-los")
class MIMIC_IV_Los_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "Length of Stay"
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays):
admissions, patients, icustays = filter_eligible_patients(admissions, patients, icustays,
args.min_patient_age, args.min_hours, args.gap_hours,
args.min_icu_stay, args.max_icu_stay)
chartevents = filter_table_patients(chartevents, patients)
print("Merging static data...")
static_df = patients[["subject_id", "gender", "anchor_age"]]
static_df = static_df.merge(admissions[["subject_id", "hadm_id", "admittime", "dischtime", "insurance", "admission_type", "marital_status", "ethnicity"]],
how="inner", on="subject_id")
static_df = static_df.merge(icustays[["hadm_id", "stay_id", "first_careunit", "intime", "outtime", "los"]],
how="inner", on="hadm_id")
static_df.rename(columns={"anchor_age": "age", "stay_id": "icustay_id"}, inplace=True)
print("Filter events")
chartevents_features = item_mapping.loc[item_mapping.origin == 'chartevents'].itemid.astype(int).tolist()
filtered_chartevents = filter_variables(chartevents, chartevents_features)
print("Standardizing units")
filtered_chartevents = standardize_units(filtered_chartevents, item_mapping)
print("Filter events to stay")
filtered_chartevents = filter_events_to_stay(filtered_chartevents, static_df)
if args.group_by_level2:
print("Group itemids by actual feature they represent")
item_mapping_chart = item_mapping.loc[item_mapping.origin == 'chartevents', ['itemid', 'LEVEL2']].astype({'itemid': int})
filtered_chartevents = filtered_chartevents.merge(item_mapping_chart, on='itemid', how='left')
group_mask = ~filtered_chartevents.LEVEL2.isna()
filtered_chartevents.loc[group_mask, 'itemid'] = filtered_chartevents.loc[group_mask, 'LEVEL2']
print("Hourly aggregation")
aggregated_df = hourly_aggregation(filtered_chartevents, static_df)
print("Computing approximate real dates...")
static_df = anchor_dates(static_df, ["admittime", "dischtime", "intime", "outtime"], patients)
if 'charttime' in aggregated_df.columns:
aggregated_df = aggregated_df.merge(static_df[['hadm_id','subject_id']], on='hadm_id')
aggregated_df = anchor_dates(aggregated_df, ['charttime'], patients)
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Reindexing timeseries")
aggregated_df = reindex_timeseries(aggregated_df)
# Imputing
print("Imputing NaNs")
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("- Ratio of Nans:", aggregated_df.isna().sum().sum() / total_values)
impute_cols = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour', 'subject_id']]
aggregated_df = impute_timeseries(aggregated_df, method=args.impute_method, feature_cols=impute_cols)
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("After imputation:")
print("- Ratio of zeroes:", (aggregated_df == 0).sum().sum() / total_values)
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
return static_df, aggregated_df
def create_labels(self, static_df, aggregated_df, task=None, threshold=4):
static_df['y'] = static_df['los'] >= threshold
# extract first data_hours data from each patient
def extract_timerange(self, args, aggregated_df, task=None):
# aggregated_df['hour'] = aggregated_df.groupby('hadm_id')['hour'].rank('first')
df = aggregated_df.loc[aggregated_df['hour']<= args.data_hours]
return df
@RegisterDataset("mimic-iv-icumort")
class MIMIC_IV_ICUMort_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "ICU Mortality"
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays):
admissions, patients, icustays = filter_eligible_patients(admissions, patients, icustays,
args.min_patient_age, args.min_hours, args.gap_hours,
args.min_icu_stay, args.max_icu_stay)
chartevents = filter_table_patients(chartevents, patients)
print("Merging static data...")
static_df = patients[["subject_id", "gender", "anchor_age"]]
static_df = static_df.merge(admissions[["subject_id", "hadm_id", "admittime", "dischtime", "deathtime", "insurance", "admission_type", "marital_status", "ethnicity"]],
how="inner", on="subject_id")
static_df = static_df.merge(icustays[["hadm_id", "stay_id", "first_careunit", "intime", "outtime", "los"]],
how="inner", on="hadm_id")
static_df['death_in_icu'] = (~static_df['deathtime'].isna()) & (static_df['deathtime'] >= static_df['intime']) & \
(static_df['deathtime'] <= static_df['outtime'])
static_df.rename(columns={"anchor_age": "age", "stay_id": "icustay_id"}, inplace=True)
print("Filter events")
chartevents_features = item_mapping.loc[item_mapping.origin == 'chartevents'].itemid.astype(int).tolist()
filtered_chartevents = filter_variables(chartevents, chartevents_features)
print("Standardizing units")
filtered_chartevents = standardize_units(filtered_chartevents, item_mapping)
print("Filter events to stay")
filtered_chartevents = filter_events_to_stay(filtered_chartevents, static_df)
if args.group_by_level2:
print("Group itemids by actual feature they represent")
item_mapping_chart = item_mapping.loc[item_mapping.origin == 'chartevents', ['itemid', 'LEVEL2']].astype({'itemid': int})
filtered_chartevents = filtered_chartevents.merge(item_mapping_chart, on='itemid', how='left')
group_mask = ~filtered_chartevents.LEVEL2.isna()
filtered_chartevents.loc[group_mask, 'itemid'] = filtered_chartevents.loc[group_mask, 'LEVEL2']
print("Hourly aggregation")
aggregated_df = hourly_aggregation(filtered_chartevents, static_df)
print("Computing approximate real dates...")
static_df = anchor_dates(static_df, ["admittime", "dischtime", "intime", "outtime"], patients)
if 'charttime' in aggregated_df.columns:
aggregated_df = aggregated_df.merge(static_df[['hadm_id','subject_id']], on='hadm_id')
aggregated_df = anchor_dates(aggregated_df, ['charttime'], patients)
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Reindexing timeseries")
aggregated_df = reindex_timeseries(aggregated_df)
# Imputing
print("Imputing NaNs")
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("- Ratio of Nans:", aggregated_df.isna().sum().sum() / total_values)
impute_cols = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour', 'subject_id']]
aggregated_df = impute_timeseries(aggregated_df, method=args.impute_method, feature_cols=impute_cols)
total_values = (aggregated_df.shape[0] * aggregated_df.shape[1])
print("After imputation:")
print("- Ratio of zeroes:", (aggregated_df == 0).sum().sum() / total_values)
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
return static_df, aggregated_df
def create_labels(self, static_df, aggregated_df, task=None, threshold=None):
static_df['y'] = static_df['death_in_icu']
# extract first data_hours data from each patient
def extract_timerange(self, args, aggregated_df, task=None):
# aggregated_df['hour'] = aggregated_df.groupby('hadm_id')['hour'].rank('first')
df = aggregated_df.loc[aggregated_df['hour']<= args.data_hours]
return df
# args that affect cache
CACHE_ARGS = ['dataset', 'min_patient_age', 'data_hours', 'min_hours', 'gap_hours', 'min_icu_stay', 'max_icu_stay', 'item_map_path',
'sepsis_consider_sofa_difference', 'sepsis_decrease_sofa_baseline', 'group_by_level2', 'impute_method', 'dascena_drop']
def get_cache_filename(filename, args, extension='parquet'):
args_dict = vars(args)
args_str = ""
for arg in CACHE_ARGS:
arg_val = args_dict[arg]
args_str += '#' + arg + '=' + str(arg_val)
filename += "#" + md5(args_str) + '.' + extension
return filename
def calculate_SIRS(aggregated_df):
""" returns a dataframe with an additional column for SIRS score at every hour for the patient """
# Temperature
aggregated_df['temp_SIRS'] = 0
aggregated_df.loc[aggregated_df['223762'] < 10, '223762'] = float("NaN")
aggregated_df.loc[aggregated_df['223762'] > 50, '223762'] = float("NaN")
aggregated_df.loc[aggregated_df['223761'] < 70, '223761'] = float("NaN")
aggregated_df.loc[aggregated_df['223761'] > 120, '223761'] = float("NaN")
aggregated_df.loc[aggregated_df['223762'] > 38, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223762'] < 36, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223761'] > 100.4, 'temp_SIRS'] = 1
aggregated_df.loc[aggregated_df['223761'] < 96.8, 'temp_SIRS'] = 1
# Heart rate
aggregated_df['hr_SIRS'] = 0
aggregated_df.loc[aggregated_df['220045'] > 300, '220045'] = float("NaN")
aggregated_df.loc[aggregated_df['220045'] < 0, '220045'] = float("NaN")
aggregated_df.loc[aggregated_df['220045'] > 90, 'hr_SIRS'] = 1
# Respiratory rate
aggregated_df['resp_SIRS'] = 0
aggregated_df.loc[aggregated_df['220210'] > 70, '220210'] = float("NaN")
aggregated_df.loc[aggregated_df['220210'] < 0, '220210'] = float("NaN")
aggregated_df.loc[aggregated_df['224690'] > 70, '224690'] = float("NaN")
aggregated_df.loc[aggregated_df['224690'] < 0, '224690'] = float("NaN")
aggregated_df.loc[aggregated_df['220210'] > 20, 'resp_SIRS'] = 1
aggregated_df.loc[aggregated_df['224690'] > 20, 'resp_SIRS'] = 1
# WBC
aggregated_df['wbc_SIRS'] = 0
aggregated_df.loc[aggregated_df['51301'] > 12, 'wbc_SIRS'] = 1
aggregated_df.loc[aggregated_df['51301'] < 4, 'wbc_SIRS'] = 1
# Aggregation
sirs_cols = ['temp_SIRS', 'hr_SIRS', 'resp_SIRS', 'wbc_SIRS']
aggregated_df[sirs_cols] = aggregated_df.groupby('hadm_id')[sirs_cols].ffill().fillna(0).astype(int)
aggregated_df['SIRS'] = aggregated_df[sirs_cols].sum(axis=1)
aggregated_df.drop(columns=sirs_cols, inplace=True)
return aggregated_df
def calculate_SOFA(aggregated_df):
""" returns a dataframe with an additional column for SOFA score at every hour for the patient """
scores = [0, 1, 2, 3, 4]
reverse_scores = [4, 3, 2, 1, 0]
# Respiration
aggregated_df.loc[aggregated_df['223835'] < 1, '223835'] = aggregated_df['223835'] * 100
aggregated_df.loc[aggregated_df['223835'] < 20, '223835'] = float("NaN")
aggregated_df['pao2fio2ratio'] = aggregated_df['50821'] / aggregated_df['223835'] * 100
aggregated_df['pao2fio2ratio_novent'] = aggregated_df.loc[aggregated_df['InvasiveVent']==0]['pao2fio2ratio']
aggregated_df['pao2fio2ratio_vent'] = aggregated_df.loc[aggregated_df['InvasiveVent']==1]['pao2fio2ratio']
aggregated_df['resp_SOFA'] = 0
aggregated_df.loc[aggregated_df['pao2fio2ratio_novent'] < 400, 'resp_SOFA'] = 1
aggregated_df.loc[aggregated_df['pao2fio2ratio_novent'] < 300, 'resp_SOFA'] = 2
aggregated_df.loc[aggregated_df['pao2fio2ratio_vent'] < 200, 'resp_SOFA'] = 3
aggregated_df.loc[aggregated_df['pao2fio2ratio_vent'] < 100, 'resp_SOFA'] = 4
# Liver
bilirubin_bins = [-1, 1.2, 2, 6, 12, float("inf")]
aggregated_df['liver_SOFA'] = pd.cut(aggregated_df['50885'], bilirubin_bins, labels=scores).astype('float')
# Coagulation
coag_bins = [-1, 20, 50, 100, 150, float("inf")]
aggregated_df['coag_SOFA'] = pd.cut(aggregated_df['51265'], coag_bins, labels=reverse_scores).astype('float')
# Renal
creat_bins = [-1, 1.2, 2, 3.5, 5, float("inf")]
aggregated_df['renal_SOFA'] = pd.cut(aggregated_df['50912'], creat_bins, labels=scores).astype('float')
urine_output_cols = ['226559', '226560', '226561', '226584', '226563', '226564', '226565', '226567',
'226557', '226558', '227488', '227489']
aggregated_df.loc[aggregated_df['227488']>0, '227488'] = -aggregated_df['227488']
aggregated_df['urine_output'] = aggregated_df[urine_output_cols].sum(axis=1)
aggregated_df.loc[aggregated_df['urine_output'] < 500, 'renal_SOFA'] = 3
aggregated_df.loc[aggregated_df['urine_output'] < 200, 'renal_SOFA'] = 4
# Cardiovascular
# features = [221662, 221653, 221289, 221906] # dopamine, dobutamine, epinephrine, norepinephrine
aggregated_df.loc[(aggregated_df['221662_rate']>0) | (aggregated_df['221653_rate']>0), 'cardio_SOFA'] = 2
aggregated_df.loc[(aggregated_df['221662_rate']>5) | ((aggregated_df['221289_rate'] > 0) & (aggregated_df['221289_rate']<=0.1)) | ((aggregated_df['221906_rate'] > 0) & (aggregated_df['221906_rate']<=0.1)), 'cardio_SOFA'] = 3
aggregated_df.loc[(aggregated_df['221662_rate']>15) | (aggregated_df['221289_rate']>0.1) | (aggregated_df['221906_rate'] > 0.1), 'cardio_SOFA'] = 4
# GCS
# [220739, 223900, 223901] GCS-Eye, GCS-Verbal, GCS-Motor
aggregated_df['220739'] = aggregated_df.groupby('hadm_id')['220739'].ffill().fillna(4).astype(int)
aggregated_df['223900'] = aggregated_df.groupby('hadm_id')['223900'].ffill().fillna(5).astype(int)
aggregated_df['223901'] = aggregated_df.groupby('hadm_id')['223901'].ffill().fillna(6).astype(int)
aggregated_df['gcs'] = aggregated_df['220739'] + aggregated_df['223900'] + aggregated_df['223901']
aggregated_df.loc[aggregated_df['223900'] == 0, 'gcs'] = 15
gcs_bins = [-1, 6, 9, 12, 14, 16]
aggregated_df['gcs_SOFA'] = pd.cut(aggregated_df['gcs'], gcs_bins, labels=reverse_scores).astype('float')
# forwardfill for SOFA scores first, then replace NA's with 0.
sofa_cols = ['liver_SOFA', 'coag_SOFA', 'renal_SOFA', 'cardio_SOFA', 'resp_SOFA', 'gcs_SOFA']
aggregated_df[sofa_cols] = aggregated_df.groupby('hadm_id')[sofa_cols].ffill().fillna(0).astype(int)
aggregated_df['SOFA'] = aggregated_df[sofa_cols].sum(axis=1)
sofa_cols = sofa_cols + ['gcs', 'urine_output']
aggregated_df.drop(columns=sofa_cols, inplace=True)
return aggregated_df
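# --- Illustrative example (assumed values, not MIMIC data): how pd.cut maps a lab value
# onto the 0-4 sub-scores used above, here for bilirubin with right-closed bins. ---
def _example_bilirubin_subscore():
    bilirubin = pd.Series([0.5, 1.5, 3.0, 8.0, 15.0])
    bins = [-1, 1.2, 2, 6, 12, float("inf")]
    # yields sub-scores 0.0, 1.0, 2.0, 3.0, 4.0 respectively
    return pd.cut(bilirubin, bins, labels=[0, 1, 2, 3, 4]).astype('float')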
def calculate_SI(aggregated_df):
""" calculates suspicion of infection as per Sepsis-3 on aggregated hourly dataframe and saves it under the column `suspicion_of_infection`.
Note:
aggregated_df must contain `antibiotics` and `microbio-sample` columns.
"""
df = aggregated_df[['hadm_id', 'hour', 'antibiotics', 'microbio-sample']] # reduce data, speeds up computation
df['antibiotics'].fillna(0, inplace=True)
def _fix_columns(antibiotics_window_df):
"""Fixes resulting columns/index from GroupBy.rolling so that there are just hadm_id, hour, and antibiotics cols"""
if 'hadm_id' in antibiotics_window_df.index.names and 'hadm_id' in df.columns:
antibiotics_window_df.drop(columns='hadm_id', inplace=True)
if 'hour' in antibiotics_window_df.index.names and 'hour' in df.columns:
antibiotics_window_df.drop(columns='hour', inplace=True)
antibiotics_window_df = antibiotics_window_df.reset_index()[['hadm_id', 'hour', 'antibiotics']]
return antibiotics_window_df
antibiotics_last_24h = df.groupby('hadm_id').rolling(on='hour', window=24, min_periods=1).antibiotics.sum()
antibiotics_last_24h = _fix_columns(antibiotics_last_24h)
antibiotics_last_24h = antibiotics_last_24h.rename(columns={'antibiotics': 'antibiotics_last_24h'})
antibiotics_next_72h = df[::-1].groupby('hadm_id').rolling(on='hour', window=72, min_periods=1).antibiotics.sum()[::-1]
antibiotics_next_72h = _fix_columns(antibiotics_next_72h)
antibiotics_next_72h = antibiotics_next_72h.rename(columns={'antibiotics': 'antibiotics_next_72h'})
df = df.merge(antibiotics_last_24h, on=['hadm_id', 'hour'])
df = df.merge(antibiotics_next_72h, on=['hadm_id', 'hour'])
microbio_sample = df['microbio-sample'] == 1
suspicion_of_infection = microbio_sample & (df['antibiotics_last_24h'] > 0)
suspicion_of_infection |= microbio_sample & (df['antibiotics_next_72h'] > 0)
aggregated_df['suspicion_of_infection'] = suspicion_of_infection
return aggregated_df
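# Worked example of the SI rule above (assumed toy timeline, not MIMIC data):
#   hour 2: antibiotics administered   -> antibiotics_last_24h at hour 5 equals 1
#   hour 5: microbiology sample taken  -> suspicion_of_infection becomes True at hour 5
# A culture with no antibiotic in the prior 24 hours and none in the following 72 hours
# would remain False.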
def _sepsis_sofa_diff(df, hours_before_si=48, hours_after_si=24, metric='SOFA', sepsis_col='sepsis',
decrease_baseline=False, sofa_diff_threshold=2, ):
"""Computes sepsis indicator labels for a single patient, by comparing SOFA score at each timestep in window around SI to
baseline value from first hour of window.
Based off the following script by <NAME>:
https://github.com/BorgwardtLab/mgp-tcn/blob/master/src/query/compute_sepsis_onset_from_exported_sql_table.py
Parameters:
- df: hourly values for patient (must contain columns 'hour', 'suspicion_of_infection', and 'SOFA' or other metric)
- hours_before_si: defines size of window around SI
- hours_after_si: defines size of window around SI
- metric: column name of to check for acute increase of (SOFA or SIRS)
- sepsis_col: which column to store sepsis flag under.
- decrease_baseline: whether to decrease the baseline if a lower SOFA value occurs during window.
- sofa_diff_threshold: threshold of SOFA-increase for sepsis to occur (default: 2)
Note:
Sepsis onset time is set to be the time of of SOFA-increase.
"""
df[sepsis_col] = False # initialize to all False
df_s = df.iloc[np.argsort(df.hour)] # sort by hour, increasing
si_hours_df = df_s.loc[df_s.suspicion_of_infection == 1]
si_hours = si_hours_df.hour.tolist()
for i, si_hour in enumerate(si_hours):
# for every SI ocurrence, calculate window around hour of SI
si_window = df_s.loc[(si_hour-hours_before_si <= df_s.hour) & (df_s.hour <= si_hour+hours_after_si)]
si_window["SI_hour"] = si_hour
# check if there is an increase in SOFA during window
sofa = np.array(si_window[metric])
min_sofa = sofa[0]
for i in range(len(sofa)):
current_sofa = sofa[i]
if decrease_baseline and current_sofa < min_sofa:
min_sofa = current_sofa
else:
diff = current_sofa - min_sofa
if diff >= sofa_diff_threshold:
# if there was an increase >= 2, set sepsis-time to SOFA-increase time
sepsis_time = si_window['hour'].iloc[i]
df.loc[df.hour == sepsis_time, sepsis_col] = True
# if there was an increase >= 2, set sepsis-time to SI-time
#df.loc[df.hour == si_hour, sepsis_col] = True
break # break to outer for-loop
return df[sepsis_col]
def calculate_sepsis(aggregated_df, hours_before_si=48, hours_after_si=24, task='sepsis3', consider_difference=True, decrease_baseline=True):
""" Calculates sepsis labels from hourly SOFA/SIRS and suspicion of infection.
Note:
Similar to other implementations, sepsis-3 is considered to happen if SOFA was >= 2 at any point in a window
around a suspicion of infection. Thus, it is not considered whether the SOFA increased or decreased from the start value.
Parameters:
- aggregated_df
- hours_before_si: how many hours previous to the SI-time to evaluate SOFA scores for.
- hours_after_si: how many hours after the SI-time to evaluate SOFA scores for.
- metric:
- consider_difference: if true, will use slower algorithm that considers increase in metric by 2 from baseline instead of any values >= 2.
Returns: aggregated_df with two additional columns:
- 'sepsis': a binary label indicating times of sepsis.
- 'sepsis_onset' a binary label only containing the first case of sepsis per-admission.
"""
if task == 'sepsis1':
metric = 'SIRS'
elif task == 'sepsis3':
metric = 'SOFA'
else:
raise ValueError ("Task undefined: please choose between sepsis1 and sepsis3")
if not consider_difference:
max_sofa_last_x_hours = aggregated_df[['hadm_id', 'hour', metric]].groupby('hadm_id').rolling(on='hour', window=hours_before_si, min_periods=1)[metric].max()
indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=hours_after_si)
max_sofa_next_y_hours = aggregated_df[['hadm_id', 'hour', metric]].groupby('hadm_id').rolling(on='hour', window=indexer, min_periods=1)[metric].max()
df = aggregated_df[['hadm_id', 'hour', 'suspicion_of_infection']].set_index(['hadm_id', 'hour'])
df['max_sofa_last_x_hours'] = max_sofa_last_x_hours
df['max_sofa_next_y_hours'] = max_sofa_next_y_hours
df.reset_index(inplace=True)
sepsis = df['suspicion_of_infection'] & (df.max_sofa_last_x_hours >= 2)
sepsis |= df['suspicion_of_infection'] & (df.max_sofa_next_y_hours >= 2)
aggregated_df[task] = sepsis
else:
print("Computing sepsis")
start = time.time()
sepsis = aggregated_df[['hadm_id','hour','suspicion_of_infection',metric]].groupby("hadm_id").apply(_sepsis_sofa_diff, hours_after_si=hours_after_si,
hours_before_si=hours_before_si, metric=metric,
decrease_baseline=decrease_baseline)
sepsis.index = sepsis.index.get_level_values(1) #drop hadm_id index to have same index as aggregated_df
aggregated_df[task] = sepsis
print("Took", time.time()-start, "s")
# compute first point of sepsis3 per admission
sepsis_onset = aggregated_df.loc[sepsis == True, ['hadm_id', 'hour', task]].sort_values(['hour']).groupby('hadm_id').first()
sepsis_onset = sepsis_onset.rename(columns={task: task+"_onset"}).reset_index()
aggregated_df = aggregated_df.merge(sepsis_onset, on=['hadm_id','hour'], how='left')
aggregated_df[task+'_onset'].fillna(False, inplace=True)
return aggregated_df
def create_metadata_json(static_df, timeseries_df=None):
"""Creates metadata json from dataframes
"""
metadata_json = []
for _, row in tqdm(static_df.iterrows(), total=len(static_df)):
patient_json = {}
for key, value in row.items():
if type(value) is pd.Timestamp:
patient_json[key] = str(value)
else:
patient_json[key] = value
# add non-static features under the key 'hourly_values'
if timeseries_df is not None:
hourly_vals = timeseries_df.loc[timeseries_df['subject_id'] == row['subject_id']]
assert len(hourly_vals) > 0, "Zero rows found for patient"
patient_json['hourly_values'] = []
cols = hourly_vals.columns[~(hourly_vals.columns == 'subject_id')]
for _, row in hourly_vals.loc[:, cols].iterrows():
patient_json['hourly_values'].append(dict(row.items()))
metadata_json.append(patient_json)
return metadata_json
MIMIC_IV_CSV_SUBFOLDER = {
'patients': 'core',
'admissions': 'core',
'chartevents': 'icu',
'icustays': 'icu',
'inputevents': 'icu',
'outputevents': 'icu',
'procedureevents': 'icu',
'labevents': 'hosp',
'microbiologyevents': 'hosp',
'prescriptions': 'hosp',
'diagnoses_icd': 'hosp',
}
MIMIC_IV_CSV_DTYPES = {
'patients': {
'subject_id': np.uint32,
'gender': str,
'anchor_age': np.uint32,
'anchor_year': str,
'anchor_year_group': str,
'dod': str,
},
'chartevents': {
'subject_id': np.uint32,
'hadm_id': np.uint32,
'stay_id': np.uint32,
'charttime': str,
'storetime': str,
'itemid': np.uint32,
'value': str,
'valuenum': float,
'valueuom': str,
'warning': bool
},
'admissions': {
'subject_id': np.uint32,
'hadm_id': np.uint32,
'admittime': str,
'dischtime': str,
'deathtime': str,
'admission_type': str,
'admission_location': str,
'discharge_location': str,
'insurance': str,
'language': str,
'marital_status': str,
'ethnicity': str,
'edregtime': str,
'edouttime': str,
'hospital_expire_flag': bool,
},
'icustays': {
'subject_id': np.uint32,
'hadm_id': np.uint32,
'stay_id': np.uint32,
'first_careunit': str,
'last_careunit': str,
'intime': str,
'outtime': str,
'los': float
},
'inputevents': None,
'labevents': None,
'microbiologyevents': None,
'prescriptions': None,
'outputevents': None,
'diagnoses_icd': None,
'procedureevents': None,
}
MIMIC_IV_CSV_DATETIME_CONVERSION = {
'patients': {
'anchor_year': lambda col: pd.to_datetime(col, format="%Y"),
'anchor_year_group': lambda col: pd.to_datetime(col.str.slice(stop=4), format="%Y"),
'dod': pd.to_datetime
},
'chartevents': {
'charttime': pd.to_datetime,
'storetime': pd.to_datetime
},
'admissions': {
'admittime': pd.to_datetime,
'dischtime': pd.to_datetime,
'deathtime': pd.to_datetime,
'edregtime': pd.to_datetime,
'edouttime': pd.to_datetime
},
'icustays': {
'intime': pd.to_datetime,
'outtime': pd.to_datetime
},
'inputevents': {
'starttime': pd.to_datetime,
'endtime': pd.to_datetime,
'storetime': pd.to_datetime,
},
'labevents': {
'charttime': pd.to_datetime,
'storetime': pd.to_datetime
},
'microbiologyevents': {
'chartdate': pd.to_datetime,
'charttime': pd.to_datetime,
'storedate': pd.to_datetime,
'storetime': pd.to_datetime,
},
'prescriptions': {
'starttime': pd.to_datetime,
'stoptime': pd.to_datetime,
},
'outputevents': {
'charttime': pd.to_datetime,
'storetime': pd.to_datetime
},
'procedureevents': {
'starttime': pd.to_datetime,
'endtime': pd.to_datetime,
'storetime': pd.to_datetime
},
'diagnoses_icd': {}
}
def load_data(mimic_dir: str, subset=['patients', 'chartevents', 'admissions', 'icustays'], nrows=None, chunksize=1000000, cache_dir=None) -> Sequence[pd.DataFrame]:
"""Loads MIMIC-IV Dataset with the correct types.
Parameters:
- mimic_dir: top-level directory of MIMIC-IV.
- subset: names of csvs to load.
- nrows: maximum number of rows to load from each csv.
- chunksize: how many rows to load at once.
- cache_dir: if set, will load and cache csvs as parquet files in this dir.
Returns:
- A dict mapping each loaded csv name to its dataframe.
"""
mimic_dir = Path(mimic_dir) / "1.0"
assert mimic_dir.exists(), f"{mimic_dir} does not exist!"
result = {}
loadingbar = tqdm(subset, leave=False)
for csv_name in loadingbar:
loadingbar.set_description(f"Loading {csv_name}")
start = time.time()
if cache_dir is not None:
path = Path(cache_dir, csv_name + '.parquet')
if path.is_file():
df = pd.read_parquet(path)
loaded_parquet = True
else:
loaded_parquet = False
else:
loaded_parquet = False
if not loaded_parquet:
path = Path(mimic_dir, MIMIC_IV_CSV_SUBFOLDER[csv_name], csv_name + '.csv.gz')
df = pd.read_csv(path, dtype=MIMIC_IV_CSV_DTYPES[csv_name], chunksize=chunksize, nrows=nrows, low_memory=False)
# if chunking, load data in chunks
if chunksize is not None:
data = []
for chunk in tqdm(df, position=1, leave=False):
data.append(chunk)
df = | pd.concat(data, axis=0) | pandas.concat |
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Cleaner module cleans the data by:
- Taking CSV files with stage/phase information and filtering them, leaving date, time, result and phase fields.
- Taking individual files for stages (with data/time and result) and merging into a single file.
- Removing duplicates from the files merged.
"""
import os
import pandas as pd
from pathlib import Path
from tools.Utils import create_folder_if_not_exists, output_fields, \
raw_output_folder, results_folder
def filter_phase_data():
"""
Filter phase data for files in raw data folder to ensure only desired fields are taken.
"""
print("Filtering phase data...")
# path to analyse
path_list = Path(raw_output_folder).glob('**/*.csv')
# loop through files in the given path and store desired fields as array
for path in path_list:
path_in_str = str(path)
file_name = os.path.basename(path_in_str)
full_path = results_folder + 'phases/raw/' + file_name
data = pd.read_csv(full_path, header=0, skipinitialspace=True, usecols=output_fields)
df = pd.DataFrame(data)
# only output to CSV those which contain some data
if df.shape[0] > 0:
output_folder = results_folder + 'phases/processed/'
file_name = 'clean_' + file_name
# ensure folder exists before creating the file
create_folder_if_not_exists(output_folder)
# write output to a file
df.to_csv(output_folder + file_name, sep=',')
print("Phase data filtered!")
def combine_phase_data():
"""
Combine data from processed/filtered data to a single file.
"""
print("Combining phase data...")
# create an empty data frame
out_df = pd.DataFrame([])
# loop through files in the given path and store data in df
path_list = Path(results_folder + 'phases/processed/').glob('**/*.csv')
for path in path_list:
path_in_str = str(path)
file_name = os.path.basename(path_in_str)
full_path = results_folder + 'phases/processed/' + file_name
data = pd.read_csv(full_path, header=0, skipinitialspace=True, usecols=output_fields)
df = pd.DataFrame(data)
# -*- encoding: utf-8 -*-
import re
from collections import defaultdict
import fnmatch
import os
import json
from datetime import date
from django.core.management.base import BaseCommand
from django.utils import translation
from django.conf import settings
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.geos import GEOSGeometry
from django.db.models import Q
from django.core.files import File
from django.template.defaultfilters import slugify
from django.utils.timezone import get_current_timezone
import requests
from dateutil.parser import parse as parse_date
import pandas as pd
import unicodecsv
from geogermany.models import State, District, Municipality, Borough
from ...models import (NursingHome, SupervisionAuthority, SupervisionReport,
report_file_path)
def convert_timestamp(ts):
return ts.tz_localize('Europe/Berlin').to_datetime()
QUOTES_RE = re.compile(r'''^["'](.*)["']$''')
EMAIL_RE = re.compile(r'[^\@]+@[\w\.-]+', re.I)
PLZ_RE = re.compile(r'\b(\d{5})\b')
STREET_RE = re.compile(r'^([^\. \d]+).*', re.U)
BERLIN_REPORT_RE = re.compile(r'\[\[http://ftp.berlinonline.de/lageso/([\w\.]+)\|(.*) vom (\d+)\.(\d+)\.(\d+)\]\]', re.U)
def clean_name(name):
name = name.strip()
name = QUOTES_RE.sub('\\1', name)
return name
def urlify(url):
url = stringify(url)
if not url:
return ''
if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
return url
def stringify(val):
if pd.isnull(val):
return ''
return val
def jsonify(val):
if pd.isnull(val):
return None
    if isinstance(val, pd.Timestamp):
return val.isoformat()
return val
def get_pdfs_in_dir(dir):
for root, dirnames, filenames in os.walk(dir):
for filename in fnmatch.filter(filenames, '*.pdf'):
yield os.path.join(root, filename)
def read_nursinghomes(filename):
df = pd.read_csv(filename, parse_dates=['start_date', 'start_date_contract', 'letzte Aktualisierung durch Pflegeeinrichtung'], encoding='utf-8')
df['ags'] = df['ags'].apply(lambda x: None if pd.isnull(x) else str(int(x)).zfill(5))
df['PLZ'] = df['PLZ'].apply(lambda x: None if pd.isnull(x) else str(int(x)).zfill(5))
return df
class Command(BaseCommand):
help = "Import observational studies and related data"
def add_arguments(self, parser):
parser.add_argument('command', help='Subcommand')
parser.add_argument('filename', help='filename')
def handle(self, *args, **options):
getattr(self, options['command'])(**options)
def reindex(self, *args, **options):
NursingHome.objects.update_search_index()
def load(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
filename = options['filename']
        skip_keys = ['Name', 'slug', u'Straße Hausnr', 'Ort', 'PLZ',
'traeger_art', 'Web', 'lng', 'lat',
'nachtpflege',
'kurzzeitpflege',
'tagespflege',
'vollstationaer',
'red_flag_food',
'red_flag_decubitus',
'red_flag_medicine',
'red_flag_incontinence',
'red_flag_pain'
]
df = read_nursinghomes(filename)
for _, row in df.iterrows():
data = {k: jsonify(v) for k, v in row.iteritems() if k not in skip_keys}
name = clean_name(row['Name'])
slug = row['slug']
nursinghome, created = NursingHome._default_manager.update_or_create(
slug=slug, defaults=dict(
name=name,
address=stringify(row[u'Straße Hausnr']),
location=stringify(row['Ort']),
postcode=stringify(row['PLZ']),
provider_type=stringify(row['traeger_art']),
web=urlify(row['Web']),
grade_total=row['grade_overall'],
grade_care=row['grade_care'],
care_night=row['nachtpflege'],
care_temp=row['kurzzeitpflege'],
care_day=row['tagespflege'],
care_full=row['vollstationaer'],
red_flag_food=row['red_flag_food'],
red_flag_decubitus=row['red_flag_decubitus'],
red_flag_medicine=row['red_flag_medicine'],
red_flag_incontinence=row['red_flag_incontinence'],
red_flag_pain=row['red_flag_pain'],
data=data,
geo=GEOSGeometry('POINT(%f %f)' % (row['lng'], row['lat']), srid=4326)
)
)
if created:
print('Created %s' % nursinghome)
else:
print('Updated %s' % nursinghome)
def set_geogermany(self, *args, **options):
for nursinghome in NursingHome._default_manager.filter(district__isnull=True):
print(nursinghome)
try:
district = District.objects.get(
geom__covers=nursinghome.geo)
except District.DoesNotExist:
district = District.objects.annotate(distance=Distance('geom', nursinghome.geo)).order_by('distance')[0]
nursinghome.district = district
nursinghome.state_id = district.part_of_id
nursinghome.save()
def load_supervision_authorities(self, *args, **options):
excel_file = pd.ExcelFile(options['filename'])
state_names = excel_file.sheet_names
for state_name in state_names:
state = State.objects.get(name=state_name)
df = excel_file.parse(state_name)
for _, row in df.iterrows():
try:
email = stringify(row['email'])
if email:
email = email.splitlines()[0]
email = EMAIL_RE.search(email)
email = email.group(0).strip() if email is not None else ''
email = email.lower()
authority, created = SupervisionAuthority.objects.update_or_create(
state=state, name=stringify(row['name']), defaults=dict(
address=stringify(row['address']),
contact=stringify(row['contact']),
email=email,
url=stringify(row['url']),
report_url=stringify(row.get(u'Verfügbare Berichte', ''))
)
)
if created:
print(authority)
except Exception:
print(row['name'])
raise
def assign_authorities(self, *args, **options):
central = [u'Berlin',
# u'Brandenburg',
u'Bremen',
# u'Hessen',
# u'Rheinland-Pfalz',
u'Saarland',
u'Sachsen',
u'Sachsen-Anhalt',
u'Thüringen'
]
central_states = [
State.objects.get(name=state_name) for state_name in central
]
central_authorities = {
state: SupervisionAuthority.objects.get(state=state) for state in central_states
}
# Assign central authorities
central_homes = NursingHome._default_manager.filter(supervision_authority__isnull=True, state__in=central_states)
for nursinghome in central_homes:
nursinghome.supervision_authority = central_authorities[nursinghome.state]
nursinghome.save()
decentral = [
u'Baden-Württemberg',
u'Bayern',
u'Hamburg',
u'Mecklenburg-Vorpommern',
u'Niedersachsen',
u'Nordrhein-Westfalen',
u'Schleswig-Holstein',
]
decentral_states = [
State.objects.get(name=state_name) for state_name in decentral
]
for state in decentral_states:
print('\n' * 3)
print('=' * 20)
print(state)
state_districts = District.objects.filter(part_of=state)
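            # Match each district to one supervision authority via a cascade of
            # name heuristics: containment of the full district name, then of
            # its first token, then of its last word, then "Landkreis"/"Stadt"
            # keywords; districts that remain ambiguous are printed for review.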
for district in state_districts:
if SupervisionAuthority.objects.filter(district=district).exists():
continue
authorities = SupervisionAuthority.objects.filter(state=state, name__contains=district.name, district__isnull=True)
if len(authorities) == 0:
authorities = SupervisionAuthority.objects.filter(state=state, name__contains=district.name.split()[0], district__isnull=True)
if len(authorities) > 1:
authorities = authorities.filter(
name__icontains=re.split('\W', district.name)[-1])
if len(authorities) > 1:
print(authorities)
if 'Landkreis' in district.kind_detail:
authorities = authorities.filter(Q(name__contains='Landkreis') |
Q(name__contains='Kreisver'))
if 'Stadt' in district.kind_detail:
authorities = authorities.filter(Q(name__icontains='Stadt') |
Q(name__contains='kreisfrei'))
if len(authorities) == 1:
auth = authorities[0]
auth.district = district
auth.save()
else:
print(district, district.kind_detail)
print(authorities)
hamburg_state = State.objects.get(name='Hamburg')
hamburg_district = District.objects.get(part_of=hamburg_state)
hamburg = Municipality.objects.get(part_of=hamburg_district)
boroughs = Borough.objects.filter(part_of=hamburg)
for borough in boroughs:
authorities = SupervisionAuthority.objects.filter(state=hamburg_state, name__contains=borough.name, borough__isnull=True)
if len(authorities) == 1:
auth = authorities[0]
auth.district = hamburg_district
auth.borough = borough
auth.save()
NursingHome.objects.filter(supervision_authority__isnull=True, geo__coveredby=borough.geom).update(supervision_authority=auth)
else:
print(borough.name)
print(authorities)
central_homes = NursingHome.objects.filter(supervision_authority__isnull=True, state__in=decentral_states)
for nursinghome in central_homes:
try:
auth = SupervisionAuthority.objects.get(district=nursinghome.district)
nursinghome.supervision_authority = auth
nursinghome.save()
except SupervisionAuthority.DoesNotExist:
print('Missing Authority %s' % nursinghome.district)
def assign_hessen(self, *args, **options):
hessen_state = State.objects.get(name='Hessen')
df = pd.read_csv(options['filename'])
auth_key = [x for x in df.columns if 'HAVS' in x][0]
auth_names = list(df[auth_key].value_counts().index)
auth_mapping = {
a: SupervisionAuthority.objects.get(state=hessen_state, name__contains=a.split()[0]) for a in auth_names
}
hessen_plz = df['PLZ'].value_counts()
hessen_plz_unique = hessen_plz[hessen_plz == 1]
hessen_plz_non_unique = hessen_plz[hessen_plz > 1]
for _, row in df[df['PLZ'].isin(hessen_plz_unique.index)].iterrows():
plz = str(row['PLZ'])
auth = auth_mapping[row[auth_key]]
NursingHome.objects.filter(supervision_authority__isnull=True,
state=hessen_state, postcode=plz).update(
supervision_authority=auth
)
for _, row in df[df['PLZ'].isin(hessen_plz_non_unique.index)].iterrows():
plz = str(row['PLZ'])
location = str(row['Ort'])
if pd.isnull(row[auth_key]):
print(row)
continue
auth = auth_mapping[row[auth_key]]
NursingHome.objects.filter(supervision_authority__isnull=True,
state=hessen_state, postcode=plz,
location=location).update(
supervision_authority=auth
)
def assign_brandenburg(self, *args, **options):
brandenburg_state = State.objects.get(name='Brandenburg')
excel_file = pd.ExcelFile(options['filename'])
df = excel_file.parse('Brandenburg')
assigned_auths = defaultdict(list)
locations = {}
for _, row in df.iterrows():
auth = SupervisionAuthority.objects.get(state=brandenburg_state, name=row['name'])
locations[auth] = GEOSGeometry('POINT(%f %f)' % (row['lng'], row['lat']), srid=4326)
assigned_districts = row[u'Landkreis-Zuständigkeit'].splitlines()
for district_name in assigned_districts:
districts = District.objects.filter(part_of=brandenburg_state, name=district_name)
if len(districts) != 1:
print(district_name)
print(districts)
else:
assigned_auths[districts[0]].append(auth)
for nursinghome in NursingHome.objects.filter(supervision_authority__isnull=True,
state=brandenburg_state):
district = District.objects.get(geom__covers=nursinghome.geo)
auths = assigned_auths[district]
if len(auths) == 1:
nursinghome.supervision_authority = auths[0]
nursinghome.save()
else:
min_distance = None
best_auth = None
for auth, point in locations.items():
if auth not in auths:
continue
dist = NursingHome.objects.filter(pk=nursinghome.pk
).annotate(distance=Distance('geo', point))
dist = dist[0].distance.m
if min_distance is None or dist < min_distance:
min_distance = dist
best_auth = auth
nursinghome.supervision_authority = best_auth
nursinghome.save()
def assign_rheinlandpfalz(self, *args, **options):
rp_state = State.objects.get(name='Rheinland-Pfalz')
        excel_file = pd.ExcelFile(options['filename'])
import numpy as np
import pandas as pd
import joblib, os, logging
from joblib import Parallel, delayed
from scipy.interpolate import interp2d
from sklearn.metrics import mean_squared_error
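# Reconstruction-error scorer for unsupervised transformers (for example a
# KernelPCA fitted with fit_inverse_transform=True): the MSE between X and its
# reconstruction is negated because scikit-learn model selection maximises
# scores.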
def my_scorer(estimator, X, y=None):
X_reduced = estimator.transform(X)
X_preimage = estimator.inverse_transform(X_reduced)
return -1 * mean_squared_error(X, X_preimage)
def rescale(arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
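# rescale_mean downsamples a 2D array by a factor of two: each (up to) 2x2
# block is replaced by its mean, so odd-sized inputs keep a trailing
# row/column averaged over the remaining cells.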
def rescale_mean(arr):
arr_new = np.zeros([int(np.ceil(arr.shape[0]/2)), int(np.ceil(arr.shape[1]/2))])
for i in range(0, arr.shape[0], 2):
for j in range(0, arr.shape[1], 2):
arr_new[int((i+1)/2),int((j+1)/2)] = np.mean(arr[i:i+2, j:j+2])
return arr_new
def stack_2d(X, sample, compress):
if compress:
sample = rescale(sample, 8, 8)
if len(sample.shape) == 3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape) == 2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
def stack_3d(X, sample):
if X.shape[0] == 0:
X = sample
elif len(sample.shape)!=len(X.shape):
X = np.vstack((X, sample[np.newaxis]))
else:
X = np.vstack((X, sample))
return X
class dataset_creator_dense():
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1, test=False):
self.projects = projects
self.isfortest = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.create_logger()
self.check_dates()
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
if self.nwp_resolution == 0.05:
self.compress = True
else:
self.compress = False
self.njobs = njobs
self.variables = data_variables
def create_logger(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(os.path.dirname(self.path_nwp), 'log_' + self.projects_group + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
        self.logger.info('Dates checked. Number of time samples %s', str(len(dates)))
self.dates = pd.DatetimeIndex(dates)
def check_empty_nwp(self, nwp, nwp_next, nwp_prev, variables):
flag = True
for var in variables:
if nwp[var].shape[0] == 0 and nwp_next[var].shape[0] == 0 and nwp_prev[var].shape[0] == 0:
flag = False
break
return flag
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, compress):
X = dict()
y = dict()
X_3d = dict()
fname = os.path.join(path_nwp, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H').strftime(
'%d%m%y%H%M')
for project in projects:
X[project['_id']] = pd.DataFrame()
y[project['_id']] = pd.DataFrame()
X_3d[project['_id']] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in pdates:
try:
nwp = nwps[date]
date = pd.to_datetime(date, format='%d%m%y%H%M')
nwp_prev = nwps[(date - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if self.check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
y[project['_id']] = pd.concat([y[project['_id']], pd.DataFrame(data.loc[date, project['_id']], columns=['target'], index=[date])])
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project['_id']], longs[project['_id']], project['static_data']['type'])
X[project['_id']] = pd.concat([X[project['_id']], inp])
X_3d[project['_id']] = stack_2d(X_3d[project['_id']], inp_cnn, False)
except:
continue
else:
for date in pdates:
try:
nwp = nwps[date]
date = pd.to_datetime(date, format='%d%m%y%H%M')
                            nwp_prev = nwps[(date - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import seaborn as sns; sns.set_context('notebook')
from collections import OrderedDict
import pickle
from pystan import StanModel
"""Multilevel Modeling with Poststratification (MRP)"""
# Use multilevel regression to model individual survey responses as a function of demographic and geographic
# predictors, partially pooling respondents across states/regions to an extent determined by the data.
# The final step is post-stratification.
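# Hedged illustration (not part of the original analysis): post-stratification
# averages the per-cell model predictions using census cell counts as weights.
# `cell_pred` and `cell_counts` below are hypothetical names.
def poststratify_example(cell_pred, cell_counts):
    """Population-weighted average of per-cell predicted support."""
    cell_pred = np.asarray(cell_pred, dtype=float)
    cell_counts = np.asarray(cell_counts, dtype=float)
    return float(np.sum(cell_pred * cell_counts) / np.sum(cell_counts))
# Example: poststratify_example([0.55, 0.48], [12000.0, 30000.0]) ~ 0.5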
# Read the data & define variables
# Data are from http://www.stat.columbia.edu/~gelman/arm/examples/election88
"""Step 1: gather national opinion polls (they need to include respondent information down to the level of disaggregation
the analysis is targeting) """
# Load in data from the CBS polls with the following covariates (individual level):
# - org: organisation which collected the poll
# - year: year id
# - survey: survey id
# - bush: indicator (=1) for support of bush
# - state: state id
# - edu: categorical variable indicating level of education
# - age: categorical variable indicating age
# - female: indicator (=1) for female
# - black: indicator (=1) for black
# - weight: sample weight
polls = pd.read_csv('./data/polls.csv')
polls = polls.drop(polls.columns[[0]], axis=1)
"""Step 2: create a separate dataset of state-level predictors """
# Load in data for region indicators (state level). The variables are:
# - state_abbr: abbreviations of state names
# - regions: 1=northeast, 2=south, 3=north central, 4=west, 5=d.c.
# - not_dc: indicator variable which is 1 for non_dc states
state_info = pd.read_csv('./data/state.csv')
state_info = state_info.rename(columns={'Unnamed: 0': 'state'})
# Include a measure of previous vote as a state-level predictor. The variables are:
# - g76_84pr: state average in previous election
# - stnum2: state id
presvote = pd.read_csv("./data/presvote.csv")
presvote = presvote.drop(presvote.columns[[0]], axis=1)
presvote = presvote.rename(columns={'g76_84pr': 'v_prev', 'stnum2': 'state'})
# Include a measure of candidate effects as a state-level predictor and add empty row for DC.
candidate_effects = pd.read_csv("./data/candidate_effects.csv")
candidate_effects = candidate_effects.drop(candidate_effects.columns[[0]], axis=1)
candidate_effects = candidate_effects.rename(columns={'state': 'state_abbr'})
candidate_effects.loc[:,'candidate_effects_weighted'] = (candidate_effects.loc[:,'X76'] + candidate_effects.loc[:,'X80'] + candidate_effects.loc[:,'X84']) / 3.0
candidate_effects_1 = candidate_effects.iloc[:9]
candidate_effects = pd.concat([candidate_effects_1,candidate_effects.iloc[8:]]).reset_index(drop=True)
candidate_effects.iloc[8] = 0
candidate_effects = candidate_effects.set_value(8, 'state_abbr', 'DC')
presvote.loc[:,'v_prev'] += candidate_effects.loc[:,'candidate_effects_weighted']
# Merge all three dataframes into one:
polls = pd.merge(polls, state_info, on='state', how='left')
polls = pd.merge(polls, presvote, on='state', how='left')
# Select subset of polls:
polls_subset = polls.loc[polls['survey'] == '9158']
# Change female to sex and black to race:
polls_subset.loc[:,'sex'] = polls_subset.loc[:,'female'] + 1
polls_subset.loc[:,'race'] = polls_subset.loc[:,'black'] + 1
# Drop unnessary columns:
polls_subset = polls_subset.drop(['org', 'year', 'survey', 'region', 'not_dc', 'state_abbr', 'weight', 'female', 'black'], axis=1)
polls_subset['main'] = np.where(polls_subset['bush'] == 1, 1, np.where(polls_subset['bush'] == 0, 1, 0))
# Drop nan in polls_subset.bush
polls_subset_no_nan = polls_subset[polls_subset.bush.notnull()]
polls_subset_no_nan = polls_subset_no_nan.drop(['main'], axis=1)
# define other data summaries
n = len(polls_subset.bush) # of survey respondents
n_no_nan = len(polls_subset_no_nan.bush) # of survey respondents
n_sex = max(polls_subset.sex) # of sex categories
n_race = max(polls_subset.race) # of race categories
n_age = max(polls_subset.age) # of age categories
n_edu = max(polls_subset.edu) # of education categories
n_state = max(polls_subset.state) # of states
""" Extra Step: Validation Data"""
# load in 1988 election data as a validation check
election88 = pd.read_csv("./data/election88.csv")
election88 = election88.drop(election88.columns[[0]], axis=1)
# stnum: state id
# st: state abbreviation
# electionresult: is the outcome of the election
# samplesize:
# raking:
# merge_:
"""Step 3: Load 1988 census data to enable poststratification."""
census88 = pd.read_csv("./data/census88.csv")
census88 = census88.drop(census88.columns[[0]], axis=1)
census88 = pd.merge(census88, state_info, on='state', how='left')
census88 = pd.merge(census88, presvote, on='state', how='left')
# edu: categorical variable indicating level of education
# age: categorical variable indicating age
# female: indicator (=1) for female
# black: indicator (=1) for black
# N: size of population in this cell
# Change female to sex and black to race:
census88.loc[:,'sex'] = census88.loc[:,'female'] + 1
census88.loc[:,'race'] = census88.loc[:,'black'] + 1
census88 = census88.drop(['female', 'black'], axis=1)
"""Step 4: Fit a regression model for an individual survey response given demographics, geography etc."""
################################
#### 1st model: Probability that a voter casts a vote for a main party candidate
################################
# Pr(Y_i \in {Bush, Dukakis}) = logit^{-1}(alpha[1] + alpha[2] * v_prev_j[i] + a^state_j[i] + a^edu_j[i] + a^sex_j[i] + a^age_j[i]
# + a^race_j[i] + a^partyID_j[i] + a^ideology_j[i] + a^lastvote_j[i])
# a^{}_j[i] are the varying coefficients associated with each categorical variable; with independent prior distributions:
# a^{}_j[i] ~ N(0,sigma^2_var)
# the variance parameters are assigned a hyper prior distribution:
# sigma^2_var ~ invX^2(v,sigma^2_0)
# with a weak prior specification for v and sigma^2_0
# Model description:
model_1 = """
data {
int<lower=0> N;
int<lower=0> n_state;
int<lower=0> n_edu;
int<lower=0> n_sex;
int<lower=0> n_age;
int<lower=0> n_race;
#int<lower=0> n_party_id;
#int<lower=0> n_ideology;
#int<lower=0> n_lastvote;
vector[N] state_v_prev;
int<lower=0,upper=n_state> state[N];
int<lower=0,upper=n_edu> edu[N];
int<lower=0,upper=n_sex> sex[N];
int<lower=0,upper=n_age> age[N];
int<lower=0,upper=n_race> race[N];
#int<lower=0,upper=n_party_id> party_id[N];
#int<lower=0,upper=n_ideology> ideology[N];
#int<lower=0,upper=n_lastvote> lastvote[N];
int<lower=0,upper=1> y[N];
}
parameters {
vector[2] alpha;
vector[n_state] a;
vector[n_edu] b;
vector[n_sex] c;
vector[n_age] d;
vector[n_race] e;
#vector[n_party_id] f;
#vector[n_ideology] g;
#vector[n_lastvote] h;
real<lower=0,upper=100> sigma_a;
real<lower=0,upper=100> sigma_b;
real<lower=0,upper=100> sigma_c;
real<lower=0,upper=100> sigma_d;
real<lower=0,upper=100> sigma_e;
#real<lower=0,upper=100> sigma_f;
#real<lower=0,upper=100> sigma_g;
#real<lower=0,upper=100> sigma_h;
real<lower=0> mu;
real<lower=0,upper=100> sigma_0;
}
transformed parameters {
vector[N] y_hat;
for (i in 1:N)
y_hat[i] = alpha[1] + alpha[2] * state_v_prev[i] + a[state[i]] + b[edu[i]] + c[sex[i]] + d[age[i]] +
e[race[i]]; #+ f[party_id[i]] + g[ideology[i]] + h[lastvote[i]];
}
model {
a ~ normal (0, sigma_a);
b ~ normal (0, sigma_b);
c ~ normal (0, sigma_c);
d ~ normal (0, sigma_d);
e ~ normal (0, sigma_e);
#f ~ normal (0, sigma_f);
#g ~ normal (0, sigma_g);
#h ~ normal (0, sigma_h);
alpha ~ normal(0, 100);
sigma_a ~ scaled_inv_chi_square(mu,sigma_0);
sigma_b ~ scaled_inv_chi_square(mu,sigma_0);
sigma_c ~ scaled_inv_chi_square(mu,sigma_0);
sigma_d ~ scaled_inv_chi_square(mu,sigma_0);
sigma_e ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_f ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_g ~ scaled_inv_chi_square(mu,sigma_0);
#sigma_h ~ scaled_inv_chi_square(mu,sigma_0);
mu ~ uniform(0, 100);
sigma_0 ~ uniform(0, 100);
y ~ bernoulli_logit(y_hat);
}
"""
# Model parameters and data:
model_1_data_dict = {'N': n, 'n_state': n_state, 'n_edu': n_edu, 'n_sex': n_sex, 'n_age': n_age, 'n_race': n_race,
'state': polls_subset.state, 'edu': polls_subset.edu, 'sex': polls_subset.sex, 'age': polls_subset.age,
'race': polls_subset.race, 'state_v_prev': polls_subset.v_prev, 'y': polls_subset.main}
# Fitting the model:
n_chains = 2
n_iter = 1000
sm = StanModel(model_code=model_1)
with open('./models/model_1.pkl', 'wb') as f:
pickle.dump(sm, f)
sm = pickle.load(open('./models/model_1.pkl', 'rb'))
model_1_fit = sm.sampling(data=model_1_data_dict, iter=n_iter, chains=n_chains)
# Plot coefficients with confidence intervals:
params_demo = model_1_fit.extract(['alpha', 'b', 'c', 'd', 'e'])
params_alpha_0 = pd.DataFrame({'Intercept': params_demo['alpha'][:, 0]})
import numpy as np
import pandas as pd
import preprocessor
import loanrequest
import main
import montecarlo
import torch
from flask import Flask, request
app = Flask(__name__)
@app.route("/test_candidate.py")
def hello():
print('hello called')
origination_channel = request.args.get('origination_channel', type=float)
seller_name = request.args.get('seller_name', type=float)
interest_rate = request.args.get('interest_rate', type=float)
upb = request.args.get('upb', type=float)
orig_loan_t = request.args.get('orig_loan_t', type=float)
total_price = request.args.get('total_price', type=float)
first_lien = request.args.get('first_lien', type=float)
current_amount = request.args.get('current_amount', type=float)
second_amount = request.args.get('second_amount', type=float)
num_borr = request.args.get('num_borr', type=float)
monthly_payments = request.args.get('monthly_payments', type=float)
income = request.args.get('income', type=float)
borrower_credit_score = request.args.get('borrower_credit_score', type=float)
first_time = request.args.get('first_time', type=float)
loan_purp = request.args.get('loan_purp', type=float)
num_units = request.args.get('num_units', type=float)
occ_type = request.args.get('occ_type', type=float)
zip = request.args.get('zip', type=float)
co_credit_score = request.args.get('co_credit_score', type=float)
ins_perc = request.args.get('ins_perc', type=float)
ins_type = request.args.get('ins_type', type=float)
reloc_ind = request.args.get('reloc_ind', type=float)
state = request.args.get('state', type=float)
    stats = pd.read_csv('dataset/statistics.csv')
stats_mean = stats.iloc[1]
stats_std = stats.iloc[2]
# Other parameters we need to calculate:
ltv = upb / total_price * 100
cltv = ( first_lien + current_amount + second_amount ) / total_price * 100
dti = monthly_payments / income * 100
if interest_rate == -1:
interest_rate = stats_mean[3]
# Build feature list for new person
new_person = np.array([origination_channel, seller_name, interest_rate, upb, orig_loan_t, ltv, cltv,
num_borr, dti, borrower_credit_score, first_time, loan_purp, num_units, occ_type, 1.0, state, zip,
ins_perc, co_credit_score, ins_type, reloc_ind])
    # Instantiate net
net = main.Net(DROPOUT_RATE=0.0001)
net.load_state_dict(
torch.load('models/final_weights.pkl', map_location='cpu'))
# Call the net with the person's online data
new_person_np = new_person
    new_person = pd.DataFrame(new_person)
import warnings
warnings.filterwarnings("once", category=DeprecationWarning) # noqa: E402
import unittest
import os
import shutil
from distutils.version import LooseVersion
import pytest
import shapely.geometry as shpg
import numpy as np
import pandas as pd
import xarray as xr
salem = pytest.importorskip('salem')
rasterio = pytest.importorskip('rasterio')
gpd = pytest.importorskip('geopandas')
# Local imports
import oggm
from oggm.core import (gis, inversion, gcm_climate, climate, centerlines,
flowline, massbalance)
import oggm.cfg as cfg
from oggm import utils, tasks
from oggm.utils import get_demo_file, tuple2int
from oggm.tests.funcs import get_test_dir, init_columbia, init_columbia_eb
from oggm import workflow
from oggm.exceptions import InvalidWorkflowError, MassBalanceCalibrationError
pytestmark = pytest.mark.test_env("prepro")
def read_svgcoords(svg_file):
"""Get the vertices coordinates out of a SVG file"""
from xml.dom import minidom
doc = minidom.parse(svg_file)
coords = [path.getAttribute('d') for path
in doc.getElementsByTagName('path')]
doc.unlink()
_, _, coords = coords[0].partition('C')
x = []
y = []
for c in coords.split(' '):
if c == '':
continue
c = c.split(',')
x.append(np.float(c[0]))
y.append(np.float(c[1]))
x.append(x[0])
y.append(y[0])
return np.rint(np.asarray((x, y)).T).astype(np.int64)
class TestGIS(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['working_dir'] = self.testdir
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_init_gdir(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
assert gdir.has_file('outlines')
assert gdir.has_file('intersects')
assert not gdir.has_file('glacier_grid')
def test_define_region(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
extent = gdir.extent_ll
tdf = gdir.read_shapefile('outlines')
myarea = tdf.geometry.area * 10**-6
np.testing.assert_allclose(myarea, np.float(tdf['Area']), rtol=1e-2)
self.assertTrue(gdir.has_file('intersects'))
np.testing.assert_array_equal(gdir.intersects_ids,
['RGI50-11.00846', 'RGI50-11.00950'])
# From string
gdir = oggm.GlacierDirectory(gdir.rgi_id, base_dir=self.testdir)
# This is not guaranteed to be equal because of projection issues
np.testing.assert_allclose(extent, gdir.extent_ll, atol=1e-5)
# Change area
prev_area = gdir.rgi_area_km2
prev_lon = gdir.cenlon
prev_lat = gdir.cenlat
cfg.PARAMS['use_rgi_area'] = False
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir,
reset=True)
gis.define_glacier_region(gdir)
# Close but not same
assert gdir.rgi_area_km2 != prev_area
assert gdir.cenlon != prev_lon
assert gdir.cenlat != prev_lat
np.testing.assert_allclose(gdir.rgi_area_km2, prev_area, atol=0.01)
np.testing.assert_allclose(gdir.cenlon, prev_lon, atol=1e-4)
np.testing.assert_allclose(gdir.cenlat, prev_lat, atol=1e-4)
assert gdir.status == 'Glacier or ice cap'
def test_reproject(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
fn = 'resampled_dem'
cfg.BASENAMES[fn] = ('res_dem.tif', 'for testing')
gis.rasterio_to_gdir(gdir, get_demo_file('hef_srtm.tif'), fn)
with rasterio.open(gdir.get_filepath(fn), 'r',
driver='GTiff') as ds:
totest = ds.read(1).astype(rasterio.float32)
np.testing.assert_allclose(gis.read_geotiff_dem(gdir), totest)
# With other resampling less exact
fn = 'resampled_dem_n'
cfg.BASENAMES[fn] = ('res_dem.tif', 'for testing')
gis.rasterio_to_gdir(gdir, get_demo_file('hef_srtm.tif'), fn,
resampling='bilinear')
with rasterio.open(gdir.get_filepath(fn), 'r',
driver='GTiff') as ds:
totest = ds.read(1).astype(rasterio.float32)
np.testing.assert_allclose(gis.read_geotiff_dem(gdir), totest,
rtol=0.01)
def test_init_glacier_regions(self):
hef_rgi = gpd.read_file(get_demo_file('Hintereisferner_RGI5.shp'))
gdir = workflow.init_glacier_regions(hef_rgi)[0]
nx, ny = gdir.grid.nx, gdir.grid.ny
# Change something and note that no change occurs because dem is there
cfg.PARAMS['border'] = 12
gdir = workflow.init_glacier_regions(hef_rgi)[0]
assert nx == gdir.grid.nx
assert ny == gdir.grid.ny
def test_divides_as_glaciers(self):
hef_rgi = gpd.read_file(get_demo_file('divides_alps.shp'))
hef_rgi = hef_rgi.loc[hef_rgi.RGIId == 'RGI50-11.00897']
# Rename the RGI ID
hef_rgi['RGIId'] = ['RGI50-11.00897' + d for d in
['_d01', '_d02', '_d03']]
# Just check that things are working
gdirs = workflow.init_glacier_directories(hef_rgi)
workflow.gis_prepro_tasks(gdirs)
assert gdirs[0].rgi_id == 'RGI50-11.00897_d01'
assert gdirs[-1].rgi_id == 'RGI50-11.00897_d03'
def test_raise_on_duplicate(self):
hef_rgi = gpd.read_file(get_demo_file('divides_alps.shp'))
hef_rgi = hef_rgi.loc[hef_rgi.RGIId == 'RGI50-11.00897']
# Rename the RGI ID
rids = ['RGI60-11.00897', 'RGI60-11.00897_d01', 'RGI60-11.00897']
hef_rgi['RGIId'] = rids
# Just check that things are raised
with pytest.raises(InvalidWorkflowError):
workflow.init_glacier_directories(hef_rgi)
with pytest.raises(InvalidWorkflowError):
workflow.init_glacier_directories(rids)
def test_dx_methods(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
# Test fixed method
cfg.PARAMS['grid_dx_method'] = 'fixed'
cfg.PARAMS['fixed_dx'] = 50
gis.define_glacier_region(gdir)
mygrid = salem.Grid.from_json(gdir.get_filepath('glacier_grid'))
np.testing.assert_allclose(np.abs(mygrid.dx), 50.)
# Test linear method
cfg.PARAMS['grid_dx_method'] = 'linear'
cfg.PARAMS['d1'] = 5.
cfg.PARAMS['d2'] = 10.
cfg.PARAMS['dmax'] = 100.
gis.define_glacier_region(gdir)
targetdx = np.rint(5. * gdir.rgi_area_km2 + 10.)
targetdx = np.clip(targetdx, 10., 100.)
mygrid = salem.Grid.from_json(gdir.get_filepath('glacier_grid'))
np.testing.assert_allclose(mygrid.dx, targetdx)
# Test square method
cfg.PARAMS['grid_dx_method'] = 'square'
cfg.PARAMS['d1'] = 5.
cfg.PARAMS['d2'] = 10.
cfg.PARAMS['dmax'] = 100.
gis.define_glacier_region(gdir)
targetdx = np.rint(5. * np.sqrt(gdir.rgi_area_km2) + 10.)
targetdx = np.clip(targetdx, 10., 100.)
mygrid = salem.Grid.from_json(gdir.get_filepath('glacier_grid'))
np.testing.assert_allclose(mygrid.dx, targetdx)
def test_repr(self):
from textwrap import dedent
expected = dedent("""\
<oggm.GlacierDirectory>
RGI id: RGI50-11.00897
Region: 11: Central Europe
Subregion: 11-01: Alps
Name: Hintereisferner
Glacier type: Glacier
Terminus type: Land-terminating
Area: 8.036 km2
Lon, Lat: (10.7584, 46.8003)
Grid (nx, ny): (159, 114)
Grid (dx, dy): (50.0, -50.0)
""")
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
self.assertEqual(gdir.__repr__(), expected)
def test_glacierdir(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
# this should simply run
oggm.GlacierDirectory(entity.RGIId, base_dir=self.testdir)
def test_glacier_masks(self):
# The GIS was double checked externally with IDL.
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.process_dem(gdir)
gis.glacier_masks(gdir)
gis.gridded_attributes(gdir)
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
glacier_mask = nc.variables['glacier_mask'][:]
glacier_ext = nc.variables['glacier_ext'][:]
glacier_ext_erosion = nc.variables['glacier_ext_erosion'][:]
ice_divides = nc.variables['ice_divides'][:]
area = np.sum(glacier_mask * gdir.grid.dx**2)
np.testing.assert_allclose(area*10**-6, gdir.rgi_area_km2,
rtol=1e-1)
assert np.all(glacier_mask[glacier_ext == 1])
assert np.all(glacier_mask[glacier_ext_erosion == 1])
assert np.all(glacier_ext[ice_divides == 1])
assert np.all(glacier_ext_erosion[ice_divides == 1])
np.testing.assert_allclose(np.std(glacier_ext_erosion - glacier_ext),
0, atol=0.1)
entity['RGIFlag'] = '2909'
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir, reset=True)
with pytest.raises(RuntimeError):
gis.glacier_masks(gdir)
@pytest.mark.skipif((LooseVersion(rasterio.__version__) <
LooseVersion('1.0')),
reason='requires rasterio >= 1.0')
def test_simple_glacier_masks(self):
# The GIS was double checked externally with IDL.
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.simple_glacier_masks(gdir, write_hypsometry=True)
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
area = np.sum(nc.variables['glacier_mask'][:] * gdir.grid.dx**2)
np.testing.assert_allclose(area*10**-6, gdir.rgi_area_km2,
rtol=1e-1)
# Check that HEF doesn't "badly" need a divide
mask = nc.variables['glacier_mask'][:]
ext = nc.variables['glacier_ext'][:]
dem = nc.variables['topo'][:]
np.testing.assert_allclose(np.max(dem[mask.astype(bool)]),
np.max(dem[ext.astype(bool)]),
atol=10)
df = utils.compile_glacier_statistics([gdir], path=False)
np.testing.assert_allclose(df['dem_max_elev_on_ext'],
df['dem_max_elev'],
atol=10)
assert np.all(df['dem_max_elev'] > df['dem_max_elev_on_ext'])
dfh = pd.read_csv(gdir.get_filepath('hypsometry'))
np.testing.assert_allclose(dfh['Slope'], entity.Slope, atol=0.5)
np.testing.assert_allclose(dfh['Aspect'], entity.Aspect, atol=5)
np.testing.assert_allclose(dfh['Zmed'], entity.Zmed, atol=20)
np.testing.assert_allclose(dfh['Zmax'], entity.Zmax, atol=20)
np.testing.assert_allclose(dfh['Zmin'], entity.Zmin, atol=20)
bins = []
for c in dfh.columns:
try:
int(c)
bins.append(c)
except ValueError:
pass
dfr = pd.read_csv(get_demo_file('Hintereisferner_V5_hypso.csv'))
dfh.index = ['oggm']
dft = dfh[bins].T
dft['ref'] = dfr[bins].T
assert dft.sum()[0] == 1000
assert utils.rmsd(dft['ref'], dft['oggm']) < 5
@pytest.mark.skipif((LooseVersion(rasterio.__version__) <
LooseVersion('1.0')),
reason='requires rasterio >= 1.0')
def test_glacier_masks_other_glacier(self):
# This glacier geometry is simplified by OGGM
# https://github.com/OGGM/oggm/issues/451
entity = gpd.read_file(get_demo_file('RGI60-14.03439.shp')).iloc[0]
cfg.PATHS['dem_file'] = get_demo_file('RGI60-14.03439.tif')
cfg.PARAMS['border'] = 1
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
# The test below does NOT pass on OGGM
shutil.copyfile(gdir.get_filepath('gridded_data'),
os.path.join(self.testdir, 'default_masks.nc'))
gis.simple_glacier_masks(gdir, write_hypsometry=True)
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
area = np.sum(nc.variables['glacier_mask'][:] * gdir.grid.dx**2)
np.testing.assert_allclose(area*10**-6, gdir.rgi_area_km2,
rtol=1e-1)
shutil.copyfile(gdir.get_filepath('gridded_data'),
os.path.join(self.testdir, 'simple_masks.nc'))
dfh = pd.read_csv(gdir.get_filepath('hypsometry'))
np.testing.assert_allclose(dfh['Slope'], entity.Slope, atol=1)
np.testing.assert_allclose(dfh['Aspect'], entity.Aspect, atol=10)
np.testing.assert_allclose(dfh['Zmed'], entity.Zmed, atol=20)
np.testing.assert_allclose(dfh['Zmax'], entity.Zmax, atol=20)
np.testing.assert_allclose(dfh['Zmin'], entity.Zmin, atol=20)
@pytest.mark.skipif((LooseVersion(rasterio.__version__) <
LooseVersion('1.0')),
reason='requires rasterio >= 1.0')
def test_rasterio_glacier_masks(self):
# The GIS was double checked externally with IDL.
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
        # specifying a source will look for a DEM in a respective folder
self.assertRaises(ValueError, gis.rasterio_glacier_mask,
gdir, source='SRTM')
# this should work
gis.rasterio_glacier_mask(gdir, source=None)
# read dem mask
with rasterio.open(gdir.get_filepath('glacier_mask'),
'r', driver='GTiff') as ds:
profile = ds.profile
data = ds.read(1).astype(profile['dtype'])
# compare projections
self.assertEqual(ds.width, gdir.grid.nx)
self.assertEqual(ds.height, gdir.grid.ny)
self.assertEqual(ds.transform[0], gdir.grid.dx)
self.assertEqual(ds.transform[4], gdir.grid.dy)
        # origin is center for gdir grid but corner for dem_mask, so shift
self.assertAlmostEqual(ds.transform[2], gdir.grid.x0 - gdir.grid.dx/2)
self.assertAlmostEqual(ds.transform[5], gdir.grid.y0 - gdir.grid.dy/2)
# compare dem_mask size with RGI area
mask_area_km2 = data.sum() * gdir.grid.dx**2 * 1e-6
self.assertAlmostEqual(mask_area_km2, gdir.rgi_area_km2, 1)
        # given how the mask is derived from the outlines, it should always be larger
self.assertTrue(mask_area_km2 > gdir.rgi_area_km2)
# not sure if we want such a hard coded test, but this will fail if the
# sample data changes but could also indicate changes in rasterio
self.assertTrue(data.sum() == 3218)
def test_intersects(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
self.assertTrue(gdir.has_file('intersects'))
def test_dem_source_text(self):
for s in ['TANDEM', 'AW3D30', 'MAPZEN', 'DEM3', 'ASTER', 'SRTM',
'RAMP', 'GIMP', 'ARCTICDEM', 'DEM3', 'REMA', 'COPDEM',
'NASADEM', 'ALASKA']:
assert s in gis.DEM_SOURCE_INFO.keys()
def test_dem_daterange_dateinfo(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
# dem_info should return a string
self.assertIsInstance(gdir.dem_info, str)
# there is no daterange for demo/custom data
self.assertIsNone(gdir.dem_daterange)
# but we can make some
with open(os.path.join(gdir.dir, 'dem_source.txt'), 'a') as f:
f.write('Date range: 2000-2000')
# delete lazy properties
delattr(gdir, '_lazy_dem_daterange')
# now call again and check return type
self.assertIsInstance(gdir.dem_daterange, tuple)
self.assertTrue(all(isinstance(year, int)
for year in gdir.dem_daterange))
def test_custom_basename(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
cfg.add_to_basenames('mybn', 'testfb.pkl', docstr='Some docs')
out = {'foo': 1.5}
gdir.write_pickle(out, 'mybn')
assert gdir.read_pickle('mybn') == out
def test_gridded_data_var_to_geotiff(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
target_var = 'topo'
gis.gridded_data_var_to_geotiff(gdir, varname=target_var)
gtiff_path = os.path.join(gdir.dir, target_var+'.tif')
assert os.path.exists(gtiff_path)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
gridded_topo = ds[target_var]
gtiff_ds = salem.open_xr_dataset(gtiff_path)
assert ds.salem.grid == gtiff_ds.salem.grid
assert np.allclose(gridded_topo.data, gtiff_ds.data)
class TestCenterlines(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PARAMS['border'] = 10
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_filter_heads(self):
f = get_demo_file('glacier.svg')
coords = read_svgcoords(f)
polygon = shpg.Polygon(coords)
hidx = np.array([3, 9, 80, 92, 108, 116, 170, len(coords)-12])
heads = [shpg.Point(*c) for c in coords[hidx]]
heads_height = np.array([200, 210, 1000., 900, 1200, 1400, 1300, 250])
radius = 25
_heads, _ = centerlines._filter_heads(heads, heads_height, radius,
polygon)
_headsi, _ = centerlines._filter_heads(heads[::-1],
heads_height[::-1],
radius, polygon)
self.assertEqual(_heads, _headsi[::-1])
self.assertEqual(_heads, [heads[h] for h in [2, 5, 6, 7]])
def test_mask_to_polygon(self):
from oggm.core.centerlines import _mask_to_polygon
mask = np.zeros((5, 5))
mask[1, 1] = 1
p1, p2 = _mask_to_polygon(mask)
assert p1 == p2
mask = np.zeros((5, 5))
mask[1:-1, 1:-1] = 1
p1, p2 = _mask_to_polygon(mask)
assert p1 == p2
mask = np.zeros((5, 5))
mask[1:-1, 1:-1] = 1
mask[2, 2] = 0
p1, _ = _mask_to_polygon(mask)
assert len(p1.interiors) == 1
assert p1.exterior == p2.exterior
for i_line in p1.interiors:
assert p2.contains(i_line)
n = 30
for i in range(n):
mask = np.zeros((n, n))
mask[1:-1, 1:-1] = 1
_, p2 = _mask_to_polygon(mask)
for i in range(n*2):
mask[np.random.randint(2, n-2), np.random.randint(2, n-2)] = 0
p1, _ = _mask_to_polygon(mask)
assert len(p1.interiors) > 1
assert p1.exterior == p2.exterior
for i_line in p1.interiors:
assert p2.contains(i_line)
def test_centerlines(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
cls = gdir.read_pickle('centerlines')
for cl in cls:
for j, ip, ob in zip(cl.inflow_indices, cl.inflow_points,
cl.inflows):
self.assertEqual(cl.line.coords[j], ip.coords[0])
self.assertEqual(ob.flows_to_point.coords[0],
ip.coords[0])
self.assertEqual(cl.line.coords[ob.flows_to_indice],
ip.coords[0])
self.assertEqual(len(cls), 3)
self.assertEqual(set(cls), set(centerlines.line_inflows(cls[-1])))
def test_downstream(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
d = gdir.read_pickle('downstream_line')
cl = gdir.read_pickle('inversion_flowlines')[-1]
self.assertEqual(
len(d['full_line'].coords) - len(d['downstream_line'].coords),
cl.nx)
def test_downstream_bedshape(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
default_b = cfg.PARAMS['border']
cfg.PARAMS['border'] = 80
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
out = gdir.read_pickle('downstream_line')
for o, h in zip(out['bedshapes'], out['surface_h']):
assert np.all(np.isfinite(o))
assert np.all(np.isfinite(h))
tpl = gdir.read_pickle('inversion_flowlines')[-1]
c = gdir.read_pickle('downstream_line')['downstream_line']
c = centerlines.Centerline(c, dx=tpl.dx)
# Independant reproduction for a few points
o = out['bedshapes']
i0s = [0, 5, 10, 15, 20]
for i0 in i0s:
wi = 11
i0 = int(i0)
cur = c.line.coords[i0]
n1, n2 = c.normals[i0]
line = shpg.LineString([shpg.Point(cur + wi / 2. * n1),
shpg.Point(cur + wi / 2. * n2)])
from oggm.core.centerlines import line_interpol
from scipy.interpolate import RegularGridInterpolator
points = line_interpol(line, 0.5)
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
topo = nc.variables['topo_smoothed'][:]
x = nc.variables['x'][:]
y = nc.variables['y'][:]
xy = (np.arange(0, len(y) - 0.1, 1), np.arange(0, len(x) - 0.1, 1))
interpolator = RegularGridInterpolator(xy, topo)
zref = [interpolator((p.xy[1][0], p.xy[0][0])) for p in points]
myx = np.arange(len(points))
myx = (myx - np.argmin(zref)) / 2 * gdir.grid.dx
myz = o[i0] * myx**2 + np.min(zref)
# In this case the fit is simply very good (plot it if you want!)
assert utils.rmsd(zref, myz) < 20
cfg.PARAMS['border'] = default_b
@pytest.mark.slow
def test_baltoro_centerlines(self):
cfg.PARAMS['border'] = 2
cfg.PARAMS['dmax'] = 100
cfg.PATHS['dem_file'] = get_demo_file('baltoro_srtm_clip.tif')
b_file = get_demo_file('baltoro_wgs84.shp')
gdf = gpd.read_file(b_file)
kienholz_file = get_demo_file('centerlines_baltoro_wgs84.shp')
kdf = gpd.read_file(kienholz_file)
# add fake attribs
area = gdf['AREA']
del gdf['RGIID']
del gdf['AREA']
gdf['RGIId'] = 'RGI50-00.00000'
gdf['GLIMSId'] = gdf['GLIMSID']
gdf['Area'] = area
gdf['CenLat'] = gdf['CENLAT']
gdf['CenLon'] = gdf['CENLON']
gdf['BgnDate'] = '-999'
gdf['Name'] = 'Baltoro'
gdf['GlacType'] = '0000'
gdf['Status'] = '0'
gdf['O1Region'] = '01'
gdf['O2Region'] = '01'
entity = gdf.iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
my_mask = np.zeros((gdir.grid.ny, gdir.grid.nx), dtype=np.uint8)
cls = gdir.read_pickle('centerlines')
assert gdir.rgi_date == 2009
sub = centerlines.line_inflows(cls[-1])
self.assertEqual(set(cls), set(sub))
assert sub[-1] is cls[-1]
sub = centerlines.line_inflows(cls[-2])
assert set(sub).issubset(set(cls))
np.testing.assert_equal(np.unique(sorted([cl.order for cl in sub])),
np.arange(cls[-2].order+1))
assert sub[-1] is cls[-2]
# Mask
for cl in cls:
x, y = tuple2int(cl.line.xy)
my_mask[y, x] = 1
# Transform
kien_mask = np.zeros((gdir.grid.ny, gdir.grid.nx), dtype=np.uint8)
from shapely.ops import transform
for index, entity in kdf.iterrows():
def proj(lon, lat):
return salem.transform_proj(salem.wgs84, gdir.grid.proj,
lon, lat)
kgm = transform(proj, entity.geometry)
# Interpolate shape to a regular path
e_line = []
for distance in np.arange(0.0, kgm.length, gdir.grid.dx):
e_line.append(*kgm.interpolate(distance).coords)
kgm = shpg.LineString(e_line)
# Transform geometry into grid coordinates
def proj(x, y):
return gdir.grid.transform(x, y, crs=gdir.grid.proj)
kgm = transform(proj, kgm)
# Rounded nearest pix
def project(x, y):
return (np.rint(x).astype(np.int64),
np.rint(y).astype(np.int64))
kgm = transform(project, kgm)
x, y = tuple2int(kgm.xy)
kien_mask[y, x] = 1
# We test the Heidke Skill score of our predictions
rest = kien_mask + 2 * my_mask
# gr.plot_array(rest)
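        # Contingency counts of the combined mask: rest == 3 means both masks
        # mark glacier (hit), 2 = OGGM only, 1 = Kienholz only, 0 = neither;
        # the Heidke Skill Score below measures agreement beyond chance.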
na = len(np.where(rest == 3)[0])
nb = len(np.where(rest == 2)[0])
nc = len(np.where(rest == 1)[0])
nd = len(np.where(rest == 0)[0])
denom = np.float((na+nc)*(nd+nc)+(na+nb)*(nd+nb))
hss = np.float(2.) * ((na*nd)-(nb*nc)) / denom
if cfg.PARAMS['grid_dx_method'] == 'linear':
self.assertTrue(hss > 0.53)
if cfg.PARAMS['grid_dx_method'] == 'fixed': # quick fix
self.assertTrue(hss > 0.41)
class TestElevationBandFlowlines(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PARAMS['border'] = 10
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PARAMS['baseline_climate'] = ''
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_irregular_grid(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.simple_glacier_masks(gdir)
centerlines.elevation_band_flowline(gdir)
df = pd.read_csv(gdir.get_filepath('elevation_band_flowline'), index_col=0)
# Almost same because of grid VS shape
np.testing.assert_allclose(df.area.sum(), gdir.rgi_area_m2, rtol=0.01)
# Length is very different but that's how it is
np.testing.assert_allclose(df.dx.sum(), entity['Lmax'], rtol=0.2)
# Slope is similar enough
avg_slope = np.average(np.rad2deg(df.slope), weights=df.area)
np.testing.assert_allclose(avg_slope, entity['Slope'], rtol=0.12)
def test_to_inversion_flowline(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.simple_glacier_masks(gdir)
centerlines.elevation_band_flowline(gdir)
centerlines.fixed_dx_elevation_band_flowline(gdir)
# The tests below are overkill but copied from another test
# they check everything, which is OK
area = 0.
otherarea = 0.
evenotherarea = 0
hgt = []
harea = []
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
harea.extend(list(cl.widths * cl.dx))
hgt.extend(list(cl.surface_h))
area += np.sum(cl.widths * cl.dx)
evenotherarea += np.sum(cl.widths_m * cl.dx_meter)
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
otherarea += np.sum(nc.variables['glacier_mask'][:])
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
mask = nc.variables['glacier_mask'][:]
topo = nc.variables['topo_smoothed'][:]
rhgt = topo[np.where(mask)][:]
tdf = gdir.read_shapefile('outlines')
np.testing.assert_allclose(area, otherarea, rtol=0.1)
np.testing.assert_allclose(evenotherarea, gdir.rgi_area_m2)
area *= gdir.grid.dx ** 2
otherarea *= gdir.grid.dx ** 2
np.testing.assert_allclose(area * 10 ** -6, np.float(tdf['Area']),
rtol=1e-4)
# Check for area distrib
bins = np.arange(utils.nicenumber(np.min(hgt), 50, lower=True),
utils.nicenumber(np.max(hgt), 50) + 1,
50.)
h1, b = np.histogram(hgt, weights=harea, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
assert utils.rmsd(h1 * 100 * 50, h2 * 100 * 50) < 1.5
# Check that utility function is doing what is expected
hh, ww = gdir.get_inversion_flowline_hw()
new_area = np.sum(ww * cl.dx * gdir.grid.dx)
np.testing.assert_allclose(new_area * 10 ** -6, np.float(tdf['Area']))
def test_inversion(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.simple_glacier_masks(gdir)
centerlines.elevation_band_flowline(gdir)
centerlines.fixed_dx_elevation_band_flowline(gdir)
climate.process_custom_climate_data(gdir)
mbdf = gdir.get_ref_mb_data()
res = climate.t_star_from_refmb(gdir, mbdf=mbdf['ANNUAL_BALANCE'])
t_star, bias = res['t_star'], res['bias']
climate.local_t_star(gdir, tstar=t_star, bias=bias)
climate.mu_star_calibration(gdir)
inversion.prepare_for_inversion(gdir)
v1 = inversion.mass_conservation_inversion(gdir)
inversion.distribute_thickness_per_altitude(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
ds1 = ds.load()
# Repeat normal workflow
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir, reset=True)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
climate.process_custom_climate_data(gdir)
mbdf = gdir.get_ref_mb_data()
res = climate.t_star_from_refmb(gdir, mbdf=mbdf['ANNUAL_BALANCE'])
t_star, bias = res['t_star'], res['bias']
climate.local_t_star(gdir, tstar=t_star, bias=bias)
climate.mu_star_calibration(gdir)
inversion.prepare_for_inversion(gdir)
v2 = inversion.mass_conservation_inversion(gdir)
inversion.distribute_thickness_per_altitude(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
ds2 = ds.load()
# Total volume is different at only 10%
np.testing.assert_allclose(v1, v2, rtol=0.1)
# And the distributed diff is not too large either
rms = utils.rmsd(ds1.distributed_thickness, ds2.distributed_thickness)
assert rms < 20
def test_run(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.simple_glacier_masks(gdir)
centerlines.elevation_band_flowline(gdir)
centerlines.fixed_dx_elevation_band_flowline(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
climate.process_custom_climate_data(gdir)
mbdf = gdir.get_ref_mb_data()
res = climate.t_star_from_refmb(gdir, mbdf=mbdf['ANNUAL_BALANCE'])
t_star, bias = res['t_star'], res['bias']
climate.local_t_star(gdir, tstar=t_star, bias=bias)
climate.mu_star_calibration(gdir)
inversion.prepare_for_inversion(gdir)
inversion.mass_conservation_inversion(gdir)
inversion.filter_inversion_output(gdir)
flowline.init_present_time_glacier(gdir)
model = flowline.run_random_climate(gdir, nyears=50, y0=1985)
fl = model.fls[-1]
assert np.all(fl.is_trapezoid[:30])
with xr.open_dataset(gdir.get_filepath('model_diagnostics')) as ds:
# it's running and it is retreating
assert ds.volume_m3[-1] < ds.volume_m3[0]
assert ds.length_m[-1] < ds.length_m[0]
class TestGeometry(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PARAMS['border'] = 10
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_catchment_area(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.catchment_area(gdir)
cis = gdir.read_pickle('geometries')['catchment_indices']
# The catchment area must be as big as expected
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
mask = nc.variables['glacier_mask'][:]
mymask_a = mask * 0
mymask_b = mask * 0
for i, ci in enumerate(cis):
mymask_a[tuple(ci.T)] += 1
mymask_b[tuple(ci.T)] = i+1
self.assertTrue(np.max(mymask_a) == 1)
np.testing.assert_allclose(mask, mymask_a)
def test_flowlines(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
for j, ip, ob in zip(cl.inflow_indices, cl.inflow_points,
cl.inflows):
self.assertEqual(cl.line.coords[j], ip.coords[0])
self.assertEqual(ob.flows_to_point.coords[0], ip.coords[0])
self.assertEqual(cl.line.coords[ob.flows_to_indice],
ip.coords[0])
self.assertEqual(len(cls), 3)
x, y = map(np.array, cls[0].line.xy)
dis = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)
np.testing.assert_allclose(dis * 0 + cfg.PARAMS['flowline_dx'], dis,
rtol=0.01)
d = gdir.get_diagnostics()
assert d['perc_invalid_flowline'] > 0.1
df = utils.compile_glacier_statistics([gdir], path=False)
assert np.all(df['dem_source'] == 'USER')
assert np.all(df['perc_invalid_flowline'] > 0.1)
assert np.all(df['dem_perc_area_above_max_elev_on_ext'] < 0.1)
def test_geom_width(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
def test_width(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
area = 0.
otherarea = 0.
evenotherarea = 0
hgt = []
harea = []
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
harea.extend(list(cl.widths * cl.dx))
hgt.extend(list(cl.surface_h))
area += np.sum(cl.widths * cl.dx)
evenotherarea += np.sum(cl.widths_m * cl.dx_meter)
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
otherarea += np.sum(nc.variables['glacier_mask'][:])
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
mask = nc.variables['glacier_mask'][:]
topo = nc.variables['topo_smoothed'][:]
rhgt = topo[np.where(mask)][:]
tdf = gdir.read_shapefile('outlines')
np.testing.assert_allclose(area, otherarea, rtol=0.1)
np.testing.assert_allclose(evenotherarea, gdir.rgi_area_m2)
area *= (gdir.grid.dx) ** 2
otherarea *= (gdir.grid.dx) ** 2
        np.testing.assert_allclose(area * 10**-6, float(tdf['Area']),
rtol=1e-4)
# Check for area distrib
bins = np.arange(utils.nicenumber(np.min(hgt), 50, lower=True),
utils.nicenumber(np.max(hgt), 50)+1,
50.)
h1, b = np.histogram(hgt, weights=harea, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
self.assertTrue(utils.rmsd(h1*100*50, h2*100*50) < 1)
# Check that utility function is doing what is expected
hh, ww = gdir.get_inversion_flowline_hw()
new_area = np.sum(ww * cl.dx * gdir.grid.dx)
        np.testing.assert_allclose(new_area * 10**-6, float(tdf['Area']))
def test_nodivides_correct_slope(self):
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PARAMS['border'] = 40
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
fls = gdir.read_pickle('inversion_flowlines')
min_slope = np.deg2rad(cfg.PARAMS['min_slope'])
for fl in fls:
dx = fl.dx * gdir.grid.dx
slope = np.arctan(-np.gradient(fl.surface_h, dx))
self.assertTrue(np.all(slope >= min_slope))
class TestClimate(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp_prepro')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.testdir_cru = os.path.join(get_test_dir(), 'tmp_prepro_cru')
if not os.path.exists(self.testdir_cru):
os.makedirs(self.testdir_cru)
self.clean_dir()
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['working_dir'] = self.testdir
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PARAMS['border'] = 10
cfg.PARAMS['run_mb_calibration'] = True
cfg.PARAMS['baseline_climate'] = ''
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
shutil.rmtree(self.testdir_cru)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
shutil.rmtree(self.testdir_cru)
os.makedirs(self.testdir_cru)
def test_distribute_climate(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
climate.process_custom_climate_data(gdir)
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1802)
self.assertEqual(ci['baseline_hydro_yr_1'], 2003)
with utils.ncDataset(get_demo_file('histalp_merged_hef.nc')) as nc_r:
ref_h = nc_r.variables['hgt'][1, 1]
ref_p = nc_r.variables['prcp'][:, 1, 1]
ref_t = nc_r.variables['temp'][:, 1, 1]
f = os.path.join(gdir.dir, 'climate_historical.nc')
with utils.ncDataset(f) as nc_r:
self.assertTrue(ref_h == nc_r.ref_hgt)
np.testing.assert_allclose(ref_t, nc_r.variables['temp'][:])
np.testing.assert_allclose(ref_p, nc_r.variables['prcp'][:])
def test_distribute_climate_grad(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
cfg.PARAMS['temp_use_local_gradient'] = True
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
climate.process_custom_climate_data(gdir)
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1802)
self.assertEqual(ci['baseline_hydro_yr_1'], 2003)
with xr.open_dataset(gdir.get_filepath('climate_historical')) as ds:
grad = ds['gradient'].data
try:
assert np.std(grad) > 0.0001
except TypeError:
pass
cfg.PARAMS['temp_use_local_gradient'] = False
def test_distribute_climate_parallel(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
climate.process_custom_climate_data(gdir)
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1802)
self.assertEqual(ci['baseline_hydro_yr_1'], 2003)
with utils.ncDataset(get_demo_file('histalp_merged_hef.nc')) as nc_r:
ref_h = nc_r.variables['hgt'][1, 1]
ref_p = nc_r.variables['prcp'][:, 1, 1]
ref_t = nc_r.variables['temp'][:, 1, 1]
f = os.path.join(gdir.dir, 'climate_historical.nc')
with utils.ncDataset(f) as nc_r:
self.assertTrue(ref_h == nc_r.ref_hgt)
np.testing.assert_allclose(ref_t, nc_r.variables['temp'][:])
np.testing.assert_allclose(ref_p, nc_r.variables['prcp'][:])
def test_distribute_climate_cru(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdirs = []
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gdirs.append(gdir)
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir_cru)
gis.define_glacier_region(gdir)
gdirs.append(gdir)
climate.process_custom_climate_data(gdirs[0])
cfg.PATHS['climate_file'] = ''
cfg.PARAMS['baseline_climate'] = 'CRU'
tasks.process_cru_data(gdirs[1])
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1902)
self.assertEqual(ci['baseline_hydro_yr_1'], 2014)
gdh = gdirs[0]
gdc = gdirs[1]
f1 = os.path.join(gdh.dir, 'climate_historical.nc')
f2 = os.path.join(gdc.dir, 'climate_historical.nc')
with xr.open_dataset(f1) as nc_h:
with xr.open_dataset(f2) as nc_c:
# put on the same altitude
# (using default gradient because better)
temp_cor = nc_c.temp - 0.0065 * (nc_h.ref_hgt - nc_c.ref_hgt)
totest = temp_cor - nc_h.temp
self.assertTrue(totest.mean() < 0.5)
# precip
totest = nc_c.prcp - nc_h.prcp
self.assertTrue(totest.mean() < 100)
def test_distribute_climate_dummy(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdirs = []
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gdirs.append(gdir)
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir_cru)
gis.define_glacier_region(gdir)
gdirs.append(gdir)
tasks.process_dummy_cru_file(gdirs[0], seed=0)
cfg.PATHS['climate_file'] = ''
cfg.PARAMS['baseline_climate'] = 'CRU'
tasks.process_cru_data(gdirs[1])
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1902)
self.assertEqual(ci['baseline_hydro_yr_1'], 2014)
gdh = gdirs[0]
gdc = gdirs[1]
f1 = os.path.join(gdh.dir, 'climate_historical.nc')
f2 = os.path.join(gdc.dir, 'climate_historical.nc')
with xr.open_dataset(f1) as nc_d:
with xr.open_dataset(f2) as nc_c:
# same altitude
assert nc_d.ref_hgt == nc_c.ref_hgt
np.testing.assert_allclose(nc_d.temp.mean(), nc_c.temp.mean(),
atol=0.2)
np.testing.assert_allclose(nc_d.temp.mean(), nc_c.temp.mean(),
rtol=0.1)
an1 = nc_d.temp.groupby('time.month').mean()
an2 = nc_c.temp.groupby('time.month').mean()
np.testing.assert_allclose(an1, an2, atol=1)
an1 = nc_d.prcp.groupby('time.month').mean()
an2 = nc_c.prcp.groupby('time.month').mean()
np.testing.assert_allclose(an1, an2, rtol=0.2)
def test_distribute_climate_historicalalp_new(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdirs = []
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gdirs.append(gdir)
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir_cru)
gis.define_glacier_region(gdir)
gdirs.append(gdir)
climate.process_custom_climate_data(gdirs[0])
cfg.PATHS['climate_file'] = ''
cfg.PARAMS['baseline_climate'] = 'HISTALP'
tasks.process_histalp_data(gdirs[1], y0=1850, y1=2003)
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1851)
self.assertEqual(ci['baseline_hydro_yr_1'], 2003)
gdh = gdirs[0]
gdc = gdirs[1]
f1 = os.path.join(gdh.dir, 'climate_historical.nc')
f2 = os.path.join(gdc.dir, 'climate_historical.nc')
with xr.open_dataset(f1) as nc_h:
with xr.open_dataset(f2) as nc_c:
nc_hi = nc_h.isel(time=slice(49*12, 2424))
np.testing.assert_allclose(nc_hi['temp'], nc_c['temp'])
# for precip the data changed in between versions, we
# can't test for absolute equality
np.testing.assert_allclose(nc_hi['prcp'].mean(),
nc_c['prcp'].mean(),
atol=1)
np.testing.assert_allclose(nc_hi.ref_pix_dis,
nc_c.ref_pix_dis)
def test_sh(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
# We have to make a non cropped custom file
fpath = cfg.PATHS['climate_file']
ds = xr.open_dataset(fpath)
ds = ds.sel(time=slice('1802-01-01', '2002-12-01'))
nf = os.path.join(self.testdir, 'testdata.nc')
ds.to_netcdf(nf)
cfg.PATHS['climate_file'] = nf
gdirs = []
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
# Trick
assert gdir.hemisphere == 'nh'
gdir.hemisphere = 'sh'
gis.define_glacier_region(gdir)
gdirs.append(gdir)
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir_cru)
assert gdir.hemisphere == 'nh'
gdir.hemisphere = 'sh'
gis.define_glacier_region(gdir)
gdirs.append(gdir)
climate.process_custom_climate_data(gdirs[0])
ci = gdirs[0].get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1803)
self.assertEqual(ci['baseline_hydro_yr_1'], 2002)
cfg.PATHS['climate_file'] = ''
cfg.PARAMS['baseline_climate'] = 'CRU'
tasks.process_cru_data(gdirs[1])
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
ci = gdir.get_climate_info()
self.assertEqual(ci['baseline_hydro_yr_0'], 1902)
self.assertEqual(ci['baseline_hydro_yr_1'], 2014)
gdh = gdirs[0]
gdc = gdirs[1]
with xr.open_dataset(
os.path.join(gdh.dir, 'climate_historical.nc')) as nc_h:
assert nc_h['time.month'][0] == 4
assert nc_h['time.year'][0] == 1802
assert nc_h['time.month'][-1] == 3
assert nc_h['time.year'][-1] == 2002
with xr.open_dataset(
os.path.join(gdc.dir, 'climate_historical.nc')) as nc_c:
assert nc_c['time.month'][0] == 4
assert nc_c['time.year'][0] == 1901
assert nc_c['time.month'][-1] == 3
assert nc_c['time.year'][-1] == 2014
# put on the same altitude
# (using default gradient because better)
temp_cor = nc_c.temp - 0.0065 * (nc_h.ref_hgt - nc_c.ref_hgt)
totest = temp_cor - nc_h.temp
self.assertTrue(totest.mean() < 0.5)
# precip
totest = nc_c.prcp - nc_h.prcp
self.assertTrue(totest.mean() < 100)
def test_mb_climate(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
climate.process_custom_climate_data(gdir)
with utils.ncDataset(get_demo_file('histalp_merged_hef.nc')) as nc_r:
ref_h = nc_r.variables['hgt'][1, 1]
ref_p = nc_r.variables['prcp'][:, 1, 1]
ref_t = nc_r.variables['temp'][:, 1, 1]
ref_t = np.where(ref_t < cfg.PARAMS['temp_melt'], 0,
ref_t - cfg.PARAMS['temp_melt'])
hgts = np.array([ref_h, ref_h, -8000, 8000])
time, temp, prcp = climate.mb_climate_on_height(gdir, hgts)
prcp /= cfg.PARAMS['prcp_scaling_factor']
ref_nt = 202*12
self.assertTrue(len(time) == ref_nt)
self.assertTrue(temp.shape == (4, ref_nt))
self.assertTrue(prcp.shape == (4, ref_nt))
np.testing.assert_allclose(temp[0, :], ref_t)
np.testing.assert_allclose(temp[0, :], temp[1, :])
np.testing.assert_allclose(prcp[0, :], prcp[1, :])
np.testing.assert_allclose(prcp[3, :], ref_p)
np.testing.assert_allclose(prcp[2, :], ref_p*0)
np.testing.assert_allclose(temp[3, :], ref_p*0)
yr = [1802, 1802]
time, temp, prcp = climate.mb_climate_on_height(gdir, hgts,
year_range=yr)
prcp /= cfg.PARAMS['prcp_scaling_factor']
ref_nt = 1*12
self.assertTrue(len(time) == ref_nt)
self.assertTrue(temp.shape == (4, ref_nt))
self.assertTrue(prcp.shape == (4, ref_nt))
np.testing.assert_allclose(temp[0, :], ref_t[0:12])
np.testing.assert_allclose(temp[0, :], temp[1, :])
np.testing.assert_allclose(prcp[0, :], prcp[1, :])
np.testing.assert_allclose(prcp[3, :], ref_p[0:12])
np.testing.assert_allclose(prcp[2, :], ref_p[0:12]*0)
np.testing.assert_allclose(temp[3, :], ref_p[0:12]*0)
yr = [1803, 1804]
time, temp, prcp = climate.mb_climate_on_height(gdir, hgts,
year_range=yr)
prcp /= cfg.PARAMS['prcp_scaling_factor']
ref_nt = 2*12
self.assertTrue(len(time) == ref_nt)
self.assertTrue(temp.shape == (4, ref_nt))
self.assertTrue(prcp.shape == (4, ref_nt))
np.testing.assert_allclose(temp[0, :], ref_t[12:36])
np.testing.assert_allclose(temp[0, :], temp[1, :])
np.testing.assert_allclose(prcp[0, :], prcp[1, :])
np.testing.assert_allclose(prcp[3, :], ref_p[12:36])
np.testing.assert_allclose(prcp[2, :], ref_p[12:36]*0)
np.testing.assert_allclose(temp[3, :], ref_p[12:36]*0)
def test_yearly_mb_climate(self):
cfg.PARAMS['prcp_scaling_factor'] = 1
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
climate.process_custom_climate_data(gdir)
with utils.ncDataset(get_demo_file('histalp_merged_hef.nc')) as nc_r:
ref_h = nc_r.variables['hgt'][1, 1]
ref_p = nc_r.variables['prcp'][:, 1, 1]
ref_t = nc_r.variables['temp'][:, 1, 1]
ref_t = np.where(ref_t <= cfg.PARAMS['temp_melt'], 0,
ref_t - cfg.PARAMS['temp_melt'])
# NORMAL --------------------------------------------------------------
hgts = np.array([ref_h, ref_h, -8000, 8000])
years, temp, prcp = climate.mb_yearly_climate_on_height(gdir, hgts)
ref_nt = 202
self.assertTrue(len(years) == ref_nt)
self.assertTrue(temp.shape == (4, ref_nt))
self.assertTrue(prcp.shape == (4, ref_nt))
yr = [1802, 1802]
years, temp, prcp = climate.mb_yearly_climate_on_height(gdir, hgts,
year_range=yr)
ref_nt = 1
self.assertTrue(len(years) == ref_nt)
self.assertTrue(years == 1802)
self.assertTrue(temp.shape == (4, ref_nt))
self.assertTrue(prcp.shape == (4, ref_nt))
np.testing.assert_allclose(temp[0, :], np.sum(ref_t[0:12]))
np.testing.assert_allclose(temp[0, :], temp[1, :])
np.testing.assert_allclose(prcp[0, :], prcp[1, :])
np.testing.assert_allclose(prcp[3, :], np.sum(ref_p[0:12]))
np.testing.assert_allclose(prcp[2, :], np.sum(ref_p[0:12])*0)
np.testing.assert_allclose(temp[3, :], np.sum(ref_p[0:12])*0)
yr = [1803, 1804]
years, temp, prcp = climate.mb_yearly_climate_on_height(gdir, hgts,
year_range=yr)
ref_nt = 2
self.assertTrue(len(years) == ref_nt)
np.testing.assert_allclose(years, yr)
self.assertTrue(temp.shape == (4, ref_nt))
self.assertTrue(prcp.shape == (4, ref_nt))
np.testing.assert_allclose(prcp[2, :], [0, 0])
np.testing.assert_allclose(temp[3, :], [0, 0])
# FLATTEN -------------------------------------------------------------
hgts = np.array([ref_h, ref_h, -8000, 8000])
years, temp, prcp = climate.mb_yearly_climate_on_height(gdir, hgts,
flatten=True)
ref_nt = 202
self.assertTrue(len(years) == ref_nt)
self.assertTrue(temp.shape == (ref_nt,))
self.assertTrue(prcp.shape == (ref_nt,))
yr = [1802, 1802]
hgts = np.array([ref_h])
years, temp, prcp = climate.mb_yearly_climate_on_height(gdir, hgts,
year_range=yr,
flatten=True)
ref_nt = 1
self.assertTrue(len(years) == ref_nt)
self.assertTrue(years == 1802)
self.assertTrue(temp.shape == (ref_nt,))
self.assertTrue(prcp.shape == (ref_nt,))
np.testing.assert_allclose(temp[:], np.sum(ref_t[0:12]))
yr = [1802, 1802]
hgts = np.array([8000])
years, temp, prcp = climate.mb_yearly_climate_on_height(gdir, hgts,
year_range=yr,
flatten=True)
np.testing.assert_allclose(prcp[:], np.sum(ref_p[0:12]))
def test_mu_candidates(self):
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
climate.process_custom_climate_data(gdir)
with pytest.warns(DeprecationWarning):
se = climate.glacier_mu_candidates(gdir)
self.assertTrue(se.index[0] == 1802)
self.assertTrue(se.index[-1] == 2003)
        df = pd.DataFrame()
# To run the Streamlit app, type the command "streamlit run <file name>" in the terminal
import pandas as pd
import streamlit as st
import numpy as np
import folium
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
import plotly.express as px
from datetime import datetime
st.set_page_config(layout='wide')  # so the page contents fill the whole screen
@st.cache(allow_output_mutation=True)  # cache data loading to improve performance
def get_data(path):
df = pd.read_csv(path)
return df
def set_feature(data):
    # convert the lot size from square feet to square meters
    data['sqft_lot_m'] = data['sqft_lot'] * (0.3048 ** 2)
    # add new feature: price per square meter
    data['price_m2'] = data['price'] / data['sqft_lot_m']
return data
def overview_data(data):
# data overview
    f_attribute = st.sidebar.multiselect('Enter columns',
                                         data.columns)  # Q2 - filter to choose one or more variables (columns) to display
    f_zipcode = st.sidebar.multiselect('Enter ZipCode', data['zipcode'].unique())  # Q1 - filter to display the properties of one or more regions
    st.title('Data Overview')  # page title
# if (f_zipcode != []) & (f_attribute != []):
# data = data.loc[data['zipcode'].isin(f_zipcode), f_attribute]
# elif (f_zipcode != []) & (f_attribute == []):
# data = data.loc[data['zipcode'].isin(f_zipcode), :]
# elif (f_zipcode == []) & (f_attribute != []):
    # data = data.loc[:, f_attribute]
# else:
# data = data.copy()
if (f_attribute == []):
if f_zipcode != []:
data = data.loc[data['zipcode'].isin(f_zipcode), :]
data2 = data.loc[data['zipcode'].isin(f_zipcode), :]
else: #f_zipcode == []
data = data.copy()
data2 = data.copy()
else: #f_attribute != []
if f_zipcode != []:
data2 = data.loc[data['zipcode'].isin(f_zipcode), f_attribute]
data = data.loc[data['zipcode'].isin(f_zipcode), :]
else: #f_zipcode == []
data2 = data.loc[:, f_attribute]
data = data.copy()
st.dataframe(data2)
    c1, c2 = st.columns((1, 1))  # to place one table beside the other
    # average metrics
    # Q3 - Show the total number of properties, the average price, the average
    # living-room size, and the average price per square meter for each zipcode.
# data2 = get_data(path)
    df1 = data[['id', 'zipcode']].groupby('zipcode').count().reset_index()  # total number of properties
    df2 = data[['price', 'zipcode']].groupby('zipcode').mean().reset_index()  # average price
    df3 = data[['sqft_living', 'zipcode']].groupby('zipcode').mean().reset_index()  # average living-room size
    df4 = data[['price_m2', 'zipcode']].groupby('zipcode').mean().reset_index()  # average price per square meter
# merge
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = pd.merge(m1, df3, on='zipcode', how='inner')
    df = pd.merge(m2, df4, on='zipcode', how='inner')
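# Reviewer sketch (not part of the original app): the four groupby/merge steps
# above could also be written as a single named aggregation.  The column names
# are the ones already used in this file; the helper itself is illustrative only.
def zipcode_metrics(data):
    """Per-zipcode count and means in one groupby (illustrative sketch)."""
    return (data.groupby('zipcode')
                .agg(total_houses=('id', 'count'),
                     price_mean=('price', 'mean'),
                     sqft_living_mean=('sqft_living', 'mean'),
                     price_m2_mean=('price_m2', 'mean'))
                .reset_index())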
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import numpy as np
import pytest
from pandas.testing import assert_frame_equal
from gators.imputers.numerics_imputer import NumericsImputer
from gators.imputers.int_imputer import IntImputer
from gators.imputers.float_imputer import FloatImputer
from gators.imputers.object_imputer import ObjectImputer
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture()
def data():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num():
X_int = pd.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing():
X_int = pd.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.copy(),
'float': X_float.copy(),
'object': X_object.copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
@pytest.fixture()
def data_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
X_int_ks = ks.from_pandas(X_int)
X_float_ks = ks.from_pandas(X_float)
X_object_ks = ks.from_pandas(X_object)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
    X_dict = {
        'int': X_int_ks,
        'float': X_float_ks,
        'object': X_object_ks,
    }
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num_ks():
X_int = ks.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing_ks():
X_int = ks.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = ks.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.to_pandas().copy(),
'float': X_float.to_pandas().copy(),
'object': X_object.to_pandas().copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = ks.from_pandas(pd.concat([X_int, X_float, X_object], axis=1))
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
def test_int_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_float_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_object_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_int_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']).to_pandas(),
X_expected_dict['int'],)
@pytest.mark.koalas
def test_float_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['float'].transform(X_dict['float']).to_pandas(),
X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['object'].transform(X_dict['object']).to_pandas(),
X_expected_dict['object'],
)
def test_int_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_float_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_object_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_int_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_float_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
def test_num_int_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_num_float_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_num_int_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_num_float_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
def test_num_int_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_num_float_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_num_int_ks_np(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_num_float_ks_np(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_no_missing_int_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_no_missing_float_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_no_missing_object_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_no_missing_int_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_no_missing_float_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_no_missing_object_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['object'].transform(
X_dict['object'].to_pandas()), X_expected_dict['object'],
)
def test_no_missing_int_pd_np(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
    assert_frame_equal(X_new, X_expected_dict['int'])
import numpy
import pandas
from recordlinkage.utils import listify
from recordlinkage.index import Block
from recordlinkage.utils import construct_multiindex
class NeighbourhoodBlock(Block):
'''
:class:`recordlinkage.index.Block` with extended matching types
    * Proximity in record ranking order (like :class:`SortedNeighbourhood`),
      except multiple orderings (one for each field) are allowed
* Wildcard matching of null values
* A limited number of complete field mismatches
Parameters
----------
left_on : label, optional
A column name or a list of column names of dataframe A. These
columns are used for matching records.
right_on : label, optional
A column name or a list of column names of dataframe B. These
columns are used for matching records. If 'right_on' is None,
the `left_on` value is used. Default None.
max_nulls: int, optional
Include record pairs with up to this number of wildcard matches (see
below). Default: 0 (no wildcard matching)
max_non_matches: int, optional
Include record pairs with up to this number of field mismatches (see
below). Default: 0 (no mismatches allowed)
windows: int, optional
An integer or list of integers representing the window widths (as in
:class:`SortedNeighbourhood`). If fewer are specified than the number
of keys (in *left_on* and/or *right_on*), the final one is repeated
for the remaining keys.
**kwargs :
Additional keyword arguments to pass to
:class:`recordlinkage.base.BaseIndexAlgorithm`.
Wildcard matching
-----------------
Missing values can be treated as wild (ie: matching any other value)
for a limited number of fields determined by the max_nulls parameter.
Relationship to other index types
---------------------------------
Special cases of this indexer are equivalent to other index types:
* :class:`Block`: max_nulls=0, max_non_matches=0, *windows=1
(the defaults)
* :class:`SortedNeighbourhood`: max_nulls=0, max_non_matches=0,
windows=[window value for the sorting key, 1 otherwise]
* :class:`Full`: max_non_matches >= number of keys
Example
-------
In the following example, the record pairs are made for two historical
datasets with census data. The datasets are named ``census_data_1980``
and ``census_data_1990``. The index includes record pairs with matches
in (at least) any 3 out of the 5 nominated fields. Proximity matching is
allowed in the first two fields, and up to one wildcard match of a missing
value is also allowed.
>>> from recordlinkage.contrib.index import NeighbourhoodBlock
>>> keys = ['first_name', 'surname', 'date_of_birth', 'address', 'ssid']
>>> windows = [9, 3, 1, 1, 1]
>>> indexer = NeighbourhoodBlock(
>>> keys, windows=windows, max_nulls=1, max_non_matches=2)
>>> indexer.index(census_data_1980, census_data_1990)
'''
def __init__(self,
left_on=None,
right_on=None,
max_nulls=0,
max_non_matches=0,
windows=1,
**kwargs):
super(NeighbourhoodBlock, self).__init__(
left_on=left_on, right_on=right_on, **kwargs)
self.max_nulls = max_nulls
self.max_non_matches = max_non_matches
self.windows = listify(windows)
def __repr__(self):
cls = type(self)
attrs = [
'left_on', 'right_on', 'max_nulls', 'max_non_matches', 'windows'
]
attrs_repr = ', '.join(
'{}={}'.format(attr, repr(getattr(self, attr))) for attr in attrs)
return '<{cls.__name__} {attrs_repr}>'.format(**locals())
_coarsening_factor = 2
def _index(self, df_a, df_b=None):
dfs = [df_a, df_a if df_b is None else df_b]
def split_to_match(a, to_match):
ndx_bounds = numpy.r_[0, numpy.cumsum([len(x) for x in to_match])]
assert len(a) == ndx_bounds[-1]
return [
a[start:stop]
for start, stop in zip(ndx_bounds, ndx_bounds[1:])
]
def deduped_blocks_and_indices(blocks, indices=None):
if indices is None:
indices = [numpy.arange(len(blocks))]
deduped_blocks, index_tx = numpy.unique(
blocks, axis=0, return_inverse=True)
return deduped_blocks, [index_tx[raw_ndx] for raw_ndx in indices]
def get_normalized_linkage_params():
def default_on_possibilities():
yield self.left_on
yield self.right_on
yield [
c for c in dfs[0].columns
if all(c in df.columns for df in dfs)
]
default_on = next(
iter(
filter(lambda x: x is not None,
default_on_possibilities())))
key_columns = [
listify(side_on or default_on)
for side_on in [self.left_on, self.right_on]
]
key_cols = set(map(len, key_columns))
n_key_cols = next(iter(key_cols))
if (len(key_cols) > 1) or (n_key_cols == 0):
raise IndexError('Invalid blocking keys')
combined_ranks = numpy.vstack([
pandas.concat([df[col] for df, col in zip(dfs, col_grp)]).rank(
method='dense',
na_option='keep').fillna(0).astype(int).values - 1
for col_grp in zip(*key_columns)
]).astype(float).T
combined_ranks[combined_ranks < 0] = numpy.nan
blocks, indices = deduped_blocks_and_indices(
blocks=combined_ranks,
indices=split_to_match(numpy.arange(len(combined_ranks)), dfs))
n_keys = blocks.shape[1]
windows = self.windows + self.windows[-1:] * (
n_keys - len(self.windows))
if (len(windows) > n_keys) or not all(
isinstance(w, int) and (w > 0) and (w % 2 == 1)
for w in windows):
raise ValueError(
                    'Windows must be positive odd integers and the maximum '
                    'number allowed is the number of blocking keys'
)
rank_distance_limits = (
numpy.array(windows) // 2).astype(float).reshape((1, -1))
return blocks, indices, rank_distance_limits
def many_to_many_join_indices(left_keys, right_keys, key_link):
joined = pandas.DataFrame(
key_link, columns=['left_key', 'right_key'])
for side, values in [('left', left_keys), ('right', right_keys)]:
joined = joined.join(
pandas.DataFrame(
{
'{side}_ndx'.format(**locals()):
numpy.arange(len(values))
},
index=values),
how='inner',
on='{side}_key'.format(**locals()))
return joined[['left_ndx', 'right_ndx']].values
def chain_indices(*index_groups):
remaining_groups = iter(index_groups)
result = list(next(remaining_groups))
for txs in remaining_groups:
result = [tx[r] for tx, r in zip(txs, result)]
return result
def linkage_index_codes(blocks,
indices,
rank_distance_limits,
rank_max=None):
if rank_max is None:
                rank_max = pandas.DataFrame(blocks)
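# --- Hedged usage sketch (not part of the original module) -------------------
# The class docstring states that particular parameter choices make this
# indexer behave like other index types.  The helper below spells those
# configurations out for a single hypothetical blocking key 'surname'; it is
# illustrative only and does not exercise the library internals.
def _neighbourhood_block_equivalents():
    # Exact blocking on one key (Block-like): no wildcards, no mismatches,
    # window of 1.
    block_like = NeighbourhoodBlock('surname', max_nulls=0,
                                    max_non_matches=0, windows=1)
    # Sorted-neighbourhood-like: proximity window of 9 on the sorting key.
    snh_like = NeighbourhoodBlock('surname', max_nulls=0,
                                  max_non_matches=0, windows=[9])
    # Full-index-like: every pair passes because the single key may mismatch.
    full_like = NeighbourhoodBlock('surname', max_non_matches=1)
    return block_like, snh_like, full_like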
"""Functions to calculate mean squared displacements from trajectory data
This module includes functions to calculate mean squared displacements and
additional measures from input trajectory datasets as calculated by the
Trackmate ImageJ plugin.
"""
import warnings
import random as rand
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.stats as stats
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import diff_classifier.aws as aws
def nth_diff(dataframe, n=1, axis=0):
"""Calculates the nth difference between vector elements
Returns a new vector of size N - n containing the nth difference between
vector elements.
Parameters
----------
dataframe : pandas.core.series.Series of int or float
Input data on which differences are to be calculated.
n : int
Function calculated xpos(i) - xpos(i - n) for all values in pandas
series.
axis : {0, 1}
Axis along which differences are to be calculated. Default is 0. If 0,
input must be a pandas series. If 1, input must be a numpy array.
Returns
-------
diff : pandas.core.series.Series of int or float
Pandas series of size N - n, where N is the original size of dataframe.
Examples
--------
>>> df = np.ones((5, 10))
    >>> nth_diff(df, axis=1)
array([[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> df = np.ones((5, 10))
    >>> nth_diff(df)
array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
"""
assert isinstance(n, int), "n must be an integer."
if dataframe.ndim == 1:
length = dataframe.shape[0]
if n <= length:
test1 = dataframe[:-n].reset_index(drop=True)
test2 = dataframe[n:].reset_index(drop=True)
diff = test2 - test1
else:
diff = np.array([np.nan, np.nan])
else:
length = dataframe.shape[0]
if n <= length:
if axis == 0:
test1 = dataframe[:-n, :]
test2 = dataframe[n:, :]
else:
test1 = dataframe[:, :-n]
test2 = dataframe[:, n:]
diff = test2 - test1
else:
diff = np.array([np.nan, np.nan])
return diff
def msd_calc(track, length=10):
"""Calculates mean squared displacement of input track.
Returns numpy array containing MSD data calculated from an individual track.
Parameters
----------
track : pandas.core.frame.DataFrame
        Contains, at a minimum, a 'Frame', 'X', and 'Y' column.
    length : int
        Desired number of frames in the output track. The trajectory is
        interpolated onto frames 1 through `length`; frames outside the
        observed range are filled with NaNs.
Returns
-------
new_track : pandas.core.frame.DataFrame
Similar to input track. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [1, 2, 3, 4, 5],
... 'X': [5, 6, 7, 8, 9],
... 'Y': [6, 7, 8, 9, 10]}
>>> df = pd.DataFrame(data=data1)
>>> new_track = msd.msd_calc(df, 5)
>>> data1 = {'Frame': [1, 2, 3, 4, 5],
... 'X': [5, 6, 7, 8, 9],
... 'Y': [6, 7, 8, 9, 10]}
>>> df = pd.DataFrame(data=data1)
>>> new_track = msd.msd_calc(df)
"""
meansd = np.zeros(length)
gauss = np.zeros(length)
new_frame = np.linspace(1, length, length)
old_frame = track['Frame']
oldxy = [track['X'], track['Y']]
fxy = [interpolate.interp1d(old_frame, oldxy[0], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[1], bounds_error=False,
fill_value=np.nan)]
intxy = [ma.masked_equal(fxy[0](new_frame), np.nan),
ma.masked_equal(fxy[1](new_frame), np.nan)]
data1 = {'Frame': new_frame,
'X': intxy[0],
'Y': intxy[1]
}
new_track = pd.DataFrame(data=data1)
for frame in range(0, length-1):
xy = [np.square(nth_diff(new_track['X'], n=frame+1)),
np.square(nth_diff(new_track['Y'], n=frame+1))]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
meansd[frame+1] = np.nanmean(xy[0] + xy[1])
gauss[frame+1] = np.nanmean(xy[0]**2 + xy[1]**2
)/(2*(meansd[frame+1]**2))
new_track['MSDs'] = pd.Series(meansd, index=new_track.index)
new_track['Gauss'] = pd.Series(gauss, index=new_track.index)
return new_track
def all_msds(data):
"""Calculates mean squared displacements of a trajectory dataset
Returns numpy array containing MSD data of all tracks in a trajectory
pandas dataframe.
Parameters
----------
data : pandas.core.frame.DataFrame
Contains, at a minimum a 'Frame', 'Track_ID', 'X', and
'Y' column. Note: it is assumed that frames begins at 1, not 0 with this
function. Adjust before feeding into function.
Returns
-------
new_data : pandas.core.frame.DataFrame
Similar to input data. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
... 'Track_ID': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
... 'X': [5, 6, 7, 8, 9, 1, 2, 3, 4, 5],
... 'Y': [6, 7, 8, 9, 10, 2, 3, 4, 5, 6]}
>>> df = pd.DataFrame(data=data1)
>>> all_msds(df)
"""
trackids = data.Track_ID.unique()
partcount = trackids.shape[0]
length = int(max(data['Frame']))
new = {}
new['length'] = partcount*length
new['frame'] = np.zeros(new['length'])
new['ID'] = np.zeros(new['length'])
new['xy'] = [np.zeros(new['length']),
np.zeros(new['length'])]
meansd = np.zeros(new['length'])
gauss = np.zeros(new['length'])
for particle in range(0, partcount):
single_track = data.loc[data['Track_ID'] ==
trackids[particle]
].sort_values(['Track_ID', 'Frame'],
ascending=[1, 1]
).reset_index(drop=True)
if particle == 0:
index1 = 0
index2 = length
else:
index1 = index2
index2 = index2 + length
new['single_track'] = msd_calc(single_track, length=length)
new['frame'][index1:index2] = np.linspace(1, length, length)
new['ID'][index1:index2] = particle+1
new['xy'][0][index1:index2] = new['single_track']['X']
new['xy'][1][index1:index2] = new['single_track']['Y']
meansd[index1:index2] = new['single_track']['MSDs']
gauss[index1:index2] = new['single_track']['Gauss']
data1 = {'Frame': new['frame'],
'Track_ID': new['ID'],
'X': new['xy'][0],
'Y': new['xy'][1],
'MSDs': meansd,
'Gauss': gauss}
    new_data = pd.DataFrame(data=data1)
import boto3
import os
import pandas as pd
import awswrangler as wr
def lambda_handler(event, context):
s3 = boto3.client('s3')
s3_resource = boto3.resource('s3')
comprehend = boto3.client('comprehend')
t_prefix = 'quicksight/data/entity'
paginator = s3.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=os.environ['entityDetectionBucket'], Prefix='comprehendInput/')
tempcols = ['Type', 'Score']
    df_temp = pd.DataFrame(columns=tempcols)
import functions as func
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from sklearn.metrics import mean_squared_error
from numpy import array
import sys
import time
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARIMA
import re
import datetime
import sklearn.metrics as skm
from statsmodels.tsa.statespace.sarimax import SARIMAX
import os
def custom_cv_kfolds_testdataonly(X, kfolds, th):
n = X.shape[0]
# print(n)
print('******** creating custom CV:')
i = 1
while i <= kfolds:
np.random.seed(i)
idx = np.empty(0, dtype=int)
for index in np.arange(0, n-(th*6), step=(th*6), dtype=int):
randwindowpoint = np.random.randint(0, 6, size=1)
idx = np.append(idx, [randwindowpoint+index])
# print(idx)
print(idx[0:10])
yield idx[:int(len(idx))]
i = i+1
def temporal_horizon(df, pd_steps, target):
pd_steps = pd_steps * 6
target_values = df[[target]]
target_values = target_values.drop(
target_values.index[0: pd_steps], axis=0)
target_values.index = np.arange(0, len(target_values[target]))
df = df.drop(
df.index[len(df.index)-pd_steps: len(df.index)], axis=0)
df['Target_'+target] = target_values
print('Target_'+target)
return df
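# Hedged example (not from the original script): temporal_horizon() shifts the
# target forward by pd_steps * 6 rows, i.e. it assumes 6 samples per
# prediction-horizon unit.  The DataFrame and the column name 'ph' below are
# made up purely for illustration.
def _temporal_horizon_example():
    df = pd.DataFrame({'ph': np.arange(100, dtype=float)})
    shifted = temporal_horizon(df.copy(), pd_steps=3, target='ph')
    # 'Target_ph' holds the value observed 3 * 6 = 18 rows later; the last
    # 18 rows are dropped because their future target is unknown.
    return shifted[['ph', 'Target_ph']].head()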
def ARIMAregression(train, test, config, cat, directory, file, target, PrH_index, n_steps, CV, result_filename, timeline):
p, d, q = config
history = [x for x in train]
predictions = list()
# walk-forward validation
for t in range(len(test)):
model = ARIMA(history, order=(p, d, q))
model_fit = model.fit(trend='nc', disp=0)
# plot some history and the forecast with confidence intervals
# model_fit.plot_predict(len(train)-10, len(train)+1)
# plt.legend(loc='upper left')
# plt.show()
output, stderr, conf = model_fit.forecast()
# summarize forecast and confidence intervals
# print('Expected: %.3f' % test[t])
# print('Forecast: %.3f' % output)
# print('Standard Error: %.3f' % stderr)
# print('95%% Confidence Interval: %.3f to %.3f' % (conf[0][0], conf[0][1]))
yhat = output
predictions.append(yhat)
history.append(test[t])
# print(predictions[-1])
# print('predicted=%f, expected=%f' % (yhat, test[t]))
# evaluate forecasts
r2_score = skm.r2_score(test, predictions)
print('Test r2_score: %.3f' % r2_score)
# plot forecasts against actual outcomes
# plt.plot(test)
# plt.plot(predictions, color='red')
# plt.show()
saveResults(predictions, test, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline, config)
return r2_score
def movingAverage(train_X, train_y, test_X, test_y):
# persistence model on training set
train_pred = [x for x in train_X]
# calculate residuals
train_resid = [train_y[i]-train_pred[i] for i in range(len(train_pred))]
# model the training set residuals
model = AR(train_resid)
model_fit = model.fit()
window = model_fit.k_ar
coef = model_fit.params
# walk forward over time steps in test
history = train_resid[len(train_resid)-window:]
history = [history[i] for i in range(len(history))]
predictions = list()
# expected_error = list()
for t in range(len(test_y)):
# persistence
yhat = test_X[t]
error = test_y[t] - yhat
# expected_error.append(error)
# predict error
length = len(history)
lag = [history[i] for i in range(length-window, length)]
pred_error = coef[0]
for d in range(window):
pred_error += coef[d+1] * lag[window-d-1]
yhat = yhat + pred_error
predictions.append(yhat)
history.append(error)
# print('predicted error=%f, expected error=%f' %
# (pred_error, error))
# plt.plot(test_y)
# plt.plot(predictions, color='red')
# plt.legend()
# plt.show()
return predictions
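# Hedged sketch (not in the original script): movingAverage() treats the "X"
# inputs as the persistence forecast, i.e. the series lagged by one step, and
# fits an AR model to the persistence residuals.  The split ratio and the
# assumption that `series` is a 1-D numpy array are illustrative choices.
def _moving_average_example(series):
    lagged, actual = series[:-1], series[1:]
    split = int(len(actual) * 0.7)
    preds = movingAverage(lagged[:split], actual[:split],
                          lagged[split:], actual[split:])
    return skm.r2_score(actual[split:], preds)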
def AutoRegression(train, test):
model = AR(train)
model_fit = model.fit()
window = model_fit.k_ar
coef = model_fit.params
# walk forward over time steps in test
history = train[len(train)-window:]
# print(len(history))
history = [history[i]for i in range(len(history))]
# print(history[0:5])
predictions = list()
for t in range(len(test)):
length = len(history)
lag = [history[i] for i in range(length-window, length)]
yhat = coef[0]
for d in range(window):
yhat += coef[d+1] * lag[window-d-1]
obs = test[t]
predictions.append(yhat)
history.append(obs) # new observations added to history
# print('predicted=%f, expected=%f' % (yhat, obs))
return predictions, window, coef
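# Hedged sketch (not in the original script): walk-forward use of
# AutoRegression() on a synthetic series.  The data and the train/test split
# are made up purely for illustration.
def _autoregression_example():
    rng = np.random.RandomState(0)
    series = np.sin(np.arange(200) / 5.0) + rng.normal(0, 0.1, 200)
    train, test = series[:150], series[150:]
    predictions, window, coef = AutoRegression(train, test)
    return skm.r2_score(test, predictions)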
def custom_cv_2folds(X, kfolds, th):
n = X.shape[0]
print('******** creating custom CV:')
i = 1
while i <= kfolds:
np.random.seed(i)
idx = np.empty(0, dtype=int)
for index in np.arange(0, n-(th*6), step=(th*6), dtype=int):
randwindowpoint = np.random.randint(0, 6, size=1)
idx = np.append(idx, [randwindowpoint+index])
# print(idx)
print(idx[0: 10])
yield idx[: int(len(idx)*0.7)], idx[int(len(idx)*0.7):]
i = i+1
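# Hedged sketch (not in the original script): how the train/test index pairs
# yielded above are typically consumed.  `estimator` is a hypothetical
# sklearn-like regressor and X, y are assumed to be numpy arrays.
def _custom_cv_example(X, y, estimator, kfolds=3, th=1):
    scores = []
    for train_idx, test_idx in custom_cv_2folds(X, kfolds, th):
        estimator.fit(X[train_idx], y[train_idx])
        scores.append(skm.r2_score(y[test_idx], estimator.predict(X[test_idx])))
    return scores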
# one-step Holt Winter’s Exponential Smoothing forecast
def exp_smoothing_forecast_onestep(history, config):
t, d, s, p, b, r = config
# define model
history = array(history)
model = ExponentialSmoothing(
history, trend=t, damped=d, seasonal=s, seasonal_periods=p)
# fit model
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r)
# make one step forecast
yhat = model_fit.predict(len(history), len(history))
return yhat[0]
####
# walk-forward validation for exp_smoothing_forecast
def exp_smoothing_forecast(train, test, cfg, cat, directory, file, target, PrH_index, n_steps, CV, result_filename, timeline):
predictions = list()
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# fit model and make forecast for history
yhat = exp_smoothing_forecast_onestep(history, cfg)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# print(predictions[-1])
# estimate prediction error
saveResults(predictions, test, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline, cfg)
r2_score = skm.r2_score(test, predictions)
print(r2_score)
return r2_score
# one-step sarima forecast
def sarima_forecast_oneStep(history, config):
order, sorder, trend = config
# define model
model = SARIMAX(history, order=order, seasonal_order=sorder, trend=trend,
enforce_stationarity=False, enforce_invertibility=False)
# fit model
model_fit = model.fit(disp=False, maxiter=2000)
# make one step forecast
yhat = model_fit.predict(len(history), len(history))
# print(yhat[0])
return yhat[0]
def sarima_forecast(train, test, cfg, cat, directory, file, target, PrH_index, n_steps, CV, result_filename, timeline):
predictions = list()
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# fit model and make forecast for history
yhat = sarima_forecast_oneStep(history, cfg)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# print(predictions[-1])
saveResults(predictions, test, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline, cfg)
r2_score = skm.r2_score(test, predictions)
return r2_score
def exp_smoothing_configs():
models = list()
# define config lists
t_params = ['add', 'mul'] # None
d_params = [True, False]
s_params = [None] # 'add', 'mul'] # None
p_params = [None, 24*30, 24*30*5]
b_params = [False] # True
r_params = [True, False]
# create config instances
for t in t_params:
for d in d_params:
for s in s_params:
for p in p_params:
for b in b_params:
for r in r_params:
cfg = [t, d, s, p, b, r]
models.append(cfg)
return models
def score_model(model, train, test, cfg, cat, directory, file, target, PrH_index, n_steps, CV, result_filename, timeline, debug=False):
print(model)
result = None
# convert config to a key
key = str(cfg)
# show all warnings and fail on exception if debugging
if debug:
result = model(train, test, cfg, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline)
else:
# one failure during model validation suggests an unstable config
try:
# never show warnings when grid searching, too noisy
with catch_warnings():
filterwarnings("ignore")
if model == 'SARIMA':
result = sarima_forecast(train, test, cfg, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline)
elif model == 'ARIMA':
result = ARIMAregression(train, test, cfg, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline)
elif model == 'ETS':
result = exp_smoothing_forecast(train, test, cfg, cat, directory, file,
target, PrH_index, n_steps, CV, result_filename, timeline)
        except Exception:
            result = None
# check for an interesting result
if result is not None:
print(' > Model[%s] %.3f' % (key, result))
return (key, result)
def ARIMA_configs():
models = list()
# define config lists
p_values = range(1, 3)
d_values = range(0, 2)
q_values = range(0, 3)
# create config instances
for p in p_values:
for d in d_values:
for q in q_values:
cfg = [p, d, q]
models.append(cfg)
return models
# create a set of sarima configs to try
# order: a tuple of (p, d, q) parameters for modelling the trend component.
# seasonal order: a tuple of (P, D, Q, m) parameters for modelling the seasonal component.
# trend: controls the deterministic trend as one of 'n', 'c', 't' or 'ct' for no trend,
# constant, linear, and constant-plus-linear trend, respectively.
def sarima_configs(seasonal=[0]):
models = list()
# define config lists
p_params = [0, 1, 2]
d_params = [0, 1]
q_params = [0, 1, 2]
t_params = ['n', 'c', 't', 'ct']
P_params = [0, 1, 2]
D_params = [0, 1]
Q_params = [0, 1, 2]
m_params = seasonal
# create config instances
for p in p_params:
for d in d_params:
for q in q_params:
for t in t_params:
for P in P_params:
for D in D_params:
for Q in Q_params:
for m in m_params:
cfg = [(p, d, q), (P, D, Q, m), t]
models.append(cfg)
return models
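# Illustrative sketch (not part of the original script): driving a grid search with the
# config generators and score_model() defined above. The fold/bookkeeping arguments are
# placeholders here; the real call sites live elsewhere in this script.
def _demo_grid_search(train, test, cat, directory, file, target,
                      PrH_index, n_steps, CV, result_filename, timeline):
    scores = []
    for cfg in sarima_configs(seasonal=[0, 24]):
        scored = score_model('SARIMA', train, test, cfg, cat, directory, file,
                             target, PrH_index, n_steps, CV, result_filename, timeline)
        if scored is not None and scored[1] is not None:
            scores.append(scored)
    # the forecast functions return skm.r2_score, so higher is better
    scores.sort(key=lambda kr: kr[1], reverse=True)
    return scores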
def saveResults(predictions, test_y, cat, directory, file, target, PrH_index, n_steps, i, result_filename, timeline, config):
print(cat, directory, file, target,
PrH_index, n_steps, i, result_filename, config)
if cat == 1:
predictions = np.array(predictions).astype(int)
test_y = np.array(test_y).astype(int)
cm0 = func.forecast_accuracy(
predictions, test_y, cat)
filename = file + '_' + \
target+'_TH' + \
str(PrH_index)+'_lag' + \
str(n_steps)+'_'+str(i)+'_config'+str(config)
directorydeeper = directory+'more/'
if not os.path.exists(directorydeeper):
os.makedirs(directorydeeper)
data = {'time': timeline,
'Actual': test_y,
'Predictions': predictions}
df = pd.DataFrame(data=data)
df.to_csv(directorydeeper+filename +
'.csv', index=False)
plt.scatter(timeline.values,
test_y, s=1)
plt.scatter(timeline.values,
predictions, s=1)
plt.legend(['actual', 'predictions'],
loc='upper right')
plt.xticks(rotation=45)
plt.savefig(directorydeeper+filename+'.png')
plt.close()
# print(directorydeeper)
# print(filename)
# print(cm0)
method = 'OrgData'
if cat == 1:
data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1, 'config': [config],
'file_names': file, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}
elif cat == 0:
data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1, 'config': [config],
'file_names': file, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}
df = | pd.DataFrame(data=data, index=[0]) | pandas.DataFrame |
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
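# Note (added for clarity): with the default direction="backward", each left "a" is
# matched to the last right row whose "a" is less than or equal to it, which is why
# 5 picks up right_val 3 and 10 picks up right_val 7 in the expected frame above.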
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
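# Illustrative note (not part of the original test): the tolerance dtype has to match the
# dtype of the "on" key -- a Timedelta for datetime64 keys, a plain int for integer keys --
# and it must be non-negative, which is exactly what the raises above assert. E.g.:
#
#   merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms"))
#   merge_asof(trades.reset_index(), quotes.reset_index(),
#              on="index", by="ticker", tolerance=1)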
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
| merge_asof(trades, quotes, on="time", by="ticker") | pandas.merge_asof |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
| tm.assert_sp_array_equal(arr, exp) | pandas.util.testing.assert_sp_array_equal |
import numpy as np
from collections import defaultdict
import pandas as pd
genes = pd.read_table('cold/freeze12.rename.table', index_col=0, usecols=[0,1], header=None)
genes['genome'] = genes[1].map(lambda g: '.'.join(g.split('.')[:2]))
genome2genes = genes.groupby('genome').groups
genome2genes_catalog = defaultdict(set)
fr12_rep = pd.read_table('cold/GMGC.relationships.txt.fr12.catalog.sorted', header=None, usecols=[0,2], names=['orig', 'rep'])
fr12_2_rep = fr12_rep.groupby('orig').groups
catalog_genes = [line.strip() for line in open('cold/derived/GMGC.95nr.old-headers.sorted')]
catalog = set(catalog_genes)
redundant = set(line.strip() for line in open('cold/redundant.complete.sorted.txt'))
catalog -= redundant
print(5)
fr12_rep_d = fr12_rep.rep.to_dict()
hists = | pd.read_table('./tables/genes.prevalence.hists.txt', index_col=0) | pandas.read_table |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = | DatetimeIndex([], freq='D') | pandas.DatetimeIndex |
#---------------------------------------------------------------
#__main__.py
#this script collates measurements from individual csv outputs of
#the morphometriX GUI
#the csvs can be saved either all in one folder or within each individual
#animals folder.
#this version includes a safety net that recalculates the measurement using
#accurate altitude and focal lengths that the user must provide in csvs.
# this version uses PyQt5 instead of easygui (used in v2.0)
#created by: <NAME> (<EMAIL>), March 2020
#updated by: <NAME>, June 2021
#----------------------------------------------------------------
#import modules
import pandas as pd
import numpy as np
import os, sys
import math
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog, QMessageBox, QLabel, QVBoxLayout
from PyQt5.QtGui import QIcon
import collatrix.collatrix_functions
from collatrix.collatrix_functions import anydup, readfile, fheader, lmeas, wmeas, setup, pull_data, safe_data, end_concat, df_formatting
from collatrix.collatrix_functions import collate_v4and5, collate_v6
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'close box to end script'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
#add message box with link to github documentation
msgBox = QMessageBox()
msgBox.setWindowTitle("For detailed input info click link below")
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setText('<a href = "https://github.com/cbirdferrer/collatrix#inputs">CLICK HERE</a> for detailed input instructions, \n then click on OK button to continue')
x = msgBox.exec_()
#do you want the Animal ID to be assigned based on the name of the folder
items = ('yes', 'no')
anFold, okPressed = QInputDialog.getItem(self,"Input #1", "Do you want the Animal ID to be assigned based on the name of the folder? \n yes or no",items,0,False)
if okPressed and anFold:
print("{0} Animal ID in folder name".format(anFold))
#ask if they want safey net
items = ('yes', 'no')
safety, okPressed = QInputDialog.getItem(self,"Input #2", "Do you want to use the safety? \n Yes or No?",items,0,False)
if okPressed and safety:
print("{0} safety".format(safety))
#if safety yes, ask for file
if safety == 'yes':
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
safe_csv, _ = QFileDialog.getOpenFileName(self,"2.1 Safety File: Image list with altitudes and other information.", "","All Files (*);;csv files (*.csv)", options=options)
print("safety csv = {0}".format(safe_csv))
elif safety == 'no':
pass
#animal id list?
items = ('no','yes')
idchoice, okPressed = QInputDialog.getItem(self, "Input #3", "Do you want output to only contain certain individuals? \n Yes or No?",items,0,False)
if idchoice and okPressed:
print("{0} subset list".format(idchoice))
if idchoice == 'yes':
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
idsCSV, _ = QFileDialog.getOpenFileName(self,"3.1 File containing ID list", "","All Files (*);;csv files (*.csv)", options=options)
if idsCSV:
print("ID list file = {0}".format(idsCSV))
elif idchoice == 'no':
pass
#ask for name of output
outname, okPressed = QInputDialog.getText(self, "Input #4", "Prefix for output file",QLineEdit.Normal,"")
#import safety csv if safety selected
if safety == 'yes':
dfList = pd.read_csv(safe_csv, sep = ",")
dfList = dfList.dropna(how="all",axis='rows').reset_index()
df_L = dfList.groupby('Image').first().reset_index()
df_L['Image'] = [x.strip() for x in df_L['Image']]
elif safety == 'no':
df_L = "no safety"
#get folders
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
GUIfold = QFileDialog.getExistingDirectory(None, "Input 5. Folder containing MorphoMetriX outputs",options=options)
saveFold = QFileDialog.getExistingDirectory(None,"Input 6. Folder where output should be saved",options = options)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
#make lists
#for csvs
csvs_all = []
csvs = []
not_mmx = []
#for measurements
measurements = []
nonPercMeas = []
#walk through all folders in GUI folder and collect all csvs
for root,dirs,files in os.walk(GUIfold):
csvs_all += [os.path.join(root,f) for f in files if f.endswith('.csv')]
#make sure the csvs are morphometrix outputs by checking first row
csvs += [c for c in csvs_all if 'Image ID' in pd.read_csv(c,sep='^',header=None,prefix='X',engine = 'python',quoting=3, na_values = ['""','"'],encoding_errors = "ignore")['X0'][0]]
#make list of all csvs that were not morphometrix csvs to tell user
not_mmx += [x for x in csvs_all if x not in csvs]
#check for csvs that (for whatever reason) hit an error when being read in.
#makes a list of those csvs for users to examine
badcsvs = []
for f in csvs:
try:
temp=pd.read_csv(f,sep='^',header=None,prefix='X',engine = 'python',quoting=3, na_values = ['""','"'],encoding_errors = "ignore") #read in csv as one column
except:
print(f)
badcsvs += [f]
pass
badcsvs = set(badcsvs)
csvs = [x for x in csvs if x not in badcsvs]
#sort csvs into the different morphometrix output versions
v4csvs = []; v5csvs = []; v6csvs = []
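#note (added for clarity, inferred from the checks below): a csv is treated as v4 output if
#its header has an 'Object Name' column; as v5 if it has an 'Object' column plus
#per-measurement "% Width" column names; as v6 if it has an 'Object' column and a
#'Widths (%)' column whose entries contain '% Width'; an 'Object' column matching neither
#pattern falls back to v5, and anything without either Object column is flagged as not morphometrix.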
for f in csvs:
df0 = readfile(f)
if 'Object' in df0[0].tolist():
idx = df0.loc[df0[0] == 'Object'].index #find index (row) values of 'Object'
elif 'Object Name' in df0[0].tolist():
idx = df0.loc[df0[0] == 'Object Name'].index #find index (row) values of 'Object'
df = df0.truncate(before=idx[0]) #take subset of df starting at first row containing Object
df = fheader(df)
ch = df.columns.tolist()
if 'Object Name' in ch:
v4csvs += [f]
elif 'Object' in ch:
if any("% Width" in c for c in ch):
v5csvs += [f]
elif any('% Width' in x for x in df['Widths (%)']):
v6csvs += [f]
else:
v5csvs += [f]
else:
not_mmx += [f]
print("these csvs were not morphometrix outputs: {0}".format(not_mmx))
#put together dataframe of inputs and error csvs to output
if safety == 'yes':
message = "Animal ID from folder name?: {0} \n\nThe safety file was: {1}\n\n\nThese csvs were not morphometrix outputs:{2}\n\nThese csvs could not be read in: {3}".format(anFold, safe_csv, not_mmx, badcsvs)
elif safety == 'no':
message = "Animal ID from folder name?: {0} \n\nSafety not used\n\n\nThese csvs were not morphometrix outputs:{1}\n\nThese csvs could not be read in: {2}".format(anFold, not_mmx, badcsvs)
mess = pd.DataFrame(data={'Processing Notes':message},index=[1])
mess_out = os.path.join(saveFold,"{0}_processing_notes.txt".format(outname))
mess.to_csv(mess_out)
#set up list of constants
constants = ['Image ID', 'Image Path', 'Focal Length', 'Altitude', 'Pixel Dimension']
# set up empty dataframes
df_all1 = pd.DataFrame(data = {})
df_all1_pc = pd.DataFrame(data = {})
## COLLATE V4 CSVS
if len(v4csvs) > 0:
v4_all,v4_all_pixc = collate_v4and5(v4csvs,'Object Name', 'Length', constants,safety,df_L, measurements, nonPercMeas, anFold)
df_all1 = pd.concat([df_all1,v4_all])
df_all1_pc = pd.concat([df_all1_pc,v4_all_pixc])
else: pass
## COLLATE V5 CSVS
if len(v5csvs) >0:
v5_all,v5_all_pixc = collate_v4and5(v5csvs,'Object', 'Length (m)', constants,safety,df_L,measurements, nonPercMeas, anFold)
df_all1 = pd.concat([df_all1,v5_all])
df_all1_pc = pd.concat([df_all1_pc,v5_all_pixc])
else: pass
## COLLATE V6 CSVS
if len(v6csvs) >0:
v6_all,v6_all_pixc = collate_v6(v6csvs, 'Object', 'Length (m)',constants,safety,df_L,measurements, nonPercMeas, anFold)
df_all1 = pd.concat([df_all1,v6_all])
df_all1_pc = | pd.concat([df_all1_pc,v6_all_pixc]) | pandas.concat |
from airflow.models import Variable
from minio import Minio
from minio.error import ResponseError
from minio.error import InvalidBucketError
from minio.error import NoSuchKey
import os
import shutil
import pandas as pd
import logging
import datetime
from airflow.hooks.mysql_hook import MySqlHook
import sqlalchemy as db
DATALAKE_MINIO_ENDPOINT = Variable.get('DATALAKE_MINIO_ENDPOINT')
DATALAKE_MINIO_ACCESS_KEY = Variable.get('DATALAKE_MINIO_ACCESS_KEY')
DATALAKE_MINIO_SECRET_KEY = Variable.get('DATALAKE_MINIO_SECRET_KEY')
DATALAKE_MINIO_AGG_BUCKET = Variable.get('DATALAKE_MINIO_AGG_BUCKET')
def extract_latest_date(db_engine, table, field):
"""
Function to extract latest date from a table
"""
query = "SELECT MAX({}) AS max_date FROM {}".format(field, table)
result_df = pd.read_sql(query, db_engine)
return result_df.loc[0, 'max_date']
def is_weekend(row):
"""
Function to determine whether a date row is weekend or not
"""
if row['dayofweek'] == 5 or row['dayofweek'] == 6:
return 1
else:
return 0
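# Illustrative sketch (not part of the original DAG code): is_weekend() is meant to be
# applied row-wise to a date table carrying a 'dayofweek' column (Monday=0 ... Sunday=6),
# e.g.
#
#   date_df['weekend'] = date_df.apply(is_weekend, axis=1)
#
# the same flag could also be computed vectorised as (date_df['dayofweek'] >= 5).astype(int).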
def create_date_table(start, end):
"""
Function to generate a table of dates spanning a particular range
"""
df = pd.DataFrame({"date": | pd.date_range(start, end) | pandas.date_range |
import numpy as np
import numpy.linalg as linalg
import pandas as pd
def linear_regression(X, y):
return linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
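# Note (added for clarity): linear_regression() above is the closed-form ordinary least
# squares solution theta = (X^T X)^{-1} X^T y. Explicitly inverting X^T X can be
# numerically fragile for ill-conditioned design matrices; an equivalent but more stable
# alternative (illustrative only, not used by go() below) is:
def linear_regression_lstsq(X, y):
    # least-squares solve avoids forming and inverting the normal matrix
    theta, _residuals, _rank, _sv = linalg.lstsq(X, y, rcond=None)
    return theta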
def go():
data = np.loadtxt('quasar_train.csv', delimiter=',')
wavelengths = data[0]
fluxes = data[1]
ones = np.ones(fluxes.size)
df_ones = pd.DataFrame(ones, columns=['xint'])
df_wavelengths = pd.DataFrame(wavelengths, columns=['wavelength'])
df_fluxes = pd.DataFrame(fluxes, columns=['flux'])
df = | pd.concat([df_ones, df_wavelengths, df_fluxes], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
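# Illustrative sketch (not part of the original test suite): how the two helpers above
# would typically be combined -- write a frame out, read it back in chunks on a thread
# pool, and compare. Path and sizes are placeholders.
#
#   df = self.construct_dataframe(num_rows=10000)
#   df.to_csv('/tmp/__threaded.csv')
#   result = self.generate_multithread_dataframe('/tmp/__threaded.csv',
#                                                num_rows=10000, num_tasks=4)
#   tm.assert_frame_equal(df, result)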
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
                       index=Index(['hello', 'world', 'foo'], name='message'))
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
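        # xbox: the box type the boolean comparison result is expected to come back in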
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Comparisons with a regular value: NaT behaves like np.nan (False for everything except !=)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import yaml
from math import ceil
import collections
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from imblearn.metrics import geometric_mean_score
import pickle
with open('nsga2/config_file.yaml', 'r') as f:
    config = yaml.safe_load(f)
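# The loaded config is expected to provide at least ROOT_PATH, which read_data()
# below uses to locate the data/ directory.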
def decode(var_range, **features):
"""
    Decode the real-valued NSGA-II variables into decision-tree hyperparameters.
"""
features['criterion'] = round(features['criterion'], 0)
if features['max_depth'] is not None:
features['max_depth'] = int(round(features['max_depth']))
else:
features['max_depth'] = var_range[1][1]
features['min_samples_split'] = int(round(features['min_samples_split']))
#features['min_samples_leaf'] = int(round(features['min_samples_leaf']))
if features['max_leaf_nodes'] is not None:
features['max_leaf_nodes'] = int(round(features['max_leaf_nodes']))
else:
features['max_leaf_nodes'] = var_range[3][1]
if features['class_weight'] is not None:
features['class_weight'] = int(round(features['class_weight']))
hyperparameters = ['criterion', 'max_depth', 'min_samples_split', 'max_leaf_nodes', 'class_weight']
list_of_hyperparameters = [(hyperparameter, features[hyperparameter]) for hyperparameter in hyperparameters]
features = collections.OrderedDict(list_of_hyperparameters)
return features
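# Illustrative sketch of decode() (the var_range bounds and feature values below
# are hypothetical, not taken from config_file.yaml):
#
#   decode([(0, 1), (1, 30), (2, 40), (2, 100), (0, 9)],
#          criterion=0.7, max_depth=12.3, min_samples_split=4.6,
#          max_leaf_nodes=None, class_weight=3.2)
#   -> OrderedDict([('criterion', 1.0), ('max_depth', 12),
#                   ('min_samples_split', 5), ('max_leaf_nodes', 100),
#                   ('class_weight', 3)])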
def read_data(df_name):
"""
Reads the dataset to work with.
"""
df = pd.read_csv(config['ROOT_PATH'] + '/data/' + df_name + '.csv', sep = ',')
return df
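# For example, read_data('adult') would load <ROOT_PATH>/data/adult.csv
# (dataset names used here are the ones referenced in get_matrices below).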
def score_text(v):
if v == 'Low':
return 0
elif v == 'Medium':
return 1
else:
return 2
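# Ordinal encoding of the score_text column: 'Low' -> 0, 'Medium' -> 1,
# anything else (e.g. 'High') -> 2.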
def get_matrices(df_name, seed):
"""
Split dataframe into train and test.
"""
df = read_data(df_name)
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
if(df_name == 'propublica_violent_recidivism'):
X = X[['sex', 'age', 'age_cat', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'c_charge_degree', 'c_charge_desc', 'decile_score', 'score_text']]
if(df_name == 'propublica_recidivism'):
X = X[['sex', 'age', 'age_cat', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'c_charge_degree', 'c_charge_desc', 'decile_score', 'score_text']]
le = preprocessing.LabelEncoder()
for column_name in X.columns:
if X[column_name].dtype == object:
X[column_name] = X[column_name].astype(str)
if(column_name == 'race' and df_name == 'adult'):
X[column_name] = np.where(X[column_name] == 'White', 0, 1)
elif(column_name == 'sex'):
X[column_name] = np.where(X[column_name] == 'Male', 0, 1)
elif(column_name == 'race' and (df_name == 'propublica_recidivism' or df_name == 'propublica_violent_recidivism')):
X[column_name] = np.where(X[column_name] == 'Caucasian', 0, 1)
elif(column_name == 'compas_screening_date' or column_name == 'screening_date' or column_name == 'dob'):
X[column_name] = | pd.to_datetime(X[column_name]) | pandas.to_datetime |
"""Training.
"""
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import progressbar
from stock_trading_backend.simulation import StockMarketSimulation
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
def train_agent(agent, from_date=None, to_date=None, min_duration=60, max_duration=90, commission=0,
max_stock_owned=1, min_start_balance=1000, max_start_balance=4000, training=True,
stock_data_randomization=False, episode_batch_size=5, num_episodes=10):
"""Train an agent with provided params.
Args:
agent: the agent to train.
from_date: datetime date for the start of the range.
to_date: datetime date for the end of the range.
min_duration: minimum length of the episode.
max_duration: maximum length of the episode (if 0 will run for all available dates).
commission: relative commission for each transaction.
max_stock_owned: a maximum number of different stocks that can be owned.
min_start_balance: the minimum starting balance.
max_start_balance: the maximum starting balance.
stock_data_randomization: whether to add stock data randomization.
episode_batch_size: the number of episodes in a training batch.
num_episodes: number of episodes that training going to last.
training: the param passed to make_decision in the agent.
"""
if not agent.requires_learning:
raise ValueError("This agent does not need learning")
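# Example call (illustrative only; `agent` and its reward/data-collection configs
# come from the surrounding package, and the numeric values below are hypothetical):
# train_agent(agent, min_duration=30, max_duration=60, commission=0.001,
#             episode_batch_size=10, num_episodes=100)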
if from_date is None or to_date is None:
today = datetime.today()
today = datetime(today.year, today.month, today.day)
from_date = today - timedelta(days=720)
to_date = today - timedelta(days=60)
simulation = StockMarketSimulation(agent.data_collection_config, from_date=from_date,
to_date=to_date, min_start_balance=min_start_balance,
max_start_balance=max_start_balance, commission=commission,
max_stock_owned=max_stock_owned, min_duration=min_duration,
max_duration=max_duration, reward_config=agent.reward_config,
stock_data_randomization=stock_data_randomization)
num_episodes_run = 0
overall_reward_history = []
loss_history = []
observation = simulation.reset()
_, kwargs = agent.make_decision(observation, simulation, False)
kwargs_keys = kwargs.keys()
batch_kwargs_keys = ["{}s_batch".format(key) for key in kwargs_keys]
with progressbar.ProgressBar(max_value=num_episodes) as progress_bar:
while num_episodes_run < num_episodes:
batch_kwargs = {key: [] for key in batch_kwargs_keys}
batch_rewards = []
batch_observations = []
batch_actions = []
batch_reward = 0
num_episodes_left_in_batch = episode_batch_size
# Run the simulations in the batch.
while num_episodes_left_in_batch > 0 and num_episodes_run < num_episodes:
rewards = []
actions = []
kwargs = {key: [] for key in kwargs_keys}
observation = simulation.reset()
observations = | pd.DataFrame(columns=observation.index) | pandas.DataFrame |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Feb 21, 2019
"""
import unittest
from causallib.utils import general_tools
from causallib.utils.stat_utils import robust_lookup
class TestUtils(unittest.TestCase):
def ensure_learner_is_fitted(self, unfitted_learner, X, y):
model_name = str(unfitted_learner.__class__).split(".")[-1]
with self.subTest("Check is_fitted of {}".format(model_name)):
self.assertFalse(general_tools.check_learner_is_fitted(unfitted_learner))
unfitted_learner.fit(X, y)
self.assertTrue(general_tools.check_learner_is_fitted(unfitted_learner))
def test_check_classification_learner_is_fitted(self):
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
X, y = make_classification()
for clf in [LogisticRegression(solver='lbfgs'), DecisionTreeClassifier(),
RandomForestClassifier(), LinearSVC()]:
self.ensure_learner_is_fitted(clf, X, y)
def test_check_regression_learner_is_fitted(self):
from sklearn.linear_model import LinearRegression
from sklearn.tree import ExtraTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_regression
X, y = make_regression()
for regr in [LinearRegression(), ExtraTreeRegressor(),
GradientBoostingRegressor(), SVR()]:
self.ensure_learner_is_fitted(regr, X, y)
def test_robust_lookup(self):
import pandas as pd
with self.subTest("zero-one columns, range index"):
X = pd.DataFrame({
0: [10, 20, 30, 40],
1: [100, 200, 300, 400]
})
a = pd.Series([0, 1, 0, 1])
extracted = robust_lookup(X, a)
expected = pd.Series([10, 200, 30, 400])
pd.testing.assert_series_equal(expected, extracted)
with self.subTest("integer columns, range index"):
X = pd.DataFrame({
3: [10, 20, 30, 40],
4: [100, 200, 300, 400]
})
a = | pd.Series([3, 4, 3, 4]) | pandas.Series |
import pandas as pd
import numpy as np
import os
import re
from bobaserver import app
from .bobastats import sensitivity
from .util import print_warn, remove_na, group_by
def get_decision_list ():
# get a list of decision names
return sorted([d['var'] for d in app.decisions])
def get_decision_df ():
# get the summary.csv without any non-decision columns
dec = [d['var'] for d in app.decisions]
return read_summary()[dec]
def get_field_name (field):
# get the column name of the field in df
return app.schema[field]['field']
def read_summary ():
""" read summary.csv """
if hasattr(app, 'summary'):
return app.summary
fn = os.path.join(app.data_folder, 'summary.csv')
smr = | pd.read_csv(fn, na_filter=False) | pandas.read_csv |
import numpy as np
from numpy.core.fromnumeric import shape
from numpy.core.shape_base import block
from numpy.lib.function_base import append
import pandas as pd
import torch
from xgboost import data
from models.krs_model import XGBoostModel
import datetime
import pickle
import re
class KRS:
def __init__(self, max_storage, curr_storage, year):
self.max_storage = max_storage
self.curr_amcs_storage = curr_storage
self.curr_non_amcs_storage = curr_storage
self.cycle_end_date = datetime.date(year+1, 5, 31)
self.cum_inflow_till_prev_day = 0
self.cum_outflow_till_prev_day = 0
self.normal_cum_inflow_till_prev_day = 0
# self.remaining_expected_monsoon_inflow = 0
# self.remaining_expected_non_monsoon_inflow = 0
# self.remaining_expected_monsoon_outflow = 0
# self.remaining_expected_non_monsoon_outflow = 0
# self.actual_monsoon_inflow = 0
# self.actual_monsoon_outflow = 0
# self.actual_non_monsoon_inflow = 0
# self.actual_non_monsoon_outflow = 0
self.expected_monsoon_inflow = 0
self.expected_non_monsoon_inflow = 0
self.expected_monsoon_outflow = 0
self.expected_non_monsoon_outflow = 0
self.normalizing_factor = 0
self.weather_df = pd.read_csv('dataset/Weather/mysuru_kodagu_hassan_weather.csv')
self.inflow_df = pd.read_csv('dataset/reservoir_dataset_augmented.csv')
self.inflow_df = self.inflow_df[self.inflow_df['RESERVOIR'] == 'K.R.S']
self.season_df = self.inflow_df[['MONTH', 'DATE', 'SEASON']].drop_duplicates()
self.normal_inflow_outflow_df = pd.read_csv('dataset/datewise_normal_inflow_outflow.csv')
# self.normal_inflow_outflow_df = pd.read_csv('dataset/datewise_normal_inflow_outflow.csv')
# Load encoders
with open('training/KRS/encoders/month_le.pkl', 'rb') as f:
self.month_le = pickle.load(f)
with open('training/KRS/encoders/date_le.pkl', 'rb') as f:
self.date_le = pickle.load(f)
with open('training/KRS/encoders/season_le.pkl', 'rb') as f:
self.season_le = pickle.load(f)
with open('training/KRS/encoders/rs_inflow.pkl', 'rb') as f:
self.rs_inflow = pickle.load(f)
with open('training/KRS/encoders/rs_outflow.pkl', 'rb') as f:
self.rs_outflow = pickle.load(f)
with open('training/KRS/encoders/rs_temp.pkl', 'rb') as f:
self.rs_temp = pickle.load(f)
with open('training/KRS/encoders/rs_feels_like.pkl', 'rb') as f:
self.rs_feels_like = pickle.load(f)
with open('training/KRS/encoders/rs_temp_min.pkl', 'rb') as f:
self.rs_temp_min = pickle.load(f)
with open('training/KRS/encoders/rs_temp_max.pkl', 'rb') as f:
self.rs_temp_max = pickle.load(f)
with open('training/KRS/encoders/rs_pressure.pkl', 'rb') as f:
self.rs_pressure = pickle.load(f)
with open('training/KRS/encoders/rs_humidity.pkl', 'rb') as f:
self.rs_humidity = pickle.load(f)
with open('training/KRS/encoders/rs_wind_speed.pkl', 'rb') as f:
self.rs_wind_speed = pickle.load(f)
with open('training/KRS/encoders/rs_wind_deg.pkl', 'rb') as f:
self.rs_wind_deg = pickle.load(f)
with open('training/KRS/encoders/rs_rain.pkl', 'rb') as f:
self.rs_rain = pickle.load(f)
with open('training/KRS/encoders/rs_clouds_all.pkl', 'rb') as f:
self.rs_clouds_all = pickle.load(f)
# # weather data scaling
# self.__apply_scaling(weather_df)
# weather data district df
self.kodagu_df = self.weather_df[self.weather_df['city_name'] == 'Kodagu'].reset_index(drop=True)
self.hassan_df = self.weather_df[self.weather_df['city_name'] == 'Hassan'].reset_index(drop=True)
self.mysuru_df = self.weather_df[self.weather_df['city_name'] == 'Mysuru'].reset_index(drop=True)
# # create region-wise forecast df
# start_date = datetime.date(year, 6, 1)
# weather_available_date = start_date + datetime.timedelta(days=15)
# self.mysuru_forecast_df, self.kodagu_forecast_df, self.hassan_forecast_df = self.__prepare_forecast_df(start_date, weather_available_date)
# self.mysuru_forecast_df = self.mysuru_df.copy()
# self.hassan_forecast_df = self.hassan_df.copy()
# self.kodagu_forecast_df = self.kodagu_df.copy()
self.mysuru_avg_df, self.kodagu_avg_df, self.hassan_avg_df = self.get_avg_weather_df(year)
# region-wise weather data scaling
self.__apply_scaling(self.kodagu_df)
self.__apply_scaling(self.hassan_df)
self.__apply_scaling(self.mysuru_df)
self.__apply_scaling(self.kodagu_avg_df)
self.__apply_scaling(self.hassan_avg_df)
self.__apply_scaling(self.mysuru_avg_df)
# self.__apply_scaling(self.kodagu_forecast_df)
# self.__apply_scaling(self.hassan_forecast_df)
# self.__apply_scaling(self.mysuru_forecast_df)
self.__load_models()
# create annual forecast
prev_ddmmyyyy_list = [str(datetime.date(year, 6, 1) + datetime.timedelta(days=day)) for day in range(-10, 0)]
if year == 2011:
self.prediction_df = self.inflow_df[self.inflow_df['FLOW_DATE'].isin(prev_ddmmyyyy_list)]
self.prediction_df['date'] = self.prediction_df['FLOW_DATE']
self.prediction_df = self.prediction_df[['date', 'INFLOW_CUSECS', 'OUTFLOW_CUECS']]
self.prediction_df.rename(columns={'INFLOW_CUSECS': 'ACTUAL INFLOW', 'OUTFLOW_CUECS': 'ACTUAL OUTFLOW'}, inplace=True)
self.prediction_df['STORAGE'] = np.nan
self.prediction_df['INFLOW'] = np.nan
self.prediction_df['OUTFLOW'] = np.nan
self.prediction_df['FORECAST'] = np.nan
self.prediction_df['DURATION'] = np.nan
self.prediction_df['EXPECTED MONSOON INFLOW'] = np.nan
self.prediction_df['EXPECTED MONSOON OUTFLOW'] = np.nan
self.prediction_df['EXPECTED NON MONSOON INFLOW'] = np.nan
# elif year == 2012:
# self.prediction_df = self.inflow_df[self.inflow_df['FLOW_DATE'].isin(prev_ddmmyyyy_list)]
# self.prediction_df['date'] = self.prediction_df['FLOW_DATE']
# self.prediction_df = self.prediction_df[['date', 'INFLOW_CUSECS', 'OUTFLOW_CUECS']]
# self.prediction_df.rename(columns={'INFLOW_CUSECS': 'ACTUAL INFLOW', 'OUTFLOW_CUECS': 'ACTUAL OUTFLOW'}, inplace=True)
# self.prediction_df['STORAGE'] = np.nan
# self.prediction_df['INFLOW'] = np.nan
# self.prediction_df['OUTFLOW'] = np.nan
# self.prediction_df['FORECAST'] = np.nan
# self.prediction_df['EXPECTED MONSOON INFLOW'] = np.nan
# self.prediction_df['EXPECTED NON MONSOON INFLOW'] = np.nan
else:
self.prediction_df = pd.read_json(f'predictions/KRS/predictions_{year-1}_{year}.json').reset_index()
self.prediction_df.rename(columns={'index': 'date'}, inplace=True)
self.prediction_df['date'] = self.prediction_df['date'].astype(str)
self.prediction_df = self.prediction_df[self.prediction_df['date'].isin(prev_ddmmyyyy_list)]
self.prediction_df.reset_index(drop=True, inplace=True)
# self.prediction_df = {'INFLOW FORECAST' : {}, 'OUTFLOW FORECAST' : {}}
# self.forecast(datetime.date(year, 6, 1))
# with open('fr.pk', 'wb') as f:
# pickle.dump(self.annual_forecast, f)
# with open('fr.pk', 'rb') as f:
# self.annual_forecast = pickle.load(f)
# print(self.annual_forecast)
# self.forecast(datetime.date(year+1, 5, 31))
def __apply_scaling(self, df):
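# Added note: applies the pre-fitted robust scalers to the numeric weather columns
# in place and divides the one-hot style weather-description columns by 24,
# apparently converting per-day counts of hourly descriptions into fractions of the
# day (an interpretation, not stated in the original code).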
df['temp'] = self.rs_temp.transform(df[['temp']]).flatten()
df['feels_like'] = self.rs_feels_like.transform(df[['feels_like']]).flatten()
df['temp_min'] = self.rs_temp_min.transform(df[['temp_min']]).flatten()
df['temp_max'] = self.rs_temp_max.transform(df[['temp_max']]).flatten()
df['pressure'] = self.rs_pressure.transform(df[['pressure']]).flatten()
df['humidity'] = self.rs_humidity.transform(df[['humidity']]).flatten()
df['wind_speed'] = self.rs_wind_speed.transform(df[['wind_speed']]).flatten()
df['wind_deg'] = self.rs_wind_deg.transform(df[['wind_deg']]).flatten()
df['rain'] = self.rs_rain.transform(df[['rain']]).flatten()
df['clouds_all'] = self.rs_clouds_all.transform(df[['clouds_all']]).flatten()
df.loc[:, ('broken clouds', 'overcast clouds', 'scattered clouds', 'sky is clear',
'few clouds', 'fog', 'light rain', 'mist', 'haze', 'moderate rain',
'heavy intensity rain', 'light intensity drizzle', 'drizzle',
'heavy intensity drizzle', 'very heavy rain',
'thunderstorm with heavy rain', 'thunderstorm with rain',
'thunderstorm', 'proximity shower rain', 'thunderstorm with light rain',
'shower rain', 'light intensity shower rain', 'light thunderstorm')] = df.loc[:, ('broken clouds', 'overcast clouds', 'scattered clouds', 'sky is clear',
'few clouds', 'fog', 'light rain', 'mist', 'haze', 'moderate rain',
'heavy intensity rain', 'light intensity drizzle', 'drizzle',
'heavy intensity drizzle', 'very heavy rain',
'thunderstorm with heavy rain', 'thunderstorm with rain',
'thunderstorm', 'proximity shower rain', 'thunderstorm with light rain',
'shower rain', 'light intensity shower rain', 'light thunderstorm')] / 24
def __load_models(self):
self.inflow_model = XGBoostModel('models/krs_inflow_xgboost.json')
self.outflow_model = XGBoostModel('models/krs_outflow_xgboost.json')
def __predict(self, loop_date, max_forecast_date, current_forecast_df=None):
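# Added note: builds the model input from a trailing ~10-day window of district
# weather (observed values where available, climatological averages beyond
# `max_forecast_date`) plus the previous 10 days of inflow, together with encoded
# month/date/season; inflow is predicted first and that prediction is fed into the
# outflow model.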
mysuru_df = self.mysuru_df#self.mysuru_forecast_df# if forecast else self.mysuru_df
kodagu_df = self.kodagu_df#self.kodagu_forecast_df# if forecast else self.kodagu_df
hassan_df = self.hassan_df#self.hassan_forecast_df# if forecast else self.hassan_df
# kodagu_df = self.__apply_scaling(self.kodagu_df)
# hassan_df = self.__apply_scaling(self.hassan_df)
# mysuru_df = self.__apply_scaling(self.mysuru_df)
# date_objs = str(loop_date).split('-')
month = loop_date.month
date = loop_date.day
season = self.season_df[(self.season_df['DATE'] == date) & (self.season_df['MONTH'] == month)]['SEASON'].values[0]
# prev_date_list = [str(loop_date + datetime.timedelta(days=day)) for day in range(-10, 1)]
days_between_loop_date_max_forecast_date = (loop_date - max_forecast_date).days
if days_between_loop_date_max_forecast_date < 1:
prev_date_list = [str(loop_date + datetime.timedelta(days=day)) for day in range(-10, 1)]
sel_kodagu = kodagu_df[kodagu_df['date'].isin(prev_date_list)].drop(columns=['date', 'city_name']).values.flatten()
sel_hassan = hassan_df[hassan_df['date'].isin(prev_date_list)].drop(columns=['date', 'city_name']).values.flatten()
sel_mysuru = mysuru_df[mysuru_df['date'].isin(prev_date_list)].drop(columns=['date', 'city_name']).values.flatten()
# print('a', loop_date, max_forecast_date, sel_mysuru.shape)
else:
prev_actual_weather_dates = [str(loop_date + datetime.timedelta(days=day)) for day in range(-10, -days_between_loop_date_max_forecast_date+1)]
prev_avg_weather_dates = [str(loop_date + datetime.timedelta(days=day)) for day in range(max(-days_between_loop_date_max_forecast_date+1, -10), 1)]
kodagu_actual = kodagu_df[kodagu_df['date'].isin(prev_actual_weather_dates)].drop(columns=['date', 'city_name']).values.flatten()
kodagu_avg = self.kodagu_avg_df[self.kodagu_avg_df['date'].isin(prev_avg_weather_dates)].drop(columns=['date', 'city_name']).values.flatten()
sel_kodagu = np.concatenate([kodagu_actual, kodagu_avg])
hassan_actual = hassan_df[hassan_df['date'].isin(prev_actual_weather_dates)].drop(columns=['date', 'city_name']).values.flatten()
hassan_avg = self.hassan_avg_df[self.hassan_avg_df['date'].isin(prev_avg_weather_dates)].drop(columns=['date', 'city_name']).values.flatten()
sel_hassan = np.concatenate([hassan_actual, hassan_avg])
mysuru_actual = mysuru_df[mysuru_df['date'].isin(prev_actual_weather_dates)].drop(columns=['date', 'city_name']).values.flatten()
mysuru_avg = self.mysuru_avg_df[self.mysuru_avg_df['date'].isin(prev_avg_weather_dates)].drop(columns=['date', 'city_name']).values.flatten()
sel_mysuru = np.concatenate([mysuru_actual, mysuru_avg])
# print(prev_actual_weather_dates, prev_avg_weather_dates)
# print('b', loop_date, max_forecast_date, kodagu_avg.shape, hassan_avg.shape, mysuru_avg.shape)
# print(self.mysuru_avg_df[self.mysuru_avg_df['date'].isin(prev_avg_weather_dates)][-3:])
# if str(loop_date) == '2012-05-31':
# print(prev_actual_weather_dates, prev_avg_weather_dates)
# print('b', loop_date, max_forecast_date, sel_mysuru.shape, mysuru_actual.shape, mysuru_avg.shape)
# print(self.mysuru_avg_df[self.mysuru_avg_df['date'].isin(prev_avg_weather_dates)])
# print('--------------')
# if days_between_loop_date_max_forecast_date < 2:
# print('a')
if current_forecast_df is not None:
prev_inflow_dates = [str(loop_date + datetime.timedelta(days=day)) for day in range(-10, 0)]
forecast_inflow_df = current_forecast_df[current_forecast_df['date'].isin(prev_inflow_dates)]
missing_dates_in_forecast = set(prev_inflow_dates) - set(forecast_inflow_df['date'])
forecast_inflow = forecast_inflow_df['INFLOW'].values.flatten()
predicted_inflow = self.prediction_df[self.prediction_df['date'].isin(missing_dates_in_forecast)]['ACTUAL INFLOW'].values.flatten()
sel_inflow = np.concatenate([predicted_inflow, forecast_inflow])
# print(self.prediction_df[self.prediction_df['date'].isin(prev_inflow_dates)])
# print(date, sel_inflow.shape, self.prediction_df)
# sel_inflow = self.inflow_df[self.inflow_df['FLOW_DATE'].isin(prev_inflow_dates)]['INFLOW_CUSECS'].values.flatten()
else:
prev_inflow_dates = [str(loop_date + datetime.timedelta(days=day)) for day in range(-10, 0)]
sel_inflow = self.prediction_df[self.prediction_df['date'].isin(prev_inflow_dates)]['ACTUAL INFLOW'].values.flatten()
# print(sel_mysuru.shape, sel_inflow.shape)
# prev_actual_inflow_dates = [str(loop_date + datetime.timedelta(days=day)) for day in range(-10, -days_between_loop_date_max_forecast_date+1)]
# prev_pred_inflow_dates = [str(loop_date + datetime.timedelta(days=day)) for day in range(-days_between_loop_date_max_forecast_date+1, 0)]
# # actual_inflow = self.inflow_df[self.inflow_df['FLOW_DATE'].isin(prev_actual_inflow_dates)]['INFLOW_CUSECS'].values.flatten()
# actual_inflow = self.inflow_df[self.inflow_df['date'].isin(prev_actual_inflow_dates)]['ACTUAL INFLOW'].values.flatten()
# pred_inflow = current_forecast_df[current_forecast_df['date'].isin(prev_pred_inflow_dates)]['INFLOW'].values.flatten()
# # print(date, actual_inflow.shape, pred_inflow.shape)
# # print(current_forecast_df.shape)
# sel_inflow = np.concatenate([actual_inflow, pred_inflow])
# sel_kodagu = kodagu_df[kodagu_df['date'].isin(prev_date_list)].drop(columns=['date', 'city_name']).values.flatten()
# sel_hassan = hassan_df[hassan_df['date'].isin(prev_date_list)].drop(columns=['date', 'city_name']).values.flatten()
# sel_mysuru = mysuru_df[mysuru_df['date'].isin(prev_date_list)].drop(columns=['date', 'city_name']).values.flatten()
# if str(loop_date) in ['2011-06-01', '2011-06-02', '2011-06-03']:
# print(date, self.prediction_df[self.prediction_df['date'].isin(prev_ddmmyyyy_list)], prev_ddmmyyyy_list, sel_inflow.shape[0])
# print('----------------------')
# if sel_inflow.shape[0] != len(prev_ddmmyyyy_list):
# raise Exception('Inflow shape mismatch')
month_transformed = self.month_le.transform([month])
date_transformed = self.date_le.transform([date])
season_transformed = self.season_le.transform([season])
# print(sel_mysuru.shape, sel_inflow.shape)
inflow_prediction_input = np.concatenate([month_transformed, date_transformed, season_transformed, sel_mysuru, sel_kodagu, sel_hassan, self.rs_inflow.transform([sel_inflow])[0]]).reshape(1, -1)
# print(inflow_prediction_input)
# print(loop_date)
# print(inflow_prediction_input.shape, sel_hassan.shape, sel_kodagu.shape, sel_mysuru.shape)
y_hat_inflow = self.inflow_model(inflow_prediction_input)
y_hat_inflow = self.rs_inflow.inverse_transform([[y_hat_inflow.item()]])[0][0]
y_hat_inflow = round(y_hat_inflow) if y_hat_inflow > 0 else 0
# y_hat_inflow = min(y_hat_inflow, 30000)
# print(y_hat_inflow)
outflow_prediction_input = np.concatenate([month_transformed, date_transformed, season_transformed, sel_mysuru, self.rs_inflow.transform([sel_inflow])[0], self.rs_inflow.transform([[y_hat_inflow]])[0]]).reshape(1, -1)
y_hat_outflow = self.outflow_model(outflow_prediction_input)
y_hat_outflow = self.rs_outflow.inverse_transform([[y_hat_outflow.item()]])[0][0]
y_hat_outflow = round(y_hat_outflow)# + self.normalizing_factor)
y_hat_outflow = y_hat_outflow if y_hat_outflow > 0 else 0
return round(y_hat_inflow), round(y_hat_outflow)
def __get_storage(self, actual_inflow, outflow, curr_storage):
actual_inflow_tmc = 8.64e-05 * actual_inflow
pred_outflow_tmc = 8.64e-05 * outflow
storage = curr_storage + actual_inflow_tmc
if pred_outflow_tmc > storage or storage == 0:
outflow = 11574.074 * storage
storage = 0
else:
storage -= pred_outflow_tmc
if storage > self.max_storage:
outflow += 11574.074 * (storage - self.max_storage)
storage = self.max_storage
return storage, round(outflow)
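# Unit note for __get_storage above (added for clarity): 8.64e-05 converts a flow of
# 1 cusec sustained for one day into TMC (1 cusec-day = 86,400 cubic feet = 8.64e-5
# thousand million cubic feet), and 11574.074 (= 1e9 / 86400) converts a TMC volume
# back into an equivalent average outflow in cusecs over one day.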
def get_avg_weather_df(self, year):
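# Added note: builds a day-by-day climatological average of the weather for each
# district, i.e. for every calendar date from 1 June of `year` to the cycle end date
# it averages that date's observations over all earlier years (29 Feb falls back to
# the neighbouring February dates). Used as a stand-in where actual weather is not
# available.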
# weather_forecast_available_end_date = weather_forecast_availibility_date
# weather_forecast_unavailable_start_date = weather_forecast_available_end_date + datetime.timedelta(days=1)
# weather_forecast_unavailable_end_date = checkpoint_date
# weather_forecast_available_date_list = pd.date_range(date, weather_forecast_available_end_date, freq='d').astype(str)
# forecast_df_start_date = date - datetime.timedelta(days=10)
# forecast_df_end_date = self.cycle_end_date
# weather_forecast_date_list = pd.date_range(, pd, freq='d')
date = datetime.date(year, 6, 1)
kodagu_df_tmp = self.weather_df[self.weather_df['city_name'] == 'Kodagu'].reset_index(drop=True)
hassan_df_tmp = self.weather_df[self.weather_df['city_name'] == 'Hassan'].reset_index(drop=True)
mysuru_df_tmp = self.weather_df[self.weather_df['city_name'] == 'Mysuru'].reset_index(drop=True)
# mysuru1 = mysuru_df_tmp[mysuru_df_tmp['date'].isin(weather_forecast_available_date_list)]
mysuru_df_tmp['YYYY'] = mysuru_df_tmp['date'].str[:4].astype(int)
mysuru_df_tmp['MM'] = mysuru_df_tmp['date'].str[5:7].astype(int)
mysuru_df_tmp['DD'] = mysuru_df_tmp['date'].str[8:].astype(int)
# kodagu1 = kodagu_df_tmp[kodagu_df_tmp['date'].isin(weather_forecast_available_date_list)]
kodagu_df_tmp['YYYY'] = kodagu_df_tmp['date'].str[:4].astype(int)
kodagu_df_tmp['MM'] = kodagu_df_tmp['date'].str[5:7].astype(int)
kodagu_df_tmp['DD'] = kodagu_df_tmp['date'].str[8:].astype(int)
# hassan1 = hassan_df_tmp[hassan_df_tmp['date'].isin(weather_forecast_available_date_list)]
hassan_df_tmp['YYYY'] = hassan_df_tmp['date'].str[:4].astype(int)
hassan_df_tmp['MM'] = hassan_df_tmp['date'].str[5:7].astype(int)
hassan_df_tmp['DD'] = hassan_df_tmp['date'].str[8:].astype(int)
mysuru = []
kodagu = []
hassan = []
delta = datetime.timedelta(days=1)
while date <= self.cycle_end_date:
yyyymmdd_str = str(date).split()[0]
yyyy = int(yyyymmdd_str.split('-')[0])
mm = int(yyyymmdd_str.split('-')[1])
dd = int(yyyymmdd_str.split('-')[2])
if mm == 2 and dd == 29:
mysuru.append([yyyymmdd_str] + ['mysuru'] + mysuru_df_tmp[(mysuru_df_tmp['YYYY'] < yyyy) & (mysuru_df_tmp['MM'] == mm) & (mysuru_df_tmp['DD'].isin([dd-1, dd, dd+1]))].drop(columns=['date', 'city_name', 'YYYY', 'MM', 'DD']).mean().values.tolist())
kodagu.append([yyyymmdd_str] + ['kodagu'] + kodagu_df_tmp[(kodagu_df_tmp['YYYY'] < yyyy) & (kodagu_df_tmp['MM'] == mm) & (kodagu_df_tmp['DD'].isin([dd-1, dd, dd+1]))].drop(columns=['date', 'city_name', 'YYYY', 'MM', 'DD']).mean().values.tolist())
hassan.append([yyyymmdd_str] + ['hassan'] + hassan_df_tmp[(hassan_df_tmp['YYYY'] < yyyy) & (hassan_df_tmp['MM'] == mm) & (hassan_df_tmp['DD'].isin([dd-1, dd, dd+1]))].drop(columns=['date', 'city_name', 'YYYY', 'MM', 'DD']).mean().values.tolist())
else:
mysuru.append([yyyymmdd_str] + ['mysuru'] + mysuru_df_tmp[(mysuru_df_tmp['YYYY'] < yyyy) & (mysuru_df_tmp['MM'] == mm) & (mysuru_df_tmp['DD'] == dd)].drop(columns=['date', 'city_name', 'YYYY', 'MM', 'DD']).mean().values.tolist())
kodagu.append([yyyymmdd_str] + ['kodagu'] + kodagu_df_tmp[(kodagu_df_tmp['YYYY'] < yyyy) & (kodagu_df_tmp['MM'] == mm) & (kodagu_df_tmp['DD'] == dd)].drop(columns=['date', 'city_name', 'YYYY', 'MM', 'DD']).mean().values.tolist())
hassan.append([yyyymmdd_str] + ['hassan'] + hassan_df_tmp[(hassan_df_tmp['YYYY'] < yyyy) & (hassan_df_tmp['MM'] == mm) & (hassan_df_tmp['DD'] == dd)].drop(columns=['date', 'city_name', 'YYYY', 'MM', 'DD']).mean().values.tolist())
# print(mysuru)
date += delta
columns = self.mysuru_df.columns
mysuru_avg_df = pd.DataFrame(mysuru, columns=columns)
kodagu_avg_df = pd.DataFrame(kodagu, columns=columns)
hassan_avg_df = pd.DataFrame(hassan, columns=columns)
# print(mysuru_forecast_df[mysuru_forecast_df.isnull().any(axis=1)])
return mysuru_avg_df, kodagu_avg_df, hassan_avg_df
def forecast(self, start_date, max_forecast_date):
date = start_date# + datetime.timedelta(days=1)
# actual_predict_date = date + datetime.timedelta(days=26)
# self.remaining_expected_monsoon_inflow = 0
# self.remaining_expected_non_monsoon_inflow = 0
# self.remaining_expected_monsoon_outflow = 0
# self.remaining_expected_non_monsoon_outflow = 0
forecast_df = pd.DataFrame(columns=['date', 'INFLOW', 'OUTFLOW'])
# else:
# if date.month >= 6:
# target_end_date = datetime.date(date.year, 12, 31)
# else:
# target_end_date = datetime.date(date.year, 5, 31)
# forecast = {}
delta = datetime.timedelta(days=1)
while date <= self.cycle_end_date:
# print(date, target_end_date)
inflow, outflow = self.__predict(date, max_forecast_date, forecast_df)
outflow = outflow + self.normalizing_factor
# index = self.prediction_df[self.prediction_df['date'].astype(str) == str(date)].index
# forecast[str(date)] = {'INFLOW': inflow, 'OUTFLOW': outflow}
# print(type(self.prediction_df['date']))
# if len(index) == 0:
index = forecast_df.shape[0]
forecast_df.loc[index, 'date'] = str(date)
forecast_df.loc[index, 'INFLOW'] = inflow
forecast_df.loc[index, 'OUTFLOW'] = outflow
# self.prediction_df['INFLOW FORECAST'].update({f'{formatted_date}': inflow})
# self.annual_forecast['OUTFLOW FORECAST'].update({f'{formatted_date}': outflow})
# if date.month < 6:
# # print(date, inflow, outflow)
# self.remaining_expected_non_monsoon_inflow += inflow
# self.remaining_expected_non_monsoon_outflow += outflow
# else:
# self.remaining_expected_monsoon_inflow += inflow
# self.remaining_expected_monsoon_outflow += outflow
date += delta
# if loop_date.month >= 6:
# self.total_expected_monsoon_inflow = self.cum_inflow_till_prev_day + inflow + self.remaining_expected_monsoon_inflow
# self.total_expected_monsoon_outflow = self.cum_outflow_till_prev_day + outflow + self.remaining_expected_monsoon_outflow
# self.total_expected_non_monsoon_inflow = self.remaining_expected_non_monsoon_inflow
# self.total_expected_non_monsoon_outflow = self.remaining_expected_non_monsoon_outflow
# self.actual_monsoon_inflow = self.cum_inflow_till_prev_day
# self.actual_monsoon_outflow = self.cum_outflow_till_prev_day
# self.actual_non_monsoon_inflow = 0
# self.actual_non_monsoon_outflow = 0
# else:
# # self.total_expected_monsoon_inflow = self.cum_inflow_till_prev_day + inflow + self.remaining_expected_monsoon_inflow
# # self.total_expected_monsoon_outflow = self.cum_outflow_till_prev_day + outflow + self.remaining_expected_monsoon_outflow
# if str(loop_date).endswith('01-01'):
# self.actual_monsoon_inflow = self.cum_inflow_till_prev_day
# self.actual_monsoon_outflow = self.cum_outflow_till_prev_day
# self.actual_non_monsoon_inflow = 0
# self.actual_non_monsoon_outflow = 0
# else:
# self.actual_non_monsoon_inflow = self.cum_inflow_till_prev_day - self.actual_monsoon_inflow
# self.actual_non_monsoon_outflow = self.cum_outflow_till_prev_day - self.actual_non_monsoon_outflow
# self.total_expected_non_monsoon_inflow = self.cum_inflow_till_prev_day + inflow + self.remaining_expected_non_monsoon_inflow - self.actual_monsoon_inflow
# self.total_expected_non_monsoon_outflow = self.cum_outflow_till_prev_day + outflow + self.remaining_expected_non_monsoon_outflow - self.actual_monsoon_outflow
# import matplotlib.pyplot as plt
# t = self.inflow_df.copy()
# t.set_index('FLOW_DATE', inplace=True)
# self.prediction_df['OUTFLOW'].plot()
# t['OUTFLOW_CUECS'].plot()
# plt.show(block=True)
# plt.pause()
#expected inflow
self.expected_monsoon_inflow = forecast_df[(pd.to_datetime(forecast_df['date']) <= pd.Timestamp(self.cycle_end_date.year-1, 12, 31)) & (pd.to_datetime(forecast_df['date']) > pd.Timestamp(self.cycle_end_date.year-1, 5, 31))]['INFLOW'].sum() + \
self.prediction_df[(pd.to_datetime(self.prediction_df['date']) <= pd.Timestamp(self.cycle_end_date.year-1, 12, 31)) & (pd.to_datetime(self.prediction_df['date']) > pd.Timestamp(self.cycle_end_date.year-1, 5, 31))]['ACTUAL INFLOW'].sum()
self.expected_non_monsoon_inflow = forecast_df[(pd.to_datetime(forecast_df['date']) <= pd.Timestamp(self.cycle_end_date.year, 5, 31)) & (pd.to_datetime(forecast_df['date']) >= pd.Timestamp(self.cycle_end_date.year, 1, 1))]['INFLOW'].sum() + \
self.prediction_df[( | pd.to_datetime(self.prediction_df['date']) | pandas.to_datetime |
#!python
##################################################
# ACCESS QC Module
# Innovation Laboratory
# Center For Molecular Oncology
# Memorial Sloan Kettering Cancer Research Center
# maintainer: <NAME> (<EMAIL>)
#
#
# This module functions as an aggregation step to combine QC metrics
# across Waltz runs on different bam types.
import shutil
import logging
import argparse
import numpy as np
import pandas as pd
from python_tools.constants import *
from python_tools.util import to_csv
def unique_or_tot(x):
if TOTAL_LABEL in x:
return TOTAL_LABEL
else:
return PICARD_LABEL
def get_read_counts_table(path, pool):
"""
This method is only used to generate stats for un-collapsed bams
"""
read_counts_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts = pd.read_csv(read_counts_path, sep='\t')
# Melt our DF to get all values of the on target rate and duplicate rates as values
read_counts = pd.melt(read_counts, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
# We only want the read counts-related row values
read_counts = read_counts[~read_counts['Category'].isin(['bam', TOTAL_READS_COLUMN, UNMAPPED_READS_COLUMN, 'duplicate_fraction'])]
read_counts['method'] = read_counts['Category'].apply(unique_or_tot)
read_counts['pool'] = pool
# read_counts = read_counts.reset_index(drop=True)
return read_counts
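# Illustrative sketch (not part of the original pipeline): the reshape performed by
# pd.melt above, shown on a tiny made-up read-counts table. All column names except
# SAMPLE_ID_COLUMN are hypothetical.
def _example_melt_read_counts():
    demo = pd.DataFrame({SAMPLE_ID_COLUMN: ['S1', 'S2'],
                         'total_reads': [100, 150],
                         'unique_reads': [80, 120]})
    # One row per (sample, metric) pair, with columns SAMPLE_ID_COLUMN, Category, value
    return pd.melt(demo, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')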
def get_read_counts_total_table(path, pool):
"""
This table is used for "Fraction of Total Reads that Align to the Human Genome" plot
"""
full_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_total = pd.read_csv(full_path, sep='\t')
col_idx = ~read_counts_total.columns.str.contains(PICARD_LABEL)
read_counts_total = read_counts_total.iloc[:, col_idx]
read_counts_total['AlignFrac'] = read_counts_total[TOTAL_MAPPED_COLUMN] / read_counts_total[TOTAL_READS_COLUMN]
read_counts_total[TOTAL_OFF_TARGET_FRACTION_COLUMN] = 1 - read_counts_total[TOTAL_ON_TARGET_FRACTION_COLUMN]
read_counts_total['pool'] = pool
return read_counts_total
def get_coverage_table(path, pool):
"""
Coverage table
"""
full_path = os.path.join(path, AGBM_COVERAGE_FILENAME)
coverage_table = pd.read_csv(full_path, sep='\t')
coverage_table = pd.melt(coverage_table, id_vars=SAMPLE_ID_COLUMN, var_name='method', value_name='average_coverage')
coverage_table['method'] = coverage_table['method'].str.replace('average_coverage_', '')
coverage_table['pool'] = pool
return coverage_table
def get_collapsed_waltz_tables(path, method, pool):
"""
Creates read_counts, coverage, and gc_bias tables for collapsed bam metrics.
"""
read_counts_table_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_table = pd.read_csv(read_counts_table_path, sep='\t')
read_counts_table = pd.melt(read_counts_table, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
read_counts_table = read_counts_table.dropna(axis=0)
read_counts_table['method'] = [method] * len(read_counts_table)
read_counts_table['pool'] = pool
# Todo: merge with get_cov_table
coverage_table_path = '/'.join([path, AGBM_COVERAGE_FILENAME])
coverage_table = pd.read_csv(coverage_table_path, sep='\t', usecols=[0, 1], names=[SAMPLE_ID_COLUMN, 'average_coverage'], header=0)
coverage_table['method'] = [method] * len(coverage_table)
coverage_table['pool'] = pool
gc_bias_table = get_gc_table(method, WALTZ_INTERVALS_FILENAME_SUFFIX, path)
return [read_counts_table, coverage_table, gc_bias_table]
def get_gc_table(curr_method, intervals_filename_suffix, path):
"""
Function to create GC content table
"""
gc_with_cov = pd.DataFrame(columns=GC_BIAS_HEADER)
sample_files = [f for f in os.listdir(path) if intervals_filename_suffix in f]
for sample in sample_files:
filename = os.path.join(path, sample)
curr_table = | pd.read_csv(filename, names=WALTZ_INTERVALS_FILE_HEADER, sep='\t') | pandas.read_csv |
import numpy as np
import pandas as pd
from app_data import AppData
from page.base_page import BasePage
import streamlit as st
class Tag2VecPage(BasePage):
def __init__(self, app_data: AppData, **kwargs):
super().__init__(app_data, **kwargs)
self.title = 'Tag2Vec'
st.title(self.title)
def run(self):
self.tag2vec()
def tag2vec(self):
tag_tokens = self.app_data.tag_tokens
model = self.app_data.tag2vec_model
tag_select = st.selectbox('tag', list(tag_tokens.keys()))
tag_sim = model.wv.most_similar(tag_select, topn=100)
tag_sim = [list(t) for t in tag_sim]
for tag_info in tag_sim:
tks = tag_tokens.get(tag_info[0], [])
tag_info.append(len(tks))
tag_sim_df = | pd.DataFrame(tag_sim, columns=['tag', 'similarity', 'tokens_in_tag']) | pandas.DataFrame |
import pandas as pd
from unittest import TestCase
from datacatalog_fileset_enricher.gcs_storage_stats_summarizer import GCStorageStatsSummarizer
class GCStorageStatsSummarizerTestCase(TestCase):
def test_create_stats_from_dataframe_with_no_dataframe_should_summarize_the_bucket_stats(self):
dataframe = None
filtered_buckets_stats = [{'bucket_name': 'my_bucket', 'files': 100}]
execution_time = pd.Timestamp.utcnow()
bucket_prefix = None
stats = GCStorageStatsSummarizer.create_stats_from_dataframe(dataframe,
['gs://my_bucket/*'],
filtered_buckets_stats,
execution_time, bucket_prefix)
self.assertEqual(0, stats['count'])
self.assertEqual('gs://my_bucket/*', stats['prefix'])
self.assertEqual('my_bucket [count: 100]', stats['files_by_bucket'])
self.assertEqual(1, stats['buckets_found'])
self.assertEqual(execution_time, stats['execution_time'])
self.assertEqual(None, stats['bucket_prefix'])
def test_create_stats_from_dataframe_with_no_dataframe_and_no_bucket_stats_should_summarize_the_bucket_stats( # noqa: E501
self): # noqa:E125
dataframe = None
filtered_buckets_stats = []
execution_time = | pd.Timestamp.utcnow() | pandas.Timestamp.utcnow |
import numpy as np
import pandas as pd
from .utils import Utils
from .global_variables import LAMBDA
# TODO, classify methods which require wind_speed, or which require solar_rad.
class ETBase(Utils):
"""This is the base class for evapotranspiration calculation. It calculates
etp according to [Jensen and Haise](https://doi.org/10.1061/JRCEA4.0000287)
method. Any new ETP calculation must inherit from it and must implement
the `__call__` method.
"""
def __init__(self,
input_df: pd.DataFrame,
units:dict,
constants:dict,
**kwargs
):
"""
Arguments:
input_df :
units :
constants :
kwargs :
"""
self.name = self.__class__.__name__
super(ETBase, self).__init__(input_df.copy(),
units.copy(),
constants.copy(),
**kwargs)
def requirements(self, **kwargs):
if 'constants' in kwargs:
constants = kwargs['constants']
else:
constants = ['lat_dec_deg', 'altitude', 'ct', 'tx']
if 'ts' in kwargs:
ts = kwargs['ts']
else:
ts = ['temp']
for cons in constants:
if cons not in self.cons:
if cons in self.default_cons:
val = self.default_cons[cons]['def_val']
desc = self.default_cons[cons]['desc']
if val is not None:
print("Warning: default value {} of parameter {} which is {} is being used".format(val,
cons,
desc))
self.cons[cons] = val
else:
raise ValueError("Value of constant {} must be provided to calculate ETP using {}"
.format(cons, self.name))
for _ts in ts:
if _ts not in self.input.columns:
raise ValueError("Timeseries {} is required for calculation of ETP using {}"
.format(_ts, self.name))
def __call__(self, *args,
transform: bool=False,
**kwargs):
"""
as given (eq 9) in Xu and Singh, 2000 and implemented in [1]
uses: a_s, b_s, ct=0.025, tx=-3
Arguments:
transform : whether to transform the calculated etp to frequencies
other than the one at which it is calculated.
[1] https://github.com/DanluGuo/Evapotranspiration/blob/8efa0a2268a3c9fedac56594b28ac4b5197ea3fe/R/Evapotranspiration.R#L2734
"""
self.requirements(constants=['lat_dec_deg', 'altitude', 'ct', 'tx'],
ts=['temp'])
rs = self.rs()
tmp1 = np.multiply(np.multiply(self.cons['ct'], np.add(self.input['temp'], self.cons['tx'])), rs)
et = np.divide(tmp1, LAMBDA)
self.post_process(et, transform=transform)
return et
def post_process(self, et, transform=False):
if isinstance(et, np.ndarray):
et = pd.Series(et, index=self.input.index)
self.output['et_' + self.name + '_' + self.freq_str] = et
if transform:
self.transform_etp(self.name)
def summary(self):
methods_evaluated = []
for m in self.output.keys():
if 'Hourly' in m:
methods_evaluated.append(m)
for m in methods_evaluated:
ts = self.output[m]
yrs = np.unique(ts.index.year)
print('For {} \n'.format(m.split('_')[1]), end=',')
for yr in yrs:
st, en = str(yr) + '0101', str(yr) + '1231'
yr_ts = ts[st:en]
yr_sum = yr_ts.sum().values[0]
yr_mean = yr_ts.mean().values[0]
print('for year {}: sum: {:<10.1f} mean: {:<10.1f}'.format(yr, yr_sum, yr_mean))
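# Illustrative sketch (not part of the original library): a minimal custom method
# wired into the ETBase contract described in the class docstring above. The 0.0023
# coefficient and the use of extraterrestrial radiation are placeholders, not a
# published formula.
class _ExampleCustomET(ETBase):
    def __call__(self, *args, **kwargs):
        self.requirements(constants=['lat_dec_deg', 'altitude'], ts=['temp'])
        ra = self._et_rad()  # extraterrestrial radiation for each timestep
        et = 0.0023 * (self.input['temp'] + 17.8) * ra / LAMBDA
        self.post_process(et, kwargs.get('transform', False))
        return et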
class Abtew(ETBase):
"""
daily etp using equation 3 in [1]. `k` is a dimentionless coefficient.
uses: k=0.52, a_s=0.23, b_s=0.5
:param `k` coefficient, default value taken from [1]
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
:param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
[1] <NAME>. (1996). EVAPOTRANSPIRATION MEASUREMENTS AND MODELING FOR THREE WETLAND SYSTEMS IN
SOUTH FLORIDA 1. JAWRA Journal of the American Water Resources Association, 32(3),
465-473. https://doi.org/10.1111/j.1752-1688.1996.tb04044.x
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'abtew_k'])
rs = self.rs()
et = np.multiply(self.cons['abtew_k'], np.divide(rs, LAMBDA))
self.post_process(et, kwargs.get('transform', False))
return et
class Albrecht(ETBase):
"""
Developed in Germany by Albrecht, 1950. Djaman et al., 2016 Wrote the formula as
eto = (0.1005 + 0.297 * u2) * (es - ea)
"""
def __call__(self, *args, **kwargs):
# Mean saturation vapour pressure
if 'es' not in self.input:
if self.freq_str == 'Daily':
es = self.mean_sat_vp_fao56()
elif self.freq_str == 'Hourly':
es = self.sat_vp_fao56(self.input['temp'].values)
elif self.freq_str == 'sub_hourly': # TODO should sub-hourly be same as hourly?
es = self.sat_vp_fao56(self.input['temp'].values)
else:
raise NotImplementedError
else:
es = self.input['es']
# actual vapour pressure
ea = self.avp_from_rel_hum()
u2 = self._wind_2m()
eto = (0.1005 + 0.297 * u2) * (es - ea)
self.post_process(eto, kwargs.get('transform', False))
return eto
class BlaneyCriddle(ETBase):
"""
using formulation of Blaney-Criddle for daily reference crop ETP using monthly mean tmin and tmax.
Inaccurate under extreme climates: it underestimates in windy, dry and sunny conditions and overestimates under
calm, humid and clouded conditions.
<NAME>., & <NAME>. (1977). Crop water requirements, FAO Irrigation and Drainage.
Paper 24, 2a ed., Roma, Italy.
"""
def __call__(self, *args, **kwargs):
# TODO include modified BlaneyCriddle as introduced in [3]
self.requirements(constants=['e0', 'e1', 'e2', 'e3', 'e4', 'e5'])  # check that all constants are present
N = self.daylight_fao56() # mean daily percentage of annual daytime hours
u2 = self._wind_2m()
rh_min = self.input['rh_min']
n = self.input['sunshine_hrs']
ta = self.input['temp'].values
# undefined working variable (Allen and Pruitt, 1986; Shuttleworth, 1992) (S9.8)
a1 = self.cons['e0'] + self.cons['e1'] * rh_min + self.cons['e2'] * n / N
a2 = self.cons['e3'] * u2
a3 = self.cons['e4'] * rh_min * n / N + self.cons['e5'] * rh_min * u2
bvar = a1 + a2 + a3
# calculate yearly sum of daylight hours and assign that value to each point in array `N`
n_annual = assign_yearly(N, self.input.index)
# percentage of actual daytime hours for the day comparing to the annual sum of maximum sunshine hours
p_y = 100 * n / n_annual['N'].values
# reference crop evapotranspiration
et = (0.0043 * rh_min - n / N - 1.41) + bvar * p_y * (0.46 * ta + 8.13)
self.post_process(et, kwargs.get('transform', False))
return et
class BrutsaertStrickler(ETBase):
"""
using formulation given by BrutsaertStrickler
:param `alpha_pt` Priestley-Taylor coefficient = 1.26 for Priestley-Taylor model (Priestley and Taylor, 1972)
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
:param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
:param `albedo` Any numeric value between 0 and 1 (dimensionless), albedo of the evaporative surface
representing the portion of the incident radiation that is reflected back at the surface.
Default is 0.23 for surface covered with short reference crop.
:return: et
https://doi.org/10.1029/WR015i002p00443
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['alphaPT']) # check that all constants are present
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # Vapour pressure, *ea*
vas = self.mean_sat_vp_fao56()
u2 = self._wind_2m()
f_u2 = np.add(2.626, np.multiply(1.381, u2))
r_ng = self.net_rad(vabar)
alpha_pt = self.cons['alphaPT']
et = np.subtract(np.multiply(np.multiply((2*alpha_pt-1),
np.divide(delta, np.add(delta, gamma))),
np.divide(r_ng, LAMBDA)),
np.multiply(np.multiply(np.divide(gamma, np.add(delta, gamma)), f_u2),
np.subtract(vas, vabar)))
self.post_process(et, kwargs.get('transform', False))
return et
class Camargo(ETBase):
"""
Originally presented by Camargo, 1971. The following formula is presented in Fernandes et al., 2012, quoting
Sedyiama et al., 1997.
eto = f * Tmean * ra * nd
Gurski et al., 2018 did not write nd in the formula. They expressed the formula to convert extraterrestrial radiation
into an equivalent mm/day as
ra[mm/day] = ra[MegaJoulePerMeterSquare PerDay] / 2.45
where 2.45 is constant.
eto: reference etp in mm/day.
f: an empircal factor taken as 0.01
ra: extraterrestrial radiation expressed as mm/day
nd: length of time interval
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['f_camargo']) # check that all constants are present
ra = self._et_rad()
if self.freq_str == 'Daily':
ra = ra/2.45
else:
raise NotImplementedError
et = self.cons['f_camargo'] * self.input['temp'] * ra
self.post_process(et, kwargs.get('transform', False))
return et
class Caprio(ETBase):
"""
Developed by Caprio (1974). Pandey et al 2016 wrote the equation as
eto = (0.01092708*t + 0.0060706) * rs
"""
def __call__(self, *args, **kwargs):
rs = self.rs()
eto = (0.01092708 * self.input['temp'] + 0.0060706) * rs
self.post_process(eto, kwargs.get('transform', False))
return eto
class ChapmanAustralia(ETBase):
"""using formulation of Chapman, 2001,
uses: a_s=0.23, b_s=0.5, ap=2.4, alphaA=0.14, albedo=0.23
https://192.168.127.12/MODSIM03/Volume_01/A03/04_Chapman.pdf
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'alphaA', 'pan_ap', 'albedo'],
ts=['temp'])
lat = self.cons['lat_dec_deg']
a_p = 0.17 + 0.011 * abs(lat)
b_p = np.power(10, (0.66 - 0.211 * abs(lat))) # constants (S13.3)
epan = self.evap_pan()
et = np.add(np.multiply(a_p, epan), b_p)
self.post_process(et, kwargs.get('transform', False))
return et
class Copais(ETBase):
"""
Developed for central Greece by Alexandris et al 2006 and used in Alexandris et al 2008.
"""
def __call__(self, *args, **kwargs):
et = None
self.post_process(et, kwargs.get('transform', False))
return et
class Dalton(ETBase):
"""
using Dalton formulation as mentioned in [1] in mm/day
uses:
es: mean saturation vapour pressure
ea: actual vapour pressure
u2: wind speed
References:
[1] https://water-for-africa.org/en/dalton.html
"""
def __call__(self, *args, **kwargs):
u2 = self._wind_2m()
fau = 0.13 + 0.14 * u2
# Mean saturation vapour pressure
if 'es' not in self.input:
if self.freq_str == 'Daily':
es = self.mean_sat_vp_fao56()
elif self.freq_str == 'Hourly':
es = self.sat_vp_fao56(self.input['temp'].values)
elif self.freq_str == 'sub_hourly': # TODO should sub-hourly be same as hourly?
es = self.sat_vp_fao56(self.input['temp'].values)
else:
raise NotImplementedError
else:
es = self.input['es']
# actual vapour pressure
ea = self.avp_from_rel_hum()
if 'vp_def' not in self.input:
vp_d = es - ea # vapor pressure deficit
else:
vp_d = self.input['vp_def']
etp = fau * vp_d
self.post_process(etp, kwargs.get('transform', False))
return etp
class DeBruinKeijman(ETBase):
"""
Calculates daily Pot ETP, developed by de Bruin and Keijman 1979 and used in Rosenberry et al 2004.
"""
class DoorenbosPruitt(ETBase):
"""
Developed by Doorenbos and Pruitt (1977); Poyen et al. wrote the following equation
et = a(delta/(delta+gamma) * rs) + b
b = -0.3
a = 1.066 - 0.13e-2 * rh + 0.045 * ud - 0.2e-3 * rh * ud - 0.315e-4 * rh**2 - 0.11e-2 * ud**2
used in Xu HP 2000.
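Worked example (illustrative numbers, not from the source): rh = 50 % and ud = 2 m/s give
a = 1.066 - 0.13e-2 * 50 + 0.045 * 2 - 0.2e-3 * 50 * 2 - 0.315e-4 * 50**2 - 0.11e-2 * 2**2 ~= 0.99.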
"""
class GrangerGray(ETBase):
"""
using formulation of Granger & Gray 1989, which is for non-saturated lands and is a modified form of Penman 1948.
uses: wind_f='pen48', a_s=0.23, b_s=0.5, albedo=0.23
:param `wind_f` str, if 'pen48' is used then the formulation of [1] is used, otherwise the formulation of [3] requires
wind_f to be 2.626.
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
:param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
:param `albedo` Any numeric value between 0 and 1 (dimensionless), albedo of the evaporative surface
representing the portion of the incident radiation that is reflected back at the surface.
Default is 0.23 for surface covered with short reference crop.
:return:
https://doi.org/10.1016/0022-1694(89)90249-7
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['wind_f']) # check that all constants are present
if self.cons['wind_f'] not in ['pen48', 'pen56']:
raise ValueError('value of given wind_f is not allowed.')
if self.cons['wind_f'] == 'pen48':
_a = 2.626
_b = 0.09
else:
_a = 1.313
_b = 0.06
# rs = self.rs()
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # Vapour pressure
r_n = self.net_rad(vabar) # net radiation
vas = self.mean_sat_vp_fao56()
u2 = self._wind_2m()
fau = _a + 1.381 * u2
ea = np.multiply(fau, np.subtract(vas, vabar))
# dimensionless relative drying power eq 7 in Granger, 1998
dry_pow = np.divide(ea, np.add(ea, np.divide(np.subtract(r_n, self.soil_heat_flux()), LAMBDA)))
# eq 6 in Granger, 1998
g_g = 1 / (0.793 + 0.20 * np.exp(4.902 * dry_pow)) + 0.006 * dry_pow
tmp1 = np.divide(np.multiply(delta, g_g), np.add(np.multiply(delta, g_g), gamma))
tmp2 = np.divide(np.subtract(r_n, self.soil_heat_flux()), LAMBDA)
tmp3 = np.multiply(np.divide(np.multiply(gamma, g_g), np.add(np.multiply(delta, g_g), gamma)), ea)
et = np.add(np.multiply(tmp1, tmp2), tmp3)
self.post_process(et, kwargs.get('transform', False))
return et
class Hamon(ETBase):
"""calculates evapotranspiration in mm using Hamon 1963 method as given in Lu et al 2005. It uses daily mean
temperature which can also be calculated
from daily max and min temperatures. It also requires `daylight_hrs`, the hours of daylight, which if not
provided as input, will be calculated from latitude. This means if `daylight_hrs` timeseries is not provided as
input, then argument `lat` must be provided.
pet = cts * n * n * vdsat
vdsat = (216.7 * vpsat) / (tavc + 273.3)
vpsat = 6.108 * exp((17.26939 * tavc)/(tavc + 237.3))
:uses cts: float, or array of 12 values for each month of year or a time series of equal length as input data.
if it is float, then that value will be considered for whole year. Default value of 0.0055 was used
by Hamon 1961, although he later used a different value; the same value is used here as in
WDMUtil. It should also be noted that 0.0055 applies when pet is in inches, so the pet is multiplied
by 25.4 in order to convert from inches to mm while still using 0.0055.
<NAME>. (1963). Computation of direct runoff amounts from storm rainfall. International Association of
Scientific Hydrology Publication, 63, 52-62.
Lu et al. (2005). A comparison of six potential evaportranspiration methods for regional use in the
southeastern United States. Journal of the American Water Resources Association, 41, 621-633.
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo', 'cts'],
ts=['temp'])
# allow cts to be provided as input while calling method, e.g we may want to use array
if 'cts' in kwargs:
cts = kwargs['cts']
else:
cts = self.cons['cts']
if 'sunshine_hrs' not in self.input.columns:
if 'daylight_hrs' not in self.input.columns:
daylight_hrs = self.daylight_fao56()
else:
daylight_hrs = self.input['daylight_hrs']
sunshine_hrs = daylight_hrs
print('Warning: sunshine hours are considered equal to daylight hours')
else:
sunshine_hrs = self.input['sunshine_hrs']
sunshine_hrs = np.divide(sunshine_hrs, 12.0)
# preference should be given to tmin and tmax if provided and if tmin, tmax is not provided then use temp which
# is mean temperature. This is because in original equations, vd_sat is calculated as average of max vapour
# pressure and minimum vapour pressure.
if 'tmax' not in self.input.columns:
if 'temp' not in self.input.columns:
raise ValueError('Either tmax and tmin or mean temperature should be provided as input')
else:
vd_sat = self.sat_vp_fao56(self.input['temp'])
else:
vd_sat = self.mean_sat_vp_fao56()
# in some literature, the equation is divided by 100 and then the cts value is 0.55 instead of 0.0055
et = cts * 25.4 * np.power(sunshine_hrs, 2) * (216.7 * vd_sat * 10 / (np.add(self.input['temp'], 273.3)))
self.post_process(et, kwargs.get('transform', False))
return et
class HargreavesSamani(ETBase):
"""
estimates daily ETo using Hargreaves method Hargreaves and Samani, 1985.
:uses
temp
tmin
tmax
:param
method: str, if `1985`, then the method of 1985 (Hargreaves and Samani, 1985) is followed, as calculated
and mentioned by Hargreaves and Allen, 2003.
if `2003`, then the formula is used as mentioned in [1]
Note: Current test passes for 1985 method.
There is a variation of Hargreaves introduced by Trajkovic 2007 as mentioned in Alexandris 2008.
[1] https://rdrr.io/cran/Evapotranspiration/man/ET.HargreavesSamani.html
doi.org/10.13031/2013.26773
"""
def __call__(self, method='1985', **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo'],
ts=['temp'])
if method == '2003':
tmp1 = np.multiply(0.0023, np.add(self.input['temp'], 17.8))
tmp2 = np.power(np.subtract(self.input['tmax'].values, self.input['tmin'].values), 0.5)
tmp3 = np.multiply(0.408, self._et_rad())
et = np.multiply(np.multiply(tmp1, tmp2), tmp3)
else:
ra_my = self._et_rad()
tmin = self.input['tmin'].values
tmax = self.input['tmax'].values
ta = self.input['temp'].values
# empirical coefficient by Hargreaves and Samani (1985) (S9.13)
c_hs = 0.00185 * np.power((np.subtract(tmax, tmin)), 2) - 0.0433 * (np.subtract(tmax, tmin)) + 0.4023
et = 0.0135 * c_hs * ra_my / LAMBDA * np.power((np.subtract(tmax, tmin)), 0.5) * (np.add(ta, 17.8))
self.post_process(et, kwargs.get('transform', False))
return et
class Haude(ETBase):
"""
only requires air temp and relative humidity at 2:00 pm. Good for moderate zones despite being simple [1].
<NAME>. (1954). Zur praktischen Bestimmung der aktuellen und potentiellen Evaporation und Evapotranspiration.
Schweinfurter Dr. und Verlag-Ges..
"""
def __call__(self, *args, **kwargs):
etp = None  # f_mon * (6.11 * 10**(7.48 * T / (237 + T)) - rf * es)
self.post_process(etp, kwargs.get('transform', False))
return etp
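# Illustrative sketch (not part of the original library): a literal transcription of
# the Haude expression noted in the stub above, with T the 2 pm air temperature in
# deg C, rf the relative humidity as a fraction and f_mon a monthly Haude factor.
# All units, and reading `rf * es` as the actual vapour pressure, are assumptions.
def _haude_sketch(t14, rf, f_mon):
    es = 6.11 * 10 ** (7.48 * t14 / (237.0 + t14))  # saturation vapour pressure at t14
    return f_mon * (es - rf * es)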
class JensenHaiseBasins(ETBase):
"""
This method generates daily pan evaporation (inches) using a coefficient for the month `cts`, the daily
average air temperature (F), a coefficient `ctx`, and solar radiation (langleys/day) as givn in
BASINS program[2].
The computations are
based on the Jensen and Haise (1963) formula.
PET = CTS * (TAVF - CTX) * RIN
where
PET = daily potential evapotranspiration (in)
CTS = monthly variable coefficient
TAVF = mean daily air temperature (F), computed from max-min
CTX = coefficient
RIN = daily solar radiation expressed in inches of evaporation
RIN = SWRD/(597.3 - (.57 * TAVC)) * 2.54
where
SWRD = daily solar radiation (langleys)
TAVC = mean daily air temperature (C)
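For example (illustrative numbers): SWRD = 500 langleys and TAVC = 20 C give
RIN = 500 / (597.3 - 0.57 * 20) * 2.54, i.e. roughly 2.17 inches of evaporation equivalent.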
:uses cts float or array like. Value of monthly coefficient `cts` to be used. If float, then same value is
assumed for all months. If array like then it must be of length 12.
:uses ctx `float` constant coefficient value of `ctx` to be used in Jensen and Haise formulation.
"""
def __call__(self, *args, **kwargs):
if 'cts_jh' in kwargs:
cts = kwargs['cts_jh']
else:
cts = self.cons['cts_jh']
if 'ctx_jh' in kwargs:
ctx = kwargs['ctx_jh']
else:
ctx = self.cons['ctx_jh']
if not isinstance(cts, float):
if not isinstance(np.array(cts), np.ndarray):
raise ValueError('cts must be array like')
else: # if cts is array like it must be given for 12 months of year, not more not less
if len(np.array(cts)) > 12:
raise ValueError('cts must be of length 12')
else: # if only one value is given for all moths distribute it as monthly value
cts = np.array([cts for _ in range(12)])
if not isinstance(ctx, float):
raise ValueError('ctx must be float')
# distributing cts values for all dates of input data
self.input['cts'] = np.nan
for m, i in zip(self.input.index.month, self.input.index):
self.input.at[i, 'cts'] = cts[m - 1]
cts = self.input['cts']
taf = self.input['temp'].values
rad_in = self.rad_to_evap()
pan_evp = np.multiply(np.multiply(cts, np.subtract(taf, ctx)), rad_in)
et = np.where(pan_evp < 0.0, 0.0, pan_evp)
self.post_process(et, kwargs.get('transform', False))
return et
class Kharrufa(ETBase):
"""
For monthly potential evapotranspiration estimation, originally presented by Kharrufa, 1985. Xu and Singh, 2001
presented the following formula:
et = 0.34 * p * Tmean**1.3
et: pot. evapotranspiration in mm/month.
Tmean: Average temperature in Degree Centigrade
p: percentage of total daytime hours for the period used (daily or monthly) out of total daytime hours of the
year (365 * 12)
    Kharrufa, N. S. (1985). Simplified equation for evapotranspiration in arid regions. Beitrage zur
Hydrologie, 5(1), 39-47.
"""
def __call__(self, *args, **kwargs):
ta = self.input['temp']
N = self.daylight_fao56() # mean daily percentage of annual daytime hours
n_annual = assign_yearly(N, self.input.index)
et = 0.34 * n_annual['N'].values * ta**1.3
self.post_process(et, kwargs.get('transform', False))
return et
class Linacre(ETBase):
"""
    using the formulation of Linacre 1977, who simplified the Penman method.
:uses
temp
tdew/rel_hum
https://doi.org/10.1016/0002-1571(77)90007-3
"""
def __call__(self, *args, **kwargs):
if 'tdew' not in self.input:
if 'rel_hum' in self.input:
self.tdew_from_t_rel_hum()
tm = np.add(self.input['temp'].values, np.multiply(0.006, self.cons['altitude']))
tmp1 = np.multiply(500, np.divide(tm, 100 - self.cons['lat_dec_deg']))
tmp2 = np.multiply(15, np.subtract(self.input['temp'].values, self.input['tdew'].values))
upar = np.add(tmp1, tmp2)
et = np.divide(upar, np.subtract(80, self.input['temp'].values))
self.post_process(et, kwargs.get('transform', False))
return et
class Makkink(ETBase):
"""
:uses
a_s, b_s
temp
solar_rad
using formulation of Makkink
"""
def __call__(self, *args, **kwargs):
rs = self.rs()
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
et = np.subtract(np.multiply(np.multiply(0.61, np.divide(delta, np.add(delta, gamma))),
np.divide(rs, 2.45)), 0.12)
self.post_process(et, kwargs.get('transform', False))
return et
class Irmak(ETBase):
"""
    Pandey et al., 2016 presented three formulas for Irmak.
1 eto = -0.611 + 0.149 * rs + 0.079 * t
2 eto = -0.642 + 0.174 * rs + 0.0353 * t
3 eto = -0.478 + 0.156 * rs - 0.0112 * tmax + 0.0733 * tmin
References:
Irmak 2003
Tabari et al 2011
Pandey et al 2016
"""
class Mahringer(ETBase):
"""
    Developed by Mahringer in Germany. Reference [1] gives the formula as
eto = 0.15072 * sqrt(3.6) * (es - ea)
"""
class Mather(ETBase):
"""
    Developed by Mather 1978 and used in Rosenberry et al 2004. Calculates daily potential ET.
        pet = 1.6 * (10 * T_a / I)**a * (10 / d)
        a = 6.75e-7 * I**3 - 7.71e-5 * I**2 + 1.79e-2 * I + 0.49
        I = annual heat index, sum((T_a / 5)**1.514)
        d = number of days in month
"""
class MattShuttleworth(ETBase):
"""
    using the Matt-Shuttleworth formulation (Shuttleworth and Wallace, 2009). This is designed for semi-arid and
    windy areas as an alternative to the FAO-56 Reference Crop method.
10.13031/2013.29217
https://elibrary.asabe.org/abstract.asp?aid=29217
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['CH', 'Roua', 'Ca', 'surf_res'])
ch = self.cons['CH'] # crop height
ro_a = self.cons['Roua']
ca = self.cons['Ca'] # specific heat of the air
# surface resistance (s m-1) of a well-watered crop equivalent to the FAO crop coefficient
r_s = self.cons['surf_res']
vabar = self.avp_from_rel_hum() # Vapour pressure
vas = self.mean_sat_vp_fao56()
r_n = self.net_rad(vabar) # net radiation
u2 = self._wind_2m() # Wind speed
delta = self.slope_sat_vp(self.input['temp'].values) # slope of vapour pressure curve
gam = self.psy_const() # psychrometric constant
tmp1 = self.seconds * ro_a * ca
        # climatological resistance (s*m^-1) (S5.34)
r_clim = np.multiply(tmp1, np.divide(np.subtract(vas, vabar), np.multiply(delta, r_n)))
r_clim = np.where(r_clim == 0, 0.1, r_clim) # correction for r_clim = 0
u2 = np.where(u2 == 0, 0.1, u2) # correction for u2 = 0
# ratio of vapour pressure deficits at 50m to vapour pressure deficits at 2m heights, eq S5.35
a1 = (302 * (delta + gam) + 70 * gam * u2)
a2 = (208 * (delta + gam) + 70 * gam * u2)
a3 = 1/r_clim * ((302 * (delta + gam) + 70 * gam * u2) / (208 * (delta + gam) + 70 * gam * u2) * (208 / u2) - (302 / u2))
vpd50_to_vpd2 = a1/a2 + a3
        # aerodynamic coefficient for crop height (s*m^-1) (eq S5.36 in McMahon et al 2013)
a1 = 1 / (0.41**2)
a2 = np.log((50 - 0.67 * ch) / (0.123 * ch))
a3 = np.log((50 - 0.67 * ch) / (0.0123 * ch))
a4 = np.log((2 - 0.08) / 0.0148) / np.log((50 - 0.08) / 0.0148)
rc_50 = a1 * a2 * a3 * a4
a1 = 1/LAMBDA
a2 = (delta * r_n + (ro_a * ca * u2 * (vas - vabar)) / rc_50 * vpd50_to_vpd2)
a3 = (delta + gam * (1 + r_s * u2 / rc_50))
et = a1 * a2/a3
self.post_process(et, kwargs.get('transform', False))
return et
class McGuinnessBordne(ETBase):
"""
    calculates evapotranspiration [mm/day] using the McGuinness-Bordne formulation (McGuinness and Bordne, 1972).
"""
def __call__(self, *args, **kwargs):
ra = self._et_rad()
# latent heat of vaporisation, MJ/Kg
_lambda = LAMBDA # multiply((2.501 - 2.361e-3), self.input['temp'].values)
tmp1 = np.multiply((1/_lambda), ra)
tmp2 = np.divide(np.add(self.input['temp'].values, 5), 68)
et = np.multiply(tmp1, tmp2)
self.post_process(et, kwargs.get('transform', False))
return et
class Penman(ETBase):
"""
calculates pan evaporation from open water using formulation of Penman, 1948, as mentioned (as eq 12) in
McMahon et al., 2012. If wind data is missing then equation 33 from Valiantzas, 2006 is used which does not require
wind data.
uses: wind_f='pen48', a_s=0.23, b_s=0.5, albedo=0.23
uz
temp
rs
        rel_hum
    :param `wind_f` str, either 'pen48' to use the wind function of [1] (a=2.626, b=0.09) or 'pen56' to use
        the 1956 wind function (a=1.313, b=0.06).
"""
# todo, gives -ve values sometimes
def __call__(self, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'wind_f', 'albedo'],
ts=['temp', 'rh_mean'])
if self.cons['wind_f'] not in ['pen48', 'pen56']:
raise ValueError('value of given wind_f is not allowed.')
wind_method = 'macmohan'
if 'wind_method' in kwargs:
wind_method = kwargs['wind_method']
if self.cons['wind_f'] == 'pen48':
_a = 2.626
_b = 0.09
else:
_a = 1.313
_b = 0.06
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
rs = self.rs()
vabar = self.avp_from_rel_hum() # Vapour pressure *ea*
r_n = self.net_rad(vabar, rs) # net radiation
vas = self.mean_sat_vp_fao56()
if 'wind_speed' in self.input.columns:
if self.verbosity > 1:
print("Wind data have been used for calculating the Penman evaporation.")
u2 = self._wind_2m(method=wind_method)
fau = _a + 1.381 * u2
ea = np.multiply(fau, np.subtract(vas, vabar))
tmp1 = np.divide(delta, np.add(delta, gamma))
tmp2 = np.divide(r_n, LAMBDA)
tmp3 = np.multiply(np.divide(gamma, np.add(delta, gamma)), ea)
evap = np.add(np.multiply(tmp1, tmp2), tmp3)
# if wind data is not available
else:
if self.verbosity > 1:
print("Alternative calculation for Penman evaporation without wind data has been performed")
ra = self._et_rad()
tmp1 = np.multiply(np.multiply(0.047, rs), np.sqrt(np.add(self.input['temp'].values, 9.5)))
tmp2 = np.multiply(np.power(np.divide(rs, ra), 2.0), 2.4)
tmp3 = np.multiply(_b, np.add(self.input['temp'].values, 20))
tmp4 = np.subtract(1, np.divide(self.input['rh_mean'].values, 100))
tmp5 = np.multiply(tmp3, tmp4)
evap = np.add(np.subtract(tmp1, tmp2), tmp5)
self.post_process(evap, kwargs.get('transform', False))
return evap
class PenPan(ETBase):
"""
implementing the PenPan formulation for Class-A pan evaporation as given in Rotstayn et al., 2006
"""
def __call__(self, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'pen_ap', 'albedo', 'alphaA', 'pan_over_est',
'pan_est'],
ts=['temp', 'wind_speed'])
epan = self.evap_pan()
et = epan
if self.cons['pan_over_est']:
if self.cons['pan_est'] == 'pot_et':
et = np.multiply(np.divide(et, 1.078), self.cons['pan_coef'])
else:
et = np.divide(et, 1.078)
self.post_process(et, kwargs.get('transform', False))
return et
class PenmanMonteith(ETBase):
"""
    calculates reference evapotranspiration according to the Penman-Monteith (Allen et al 1998) equation which is
also recommended by FAO. The etp is calculated at the time step determined by the step size of input data.
For hourly or sub-hourly calculation, equation 53 is used while for daily time step equation 6 is used.
# Requirements
Following timeseries data is used
relative humidity
temperature
Following constants are used
lm=None, a_s=0.25, b_s=0.5, albedo=0.23
http://www.fao.org/3/X0490E/x0490e08.htm#chapter%204%20%20%20determination%20of%20eto
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo', 'a_s', 'b_s'],
ts=['temp', 'wind_speed', 'jday'])
wind_2m = self._wind_2m()
d = self.slope_sat_vp(self.input['temp'].values)
g = self.psy_const()
# Mean saturation vapour pressure
if 'es' not in self.input:
if self.freq_in_mins == 1440:
es = self.mean_sat_vp_fao56()
elif self.freq_in_mins == 60:
es = self.sat_vp_fao56(self.input['temp'].values)
elif self.freq_in_mins < 60: # TODO should sub-hourly be same as hourly?
es = self.sat_vp_fao56(self.input['temp'].values)
else:
raise NotImplementedError
else:
es = self.input['es']
# actual vapour pressure
ea = self.avp_from_rel_hum()
if 'vp_def' not in self.input:
vp_d = es - ea # vapor pressure deficit
else:
vp_d = self.input['vp_def']
rn = self.net_rad(ea) # eq 40 in Fao
_g = self.soil_heat_flux(rn)
t1 = 0.408 * (d*(rn - _g))
nechay = d + g*(1 + 0.34 * wind_2m)
if self.freq_in_mins == 1440:
t5 = t1 / nechay
t6 = 900/(self.input['temp']+273) * wind_2m * vp_d * g / nechay
pet = np.add(t5, t6)
elif self.freq_in_mins < 1440: # TODO should sub-hourly be same as hourly?
t3 = np.multiply(np.divide(37, self.input['temp']+273.0), g)
t4 = np.multiply(t3, np.multiply(wind_2m, vp_d))
upar = t1 + t4
pet = upar / nechay
else:
raise NotImplementedError("For frequency of {} minutes, {} method can not be implemented"
.format(self.freq_in_mins, self.name))
self.post_process(pet, kwargs.get('transform', False))
return pet
class PriestleyTaylor(ETBase):
"""
following formulation of Priestley & Taylor, 1972.
    uses: a_s=0.23, b_s=0.5, alpha_pt=1.26, albedo=0.23
:param `alpha_pt` Priestley-Taylor coefficient = 1.26 for Priestley-Taylor model (Priestley and Taylor, 1972)
https://doi.org/10.1175/1520-0493(1972)100<0081:OTAOSH>2.3.CO;2
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'alpha_pt', 'albedo'])
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # *ea*
r_n = self.net_rad(vabar) # net radiation
# vas = self.mean_sat_vp_fao56()
tmp1 = np.divide(delta, np.add(delta, gamma))
tmp2 = np.multiply(tmp1, np.divide(r_n, LAMBDA))
tmp3 = np.subtract(tmp2, np.divide(self.soil_heat_flux(), LAMBDA))
et = np.multiply(self.cons['alpha_pt'], tmp3)
self.post_process(et, kwargs.get('transform', False))
return et
class Romanenko(ETBase):
    """
    using formulation of Romanenko
    uses:
        temp
        rel_hum
    There are two variants of it in Song et al 2017.
    https://www.scirp.org/(S(czeh2tfqyw2orz553k1w0r45))/reference/ReferencesPapers.aspx?ReferenceID=2151471
    """
    def __call__(self, *args, **kwargs):
        self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo'],
                          ts=['temp'])
t = self.input['temp'].values
vas = self.mean_sat_vp_fao56()
vabar = self.avp_from_rel_hum() # Vapour pressure *ea*
tmp1 = np.power(np.add(1, np.divide(t, 25)), 2)
tmp2 = np.subtract(1, np.divide(vabar, vas))
et = np.multiply(np.multiply(4.5, tmp1), tmp2)
self.post_process(et, kwargs.get('transform', False))
return et
class SzilagyiJozsa(ETBase):
"""
    using formulation of Szilagyi, 2007.
https://doi.org/10.1029/2006GL028708
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['wind_f', 'alphaPT'])
if self.cons['wind_f'] == 'pen48':
_a = 2.626
_b = 0.09
else:
_a = 1.313
_b = 0.06
alpha_pt = self.cons['alphaPT'] # Priestley Taylor constant
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
rs = self.rs()
vabar = self.avp_from_rel_hum() # Vapour pressure *ea*
r_n = self.net_rad(vabar) # net radiation
vas = self.mean_sat_vp_fao56()
if 'uz' in self.input.columns:
if self.verbosity > 1:
print("Wind data have been used for calculating the Penman evaporation.")
u2 = self._wind_2m()
fau = _a + 1.381 * u2
ea = np.multiply(fau, np.subtract(vas, vabar))
tmp1 = np.divide(delta, np.add(delta, gamma))
tmp2 = np.divide(r_n, LAMBDA)
tmp3 = np.multiply(np.divide(gamma, np.add(delta, gamma)), ea)
et_penman = np.add(np.multiply(tmp1, tmp2), tmp3)
# if wind data is not available
else:
if self.verbosity > 1:
print("Alternative calculation for Penman evaporation without wind data have been performed")
ra = self._et_rad()
tmp1 = np.multiply(np.multiply(0.047, rs), np.sqrt(np.add(self.input['temp'].values, 9.5)))
tmp2 = np.multiply(np.power(np.divide(rs, ra), 2.0), 2.4)
tmp3 = np.multiply(_b, np.add(self.input['temp'].values, 20))
tmp4 = np.subtract(1, np.divide(self.input['rh_mean'].values, 100))
tmp5 = np.multiply(tmp3, tmp4)
et_penman = np.add(np.subtract(tmp1, tmp2), tmp5)
# find equilibrium temperature T_e
t_e = self.equil_temp(et_penman)
delta_te = self.slope_sat_vp(t_e) # slope of vapour pressure curve at T_e
# Priestley-Taylor evapotranspiration at T_e
et_pt_te = np.multiply(alpha_pt, np.multiply(np.divide(delta_te, np.add(delta_te, gamma)), np.divide(r_n, LAMBDA)))
et = np.subtract(np.multiply(2, et_pt_te), et_penman)
self.post_process(et, kwargs.get('transform', False))
return et
class Thornthwait(ETBase):
"""calculates reference evapotrnaspiration according to empirical temperature based Thornthwaite
(Thornthwaite 1948) method. The method actualy calculates both ETP and evaporation. It requires only temperature
and day length as input. Suitable for monthly values.
"""
def __call__(self, *args, **kwargs):
if 'daylight_hrs' not in self.input.columns:
day_hrs = self.daylight_fao56()
else:
day_hrs = self.input['daylight_hrs']
self.input['adj_t'] = np.where(self.input['temp'].values < 0.0, 0.0, self.input['temp'].values)
I = self.input['adj_t'].resample('A').apply(custom_resampler) # heat index (I)
a = (6.75e-07 * I ** 3) - (7.71e-05 * I ** 2) + (1.792e-02 * I) + 0.49239
self.input['a'] = a
a_mon = self.input['a'] # monthly values filled with NaN
a_mon = pd.DataFrame(a_mon)
a_ann = pd.DataFrame(a)
a_monthly = a_mon.merge(a_ann, left_index=True, right_index=True, how='left').fillna(method='bfill')
self.input['I'] = I
i_mon = self.input['I'] # monthly values filled with NaN
i_mon = pd.DataFrame(i_mon)
i_ann = pd.DataFrame(I)
i_monthly = i_mon.merge(i_ann, left_index=True, right_index=True, how='left').fillna(method='bfill')
tmp1 = np.multiply(1.6, np.divide(day_hrs, 12.0))
tmp2 = np.divide(self.input.index.daysinmonth, 30.0)
tmp3 = np.multiply(np.power(np.multiply(10.0, np.divide(self.input['temp'].values, i_monthly['I'].values)),
a_monthly['a'].values), 10.0)
pet = np.multiply(tmp1, np.multiply(tmp2, tmp3))
# self.input['Thornthwait_daily'] = np.divide(self.input['Thornthwait_Monthly'].values, self.input.index.days_in_month)
self.post_process(pet, kwargs.get('transform', False))
return pet
class MortonCRAE(ETBase):
"""
    for monthly potential ET, wet-environment areal ET and actual ET by Morton 1983.
"""
class Papadakis(ETBase):
"""
    Calculates monthly values based on saturation vapour pressure and temperature. The following equation is used:
        eto = 0.5625 * (ea_tmax - ed)
    ea_tmax: saturation vapour pressure corresponding to the average maximum temperature [kPa].
    ed: saturation vapour pressure corresponding to the dew point temperature [kPa].
Rosenberry et al., 2004 presented following equation quoting McGuinnes and Bordne, 1972
pet = 0.5625 * [es_max - (es_min - 2)] (10/d)
d = number of days in month
es = saturated vapour pressure at temperature of air in millibars
"""
class Ritchie(ETBase):
"""
Given by Jones and Ritchie 1990 and quoted by Valipour, 2005 and Pandey et al., 2016
        et = rs * alpha * (0.002322 * tmax + 0.001548 * tmin + 0.11223)
"""
def __call__(self, *args, **kwargs):
        self.requirements(constants=['ritchie_a', 'ritchie_b', 'ritchie_c', 'ritchie_alpha'],
ts=['tmin', 'tmax'])
ritchie_a = self.cons['ritchie_a']
ritchie_b = self.cons['ritchie_b']
ritchie_c = self.cons['ritchie_c']
alpha = self.cons['ritchie_alpha']
rs = self.rs()
        eto = rs * alpha * (ritchie_a * self.input['tmax'] + ritchie_b * self.input['tmin'] + ritchie_c)
self.post_process(eto, kwargs.get('transform', False))
return eto
class Turc(ETBase):
"""
The original formulation is from Turc, 1961 which was developed for southern France and Africa.
Pandey et al 2016 mentioned a modified version of Turc quoting Xu et al., 2008, Singh, 2008 and Chen and Chen, 2008.
eto = alpha_t * 0.013 T/(T+15) ( (23.8856Rs + 50)/gamma)
A shorter version of this formula is quoted by Valipour, 2015 quoting Xu et al., 2008
eto = (0.3107 * Rs + 0.65) [T alpha_t / (T + 15)]
Here it is implemented as given (as eq 5) in Alexandris, et al., 2008 which is;
for rh > 50 %:
eto = 0.0133 * [T_mean / (T_mean + 15)] ( Rs + 50)
for rh < 50 %:
eto = 0.0133 * [T_mean / (T_mean + 15)] ( Rs + 50) [1 + (50 - Rh) / 70]
uses
:param `k` float or array like, monthly crop coefficient. A single value means same crop coefficient for
whole year
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
    :param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
Turc, L. (1961). Estimation of irrigation water requirements, potential evapotranspiration: a simple climatic
    formula evolved up to date. Annales Agronomiques, 12(1), 13-49.
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'turc_k'],
ts=['temp'])
use_rh = False # because while testing daily, rhmin and rhmax are given and rhmean is calculated by default
if 'use_rh' in kwargs:
use_rh = kwargs['use_rh']
rs = self.rs()
ta = self.input['temp'].values
et = np.multiply(np.multiply(self.cons['turc_k'], (np.add(np.multiply(23.88, rs), 50))),
np.divide(ta, (np.add(ta, 15))))
if use_rh:
if 'rh_mean' in self.input.columns:
rh_mean = self.input['rh_mean'].values
eq1 = np.multiply(np.multiply(np.multiply(self.cons['turc_k'], (np.add(np.multiply(23.88, rs), 50))),
np.divide(ta, (np.add(ta, 15)))),
(np.add(1, np.divide((np.subtract(50, rh_mean)), 70))))
eq2 = np.multiply(np.multiply(self.cons['turc_k'], (np.add(np.multiply(23.88, rs), 50))),
np.divide(ta, (np.add(ta, 15))))
et = np.where(rh_mean < 50, eq1, eq2)
self.post_process(et, kwargs.get('transform', False))
return et
class Valiantzas(ETBase):
"""
    Djaman 2016 mentioned two methods from Valiantzas, Valipour 2015 tested five variants of his formulations in
    Iran, Ahmad et al 2019 used six variants and Djaman et al., 2017 used nine variants of this method.
    These nine methods are given below:
method_1:
        This is equation 19 in Valiantzas, 2012. This also does not require wind data.
        eto = 0.0393 * Rs * sqrt(T_avg + 9.5) - (0.19 * Rs**0.6 * lat_rad**0.15)
              + 0.0061 * (T_avg + 20) * (1.12 * T_avg - T_min - 2)**0.7
method_2:
This is equation 14 in Valiantzas, 2012. This does not require wind data. The recommended value of alpha is 0.23.
eto = 0.0393 * Rs * sqrt(T_avg + 9.5) - (0.19 * Rs**0.6 * lat_rad**0.15) + 0.078(T_avg + 20)(1 - rh/100)
method_3
eto = 0.0393 * Rs * sqrt(T_avg + 9.5) - (Rs/Ra)**2 - [(T_avg + 20) * (1-rh/100) * ( 0.024 - 0.1 * Waero)]
method_4:
        This is equation 35 in the Valiantzas 2013c paper and was referred to as the Fo-PENM method, using alpha as 0.25.
eto = 0.051 * (1 - alpha) * Rs * sqrt(T_avg + 9.5) - 2.4 * (Rs/Ra)**2
+ [0.048 * (T_avg + 20) * ( 1- rh/100) * (0.5 + 0.536 * u2)] + (0.00012 * z)
method_5:
This is equation 30 in Valiantzas 2013c. This is when no wind data is available.
eto = 0.0393 * Rs sqrt(T_avg + 9.5)
- [2.46 * Rs * lat**0.15 / (4 * sin(2 * pi * J / 365 - 1.39) lat + 12)**2 + 0.92]**2
- 0.024 * (T_avg + 20)(1 - rh/100) - (0.0268 * Rs)
+ (0.0984 * (T_avg + 17)) * (1.03 + 0.00055) * (T_max - T_min)**2 - rh/100
method_6
This method is when wind speed and solar radiation data is not available. This is equation 34 in
Valiantzas, 2013c.
eto = 0.0068 * Ra * sqrt[(T_avg + 9.5) * (T_max - T_min)]
- 0.0696 * (T_max - T_min) - 0.024 * (T_avg + 20)
* [ ((1-rh/100) - 0.00455 * Ra * sqrt(T_max - T_dew)
+ 0.0984 * (T_avg + 17) * (1.03 + 0.0055) * (T_max - T_min)**2)
- rh/100
method_7:
This is equation 27 in Valiantzas, 2013c. This method requires all data. Djaman et al., (by mistake) used 0.0043
in denominator instead of 0.00043.
eto = 0.051 * (1-alpha) * Rs * (T_avg + 9.5)**0.5
- 0.188 * (T_avg + 13) * (Rs/Ra - 0.194)
* (1 - 0.00015) * (T_avg + 45)**2 * sqrt(rh/100)
- 0.0165 * Rs * u**0.7 + 0.0585 * (T_avg + 17) * u**0.75
* {[1 + 0.00043 * (T_max - T_min)**2]**2 - rh/100} / [1 + 0.00043 * (T_max - T_min)**2 + 0.0001*z]
method_8:
eto = 0.051 * (1-alpha) * Rs * (T_avg + 9.5)**0.5
- 2.4 (Rs/Ra)**2 - 2.4 * (T_avg + 20) * (1 - rh/100)
- 0.0165 * Rs * u**0.7 + 0.0585 * (T_avg + 17) * u**0.75
* { [1 + 0.00043 (T_max - T_min)**2]**2 - rh/100} / ( 1 + 0.00043 * (T_max - T_min)**2 + (0.0001 * z)
method_9:
This must be equation 29 of Valiantzas, 2013c but method 9 in Djaman et al., 2017 used 2.46 instead of 22.46. This
formulation does not require Ra.
eto = [0.051 * (1-alpha) * Rs (T_avg + 9.5)**2
* (2.46 * Rs * lat**0.15 / (4 sin(2 * pi J / 365 - 1.39) * lat + 12)**2 + 0.92)]**2
- 0.024 * (T_avg + 20) * (1-rh/100) - 0.0165 * Rs * u**0.7
+ 0.0585 * (T_avg + 17) * u**0.75 * {[(1.03 + 0.00055) * (T_max - T_min)**2 - rh/100] + 0.0001*z}
"""
def __call__(self, method='method_1', **kwargs):
        self.requirements(constants=['valiantzas_alpha', 'altitude', 'lat_dec_deg'],
ts=['temp'])
alpha = self.cons['valiantzas_alpha']
        z = self.cons['altitude']
rh = self.input['rh']
ta = self.input['temp']
tmin = self.input['tmin']
tmax = self.input['tmax']
j = self.input['jday']
ra = self._et_rad()
u2 = self._wind_2m()
w_aero = np.where(rh <= 65.0, 1.067, 0.78) # empirical weighted factor
rs_ra = (self.rs() / ra)**2
tr = tmax - tmin
tr_sq = tr**2
lat_15 = self.lat_rad**0.15
t_sqrt = np.sqrt(ta + 9.5)
init = 0.0393 * self.rs() * t_sqrt
rs_fact = 0.19 * (self.rs()**0.6) * lat_15
t_20 = ta + 20
rh_factor = 1.0 - (rh/100.0)
if method == 'method_1':
            eto = init - rs_fact + 0.0061 * t_20 * (1.12 * ta - tmin - 2.0)**0.7
elif method == 'method_2':
eto = init - rs_fact + (0.078 * t_20 * rh_factor)
elif method == 'method_3':
eto = init - rs_ra - (t_20 * rh_factor * (0.024 - 0.1 * w_aero))
elif method == 'method_4':
eto = 0.051 * (1 - alpha) * self.rs() * t_sqrt - 2.4 * rs_ra + (0.048 * t_20 * rh_factor * (0.5 + 0.536 * u2)) + (0.00012 * z)
        elif method == 'method_5':
            # NOTE: only the leading radiation term of equation 30 is implemented here.
            eto = init
        elif method == 'method_6':
            raise NotImplementedError('method_6 of Valiantzas is not implemented yet')
        elif method == 'method_7':
            raise NotImplementedError('method_7 of Valiantzas is not implemented yet')
        elif method == 'method_8':
            raise NotImplementedError('method_8 of Valiantzas is not implemented yet')
        elif method == 'method_9':
            # NOTE: only the leading term of equation 29 is implemented; the humidity and wind terms are missing.
            eto = 0.051 * (1 - alpha) * self.rs() * (ta + 9.5)**2 * \
                (2.46 * self.rs() * lat_15) / (4 * np.sin(2 * np.pi * j / 365 - 1.39))
else:
raise ValueError
self.post_process(eto, kwargs.get('transform', False))
return eto
class Oudin(ETBase):
"""
https://doi.org/10.1016/j.jhydrol.2004.08.026
"""
pass
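    def __call__(self, *args, **kwargs):
        # A minimal sketch of the Oudin et al. (2005) temperature-based formula,
        #     pet = (ra / lambda) * (ta + 5) / 100   for ta + 5 > 0, otherwise 0,
        # with ra as extraterrestrial radiation (MJ m-2 day-1) from self._et_rad() and LAMBDA reused from
        # this module; the requirements list is an assumption.
        self.requirements(constants=['lat_dec_deg'],
                          ts=['temp'])
        ta = self.input['temp'].values
        ra = self._et_rad()
        pet = np.where(np.add(ta, 5.0) > 0.0,
                       np.multiply(np.divide(ra, LAMBDA), np.divide(np.add(ta, 5.0), 100.0)),
                       0.0)
        self.post_process(pet, kwargs.get('transform', False))
        return pet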
class RengerWessolek(ETBase):
"""
    Renger, M. & Wessolek, G. (1990): Auswirkungen von Grundwasserabsenkung und Nutzungsänderungen auf die
Grundwasserneubildung. – Mit. Inst. für Wasserwesen, Univ. der Bundeswehr München, 386: 295-307.
"""
class Black(ETBase):
"""
https://doi.org/10.2136/sssaj1969.03615995003300050013x
"""
class McNaughtonBlack(ETBase):
"""
https://doi.org/10.1029/WR009i006p01579
"""
def custom_resampler(array_like):
"""calculating heat index using monthly values of temperature."""
return np.sum(np.power(np.divide(array_like, 5.0), 1.514))
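# Worked example for the heat index above (an illustration, not from the original source): for twelve monthly
# mean temperatures of 20 degree centigrade, I = 12 * (20 / 5)**1.514 ~= 97.9, which then feeds the exponent
# `a` used in Thornthwait.__call__.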
def assign_yearly(data, index):
    # TODO for leap years or when first or final year is not complete, the results are not correct; imitate
    # https://github.com/cran/Evapotranspiration/blob/master/R/Evapotranspiration.R#L1848
    """ assigns `step` summed data to whole data while keeping the length of data preserved."""
    n_ts = pd.DataFrame(data, index=index, columns=['N'])
    # NOTE: the lines below are a reconstruction (an assumption): aggregate to yearly sums and broadcast
    # them back onto the original index, following the merge/bfill pattern used in Thornthwait.__call__ above.
    n_ann = pd.DataFrame(n_ts['N'].resample('A').sum())
    n_df = n_ts.merge(n_ann, left_index=True, right_index=True, how='left', suffixes=('_orig', ''))
    n_df['N'] = n_df['N'].fillna(method='bfill')
    return n_df[['N']]