| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
"""
DeepCache
DeepCache is distributed under the following BSD 3-Clause License:
Copyright (c) 2019
University of Minnesota - Twin Cities
Authors: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@author: <NAME> (<EMAIL>)
DESCRIPTION:
This code analyzes the generated requests and extracts the properties of each object, such as when it was first
introduced, its frequency, and its lifespan. It also extracts the number of active unique objects requested in each
hour.
PREPROCESSING_SCRIPTS:
Run one of these scripts before running the requestAnalysis.py script:
1. generateSyntheticDataset.py: generates a synthetic dataset
2. generateMediSynDataset.py: generates a synthetic dataset according to MediSyn paper
INPUT:
The input directory is '../Datasets':
1- REQUESTFILENAME: The request file to be analyzed @Line 41
2- FORCE_GENERATE_BINS: a flag to force the regeneration of the bin file, by default it is False
3- FORCE_GENERATE_PROPERTIES: a flag to force the regeneration of the object properties, by default it is False
OUTPUT:
The output files are generated in '../Datasets' directory:
1- {RequestFile}_bins.csv: which indicates the number of unique objects in each hour.
Format: {binID, uniqueObjNum, binMinRequestTime, binMaxRequestTime}
2- {RequestFile}_properties.csv: which includes the properties of each object.
Format: {object_ID, frequency, lifeSpan, minRequestTime, maxRequestTime, start_day, end_day}
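EXAMPLE:
As a rough sketch (not part of this script; the file name is simply the default REQUESTFILENAME above),
the generated outputs can be loaded back for inspection with pandas:
import pandas as pd
bins = pd.read_csv('../Datasets/mediSynDataset_x2_O3488_bins.csv')
props = pd.read_csv('../Datasets/mediSynDataset_x2_O3488_properties.csv')
print(bins.head())   # binID, uniqueObjNum, binMinRequestTime, binMaxRequestTime
print(props.head())  # object_ID, frequency, lifeSpan, minRequestTime, maxRequestTime, start_day, end_day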
"""
from __future__ import print_function
import pandas as pd
import numpy as np
import os
FORCE_GENERATE_BINS = False
FORCE_GENERATE_PROPERTIES = False
REQDIR = '../Datasets/'
REQUESTFILENAME = 'mediSynDataset_x2_O3488.csv' #'syntheticDataset_O50.csv'
REQUESTPATH = REQDIR + REQUESTFILENAME
BINFILENAME = REQDIR + REQUESTFILENAME[:-4] + '_bins.csv'
PROPERTIES_FILENAME = REQDIR + REQUESTFILENAME[:-4] + '_properties.csv'
BIN_SECONDS_WIDTH = 3600
# Load Requests File
print('Loading Request File ...')
reqdf = pd.read_csv(REQUESTPATH, sep=',')
print('Sorting Request File by time ...')
reqdf.sort_values(by=['request_time'], inplace=True)
print('Request File Sorted')
# get all 1-hour intervals/bins
if not os.path.isfile(BINFILENAME) or FORCE_GENERATE_BINS:
bins = np.arange(np.ceil(reqdf.request_time.min()), np.ceil(reqdf.request_time.max()), BIN_SECONDS_WIDTH)
print('Starting binning process ...')
reqdf['binID'] = pd.cut(reqdf['request_time'], bins, labels=np.arange(0, len(bins)-1))
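# NOTE: the nested-dict (renaming) form of .agg() below was deprecated in pandas 0.20 and removed in 0.25,
# so this block assumes an older pandas. On newer versions the same result can be obtained with named
# aggregation, e.g. reqdf.groupby('binID').agg(uniqueObjNum=('object_ID', 'nunique'),
# binMinRequestTime=('request_time', 'min'), binMaxRequestTime=('request_time', 'max')).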
grp = reqdf.groupby(['binID']).agg({'object_ID': {'uniqueObjNum': lambda x: x.nunique()},
'request_time': ['min', 'max']})
grp.reset_index(level=0, inplace=True)
# clean up columns
cols = list()
for k in grp.columns:
if k[1] == '':
cols.append(k[0])
else:
cols.append(k[1])
grp.columns = cols
filtered = grp.dropna().copy()
filtered["uniqueObjNum"] = filtered["uniqueObjNum"].astype(int)
filtered.rename(columns={'min': 'binMinRequestTime'}, inplace=True)
filtered.rename(columns={'max': 'binMaxRequestTime'}, inplace=True)
filtered.to_csv(BINFILENAME, index=False)
del filtered
if not os.path.isfile(PROPERTIES_FILENAME) or FORCE_GENERATE_PROPERTIES:
# Calculate object frequency
print('Calculating Object Frequency')
objfreqdf = (reqdf['object_ID'].value_counts()).to_frame()
objfreqdf.rename(columns={'object_ID': 'frequency'}, inplace=True)
objfreqdf['object_ID'] = objfreqdf.index
# Calculate object lifespan
print('Calculating Object LifeSpan & Introduction Day')
reqdf.sort_values(by=['object_ID'], inplace=True)
objLifespandf = reqdf.groupby(['object_ID']).agg({'request_time': ['min', 'max']})
objLifespandf.columns = ['_'.join(col).strip() for col in objLifespandf.columns.values]
objLifespandf.rename(columns={'request_time_min': 'minRequestTime'}, inplace=True)
objLifespandf.rename(columns={'request_time_max': 'maxRequestTime'}, inplace=True)
objLifespandf['object_ID'] = objLifespandf.index
objLifespandf['lifeSpan'] = (objLifespandf['maxRequestTime'] - objLifespandf['minRequestTime'])/86400
min_request_time = reqdf['request_time'].min()
objLifespandf['start_day'] = (objLifespandf['minRequestTime'] - min_request_time) / 86400
objLifespandf['end_day'] = (objLifespandf['maxRequestTime'] - min_request_time) / 86400
objLifespandf["start_day"] = objLifespandf["start_day"].apply(int)
objLifespandf["end_day"] = objLifespandf["end_day"].apply(int)
objLifespandf.sort_values('start_day', ascending=True, inplace=True)
objLifespandf.index.names = ['index']
# Save the properties of the objects
mergeddf =
|
pd.merge(objfreqdf, objLifespandf, on='object_ID')
|
pandas.merge
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 15:40:52 2020
@author: lukass
pip3.8 install xlrd==1.1.0
"""
import numpy as np
import pandas as pd
#import os,shutil
#import xlrd
#from datetime import datetime, date
#from discharge_xa_projections import df_to_excel
#from ecrf_compare import df_to_excel
def autosize_excel_columns(worksheet, df):
autosize_excel_columns_df(worksheet, df.index.to_frame())
autosize_excel_columns_df(worksheet, df, offset=df.index.nlevels)
def autosize_excel_columns_df(worksheet, df, offset=0):
for idx, col in enumerate(df):
series = df[col]
#import sys
#reload(sys) # Reload does the trick!
#sys.setdefaultencoding('UTF8')
max_len = max(( series.astype(str).map(len).max(), len(str(series.name)) )) + 1
max_len = min(max_len, 100)
worksheet.set_column(idx+offset, idx+offset, max_len)
def df_to_excel(writer, sheetname, df):
df.to_excel(writer, sheet_name = sheetname, freeze_panes = (df.columns.nlevels, df.index.nlevels))
autosize_excel_columns(writer.sheets[sheetname], df)
def format_differences(worksheet, levels, df_ref, df_bool, format_highlighted):
for i in range(df_ref.shape[0]):
for j in range(df_ref.shape[1]):
if df_bool.iloc[i,j]:
v = df_ref.iloc[i,j]
try:
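# v != v is True only when v is NaN, so missing values are written as highlighted blank cells.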
if v!=v:
worksheet.write_blank(i+1, j+levels, None, format_highlighted)
else:
worksheet.write(i+1, j+levels, v, format_highlighted)
except:
print("An exception occurred "+type(v))
return
def discharge_large_fov():
#f = '/home/lukass/Downloads/discharge_tags_16042020.xlsx'
f = 'H:/cloud/cloud_data/Projects/DL/Code/src/solutions/CACSFilter/discharge_tags_16042020.xlsx'
df0 =
|
pd.read_excel(f, 'linear')
|
pandas.read_excel
|
import pickle
import pandas as pd
import numpy as np
crnn2_result = pickle.load(open('../../CRNN2/crnn_results/crnn_results_summary.p', 'rb'))
crnn4_result = pickle.load(open('../../CRNN4/crnn_results/crnn_results_summary.p', 'rb'))
crnn6_result = pickle.load(open('../../CRNN6/crnn_results/crnn_results_summary.p', 'rb'))
crnn8_result = pickle.load(open('../../CRNN8/crnn_results/crnn_results_summary.p', 'rb'))
crnn10_result = pickle.load(open('../../CRNN10/crnn_results/crnn_results_summary.p', 'rb'))
crnn40_result = pickle.load(open('../../CRNN40/crnn_results/crnn_results_summary.p', 'rb'))
crnn100_result = pickle.load(open('../../CRNN100/crnn_results/crnn_results_summary.p', 'rb'))
crnn400_result = pickle.load(open('../../CRNN400/crnn_results/crnn_results_summary.p', 'rb'))
crnn1200_result = pickle.load(open('../../CRNN1200/crnn_results/crnn_results_summary.p', 'rb'))
vgg_result = pickle.load(open('../../VGG/results/vgg_results_summary.p', 'rb'))
lenet_result = pickle.load(open('../../LENET/results/lenet_results_summary.p', 'rb'))
svm_result = pickle.load(open('../../SVM/results/svm_results_summary.p', 'rb'))
result_summary = {'crnn2': pd.DataFrame(crnn2_result), 'crnn4': pd.DataFrame(crnn4_result), 'crnn6': pd.DataFrame(crnn6_result),
'crnn8': pd.DataFrame(crnn8_result), 'crnn10': pd.DataFrame(crnn10_result), 'crnn40': pd.DataFrame(crnn40_result),
'crnn100': pd.DataFrame(crnn100_result), 'crnn400': pd.DataFrame(crnn400_result), 'crnn1200': pd.DataFrame(crnn1200_result),
'vgg': pd.DataFrame(vgg_result), 'lenet': pd.DataFrame(lenet_result), 'svm': pd.DataFrame(svm_result)}
result_summary = pd.concat(result_summary)
result_summary.to_csv('../result/result_summary.csv', sep = ',')
crnn_pitch_shift = pickle.load(open('../../CRNN400/crnn_results/pitch_shift_results.p', 'rb'))
crnn_time_stretch = pickle.load(open('../../CRNN400/crnn_results/time_stretch_results.p', 'rb'))
crnn_crop = pickle.load(open('../../CRNN400/crnn_results/crop_results.p', 'rb'))
lenet_pitch_shift = pickle.load(open('../../LENET/results/pitch_shift_results.p', 'rb'))
lenet_time_stretch = pickle.load(open('../../LENET/results/time_stretch_results.p', 'rb'))
lenet_crop = pickle.load(open('../../LENET/results/crop_results.p', 'rb'))
svm_pitch_shift = pickle.load(open('../../SVM/results/pitch_shift_results.p', 'rb'))
svm_time_stretch = pickle.load(open('../../SVM/results/time_stretch_results.p', 'rb'))
svm_crop = pickle.load(open('../../SVM/results/crop_results.p', 'rb'))
simulation_summary = {'crnn_pitch_shift': pd.DataFrame(crnn_pitch_shift), 'crnn_time_stretch': pd.DataFrame(crnn_time_stretch),
'crnn_crop': pd.DataFrame(crnn_crop), 'lenet_pitch_shift': pd.DataFrame(lenet_pitch_shift),
'lenet_time_stretch': pd.DataFrame(lenet_time_stretch), 'lenet_crop': pd.DataFrame(lenet_crop),
'svm_pitch_shift': pd.DataFrame(svm_pitch_shift), 'svm_time_stretch': pd.DataFrame(svm_time_stretch),
'svm_crop': pd.DataFrame(svm_crop)}
simulation_summary = pd.concat(simulation_summary)
simulation_summary.to_csv('../result/simulation_summary.csv', sep = ',')
###############################################
crnn_result = pickle.load(open('../../CRNN400/crnn_results/crnn_results_summary.p', 'rb'))
lenet_result = pickle.load(open('../../LENET/results/lenet_results_summary.p', 'rb'))
svm_result = pickle.load(open('../../SVM/results/svm_results_summary.p', 'rb'))
result_event = {'crnn': crnn_result['threshold_result']['label event'], 'lenet_hmm_bino': lenet_result['hmm_bino_threshold_result']['label event'],
'lenet_hmm_gmm': lenet_result['hmm_gmm_result']['label event'], 'lenet': lenet_result['threshold_result']['label event'],
'svm_hmm_bino': svm_result['hmm_bino_result']['label event'], 'svm': svm_result['svm_result']['label event'],
'crnn_hmm': crnn_result['hmm_bino_threshold_result']['label event']}#add a crnn hmm result
result_event = pd.DataFrame(result_event)
result_event.to_csv('../result/result_event2.csv', sep = ',', index= False)
#####################################################
crnn_result_proportion_075 = pickle.load(open('../../CRNN400_proportion_data/crnn_results/proportion_0/crnn_results_summary.p', 'rb'))
crnn_result_proportion_050 = pickle.load(open('../../CRNN400_proportion_data/crnn_results/proportion_1/crnn_results_summary.p', 'rb'))
crnn_result_proportion_025 = pickle.load(open('../../CRNN400_proportion_data/crnn_results/proportion_2/crnn_results_summary.p', 'rb'))
lenet_result_proportion_075 = pickle.load(open('../../LENET_proportion_data/results/proportion_0/lenet_results_summary.p', 'rb'))
lenet_result_proportion_050 = pickle.load(open('../../LENET_proportion_data/results/proportion_1/lenet_results_summary.p', 'rb'))
lenet_result_proportion_025 = pickle.load(open('../../LENET_proportion_data/results/proportion_2/lenet_results_summary.p', 'rb'))
svm_result_proportion_075 = pickle.load(open('../../SVM_proportion_data/results/proportion_0/svm_results_summary.p', 'rb'))
svm_result_proportion_050 = pickle.load(open('../../SVM_proportion_data/results/proportion_1/svm_results_summary.p', 'rb'))
svm_result_proportion_025 = pickle.load(open('../../SVM_proportion_data/results/proportion_2/svm_results_summary.p', 'rb'))
proportion_data_summary = {'crnn_proportion_075': pd.DataFrame(crnn_result_proportion_075),
'crnn_proportion_050': pd.DataFrame(crnn_result_proportion_050),
'crnn_proportion_025': pd.DataFrame(crnn_result_proportion_025),
'lenet_proportion_075': pd.DataFrame(lenet_result_proportion_075),
'lenet_proportion_050': pd.DataFrame(lenet_result_proportion_050),
'lenet_proportion_025': pd.DataFrame(lenet_result_proportion_025),
'svm_proportion_075': pd.DataFrame(svm_result_proportion_075),
'svm_proportion_050': pd.DataFrame(svm_result_proportion_050),
'svm_proportion_025': pd.DataFrame(svm_result_proportion_025)}
proportion_data_summary =
|
pd.concat(proportion_data_summary)
|
pandas.concat
|
import cv2
import numpy as np
import pandas as pd
from . import constants
def load_darknet_weights(model, weights_file_path): # pylint: disable=too-many-locals
conv_layer_size = 110
conv_output_idxs = [93, 101, 109]
with open(weights_file_path, 'rb') as weights_file:
_, _, _, _, _ = np.fromfile(weights_file, dtype=np.int32, count=5)
bn_idx = 0
for conv_idx in range(conv_layer_size):
conv_layer_name = f'conv2d_{conv_idx}' if conv_idx > 0 else 'conv2d'
bn_layer_name = f'batch_normalization_{bn_idx}' if bn_idx > 0 else 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
kernel_size = conv_layer.kernel_size[0]
input_dims = conv_layer.input_shape[-1]
if conv_idx not in conv_output_idxs:
# darknet bn layer weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(weights_file, dtype=np.float32, count=4 * filters)
# tf bn layer weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
bn_idx += 1
else:
conv_bias = np.fromfile(weights_file, dtype=np.float32, count=filters)
# darknet shape: (out_dim, input_dims, height, width)
# tf shape: (height, width, input_dims, out_dim)
conv_shape = (filters, input_dims, kernel_size, kernel_size)
conv_weights = np.fromfile(weights_file, dtype=np.float32, count=np.prod(conv_shape))
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if conv_idx not in conv_output_idxs:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
remaining_weights = len(weights_file.read())
if remaining_weights == 0:
print('all weights read')
else:
print(f'failed to read all weights, # of unread weights: {remaining_weights}')
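# Example usage (illustrative; the model and weights path are placeholders):
# load_darknet_weights(yolo_model, 'data/yolov4.weights')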
def preprocess_image_data(image_data):
img = cv2.resize(image_data, constants.INPUT_SHAPE[:2])
img = img / 255.
imgs = np.expand_dims(img, axis=0)
return imgs
def get_detection_data(img, model_outputs):
num_bboxes = model_outputs[-1][0]
boxes, scores, _ = [output[0][:num_bboxes] for output in model_outputs[:-1]]
h, w = img.shape[:2]
df =
|
pd.DataFrame(boxes, columns=['x1', 'y1', 'x2', 'y2'])
|
pandas.DataFrame
|
# ------------------------------------------------------------------------------
# Import libraries
# ------------------------------------------------------------------------------
import os
import sys
import math
import time
import pickle
import psutil
import random
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime
from contextlib import contextmanager
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import warnings
import optuna
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
plt.style.use('fivethirtyeight')
warnings.filterwarnings('ignore')
# ------------------------------------------------------------------------------
# Parameters
# ------------------------------------------------------------------------------
N_FOLDS = 10
N_ESTIMATORS = 30000
SEED = 2021
BAGGING_SEED = 48
N_TRIALS = 50
# ------------------------------------------------------------------------------
# Path definition
# ------------------------------------------------------------------------------
DATA_PATH = Path("input/")
LOG_PATH = Path("./log/")
LOG_PATH.mkdir(parents=True, exist_ok=True)
# ------------------------------------------------------------------------------
# Utilities
# ------------------------------------------------------------------------------
@contextmanager
def timer(name: str):
t0 = time.time()
p = psutil.Process(os.getpid())
m0 = p.memory_info()[0] / 2. ** 30
try:
yield
finally:
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
print(f"[{m1:.1f}GB({sign}{delta:.1f}GB): {time.time() - t0:.3f}sec] {name}", file=sys.stderr)
def set_seed(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
def score_log(df: pd.DataFrame, seed: int, num_fold: int, model_name: str, cv: float):
score_dict = {'date': datetime.now(), 'seed': seed, 'fold': num_fold, 'model': model_name, 'cv': cv}
# noinspection PyTypeChecker
df = pd.concat([df,
|
pd.DataFrame.from_dict([score_dict])
|
pandas.DataFrame.from_dict
|
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
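# For example (illustrative), with interval_label='ending' a nominal data_start of 12:00 becomes
# 12:00:01, so the slice excludes the observation stamped 12:00 that labels the previous interval.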
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data =
|
pd.Series(100., index=[0])
|
pandas.Series
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(
|
DatetimeIndex(arr)
|
pandas.DatetimeIndex
|
"""
Predicting Lab Profitability in Washington State
Cannabis Data Science Meetup Group
Copyright (c) 2022 Cannlytics
Authors: <NAME> <<EMAIL>>
Created: 1/10/2022
Updated: 1/12/2022
License: MIT License <https://opensource.org/licenses/MIT>
Description: Using data on analyses performed by labs in Washington State,
this script calculates historic performance of each lab and uses analysis
prices to forecast the profitability of each lab over the next 5 years.
Data Sources:
- WA State Traceability Data January 2018 - November 2021
https://lcb.app.box.com/s/e89t59s0yb558tjoncjsid710oirqbgd?page=1
Resources:
- Pandas time series / date functionality
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
"""
# Standard imports.
import gc
import json
import re
import requests
# External imports.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
from pandas.tseries.offsets import MonthEnd
import pmdarima as pm
import seaborn as sns
import statsmodels.api as sm
# Internal imports.
from utils import (
forecast_arima,
format_millions,
format_thousands,
)
#------------------------------------------------------------------------------
# Perform housekeeping and define useful functions.
#------------------------------------------------------------------------------
# Define format for all plots.
plt.style.use('fivethirtyeight')
plt.rcParams.update({
'font.family': 'Times New Roman',
'font.size': 20,
})
# Print floats with no decimal places.
pd.options.display.float_format = "{:.0f}".format
def sorted_nicely(unsorted_list):
"""Sort the given iterable in the way that humans expect.
Credit: <NAME> <https://stackoverflow.com/a/2669120/5021266>
License: CC BY-SA 2.5 <https://creativecommons.org/licenses/by-sa/2.5/>
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(unsorted_list, key = alphanum_key)
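# Example (illustrative): sorted_nicely(['WAL10', 'WAL2', 'WAL1']) returns ['WAL1', 'WAL2', 'WAL10'],
# whereas plain sorted() would put 'WAL10' before 'WAL2'.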
#------------------------------------------------------------------------------
# Read in and clean the laboratory data.
#------------------------------------------------------------------------------
# Define lab datasets.
lab_datasets = ['LabResults_0', 'LabResults_1', 'LabResults_2']
# Specify the column types to read.
column_types = {
'global_id' : 'string',
# 'mme_id' : 'category',
# 'type' : 'category',
# 'intermediate_type' : 'category',
# 'status' : 'category',
#'user_id' : 'string',
#'external_id' : 'string',
#'inventory_id' : 'string',
#'testing_status' : 'category',
#'batch_id' : 'string',
#'parent_lab_result_id' : 'string',
#'og_parent_lab_result_id' : 'string',
#'copied_from_lab_id' : 'string',
#'lab_user_id' : 'string',
#'foreign_matter' : 'bool',
#'moisture_content_percent' : 'float16',
#'growth_regulators_ppm' : 'float16',
#'cannabinoid_status' : 'category',
#'cannabinoid_editor' : 'float32',
#'cannabinoid_d9_thca_percent': 'float16',
#'cannabinoid_d9_thca_mg_g' : 'float16',
#'cannabinoid_d9_thc_percent' : 'float16',
#'cannabinoid_d9_thc_mg_g' : 'float16',
#'cannabinoid_d8_thc_percent' : 'float16',
#'cannabinoid_d8_thc_mg_g' : 'float16',
#'cannabinoid_cbd_percent' : 'float16',
#'cannabinoid_cbd_mg_g' : 'float16',
#'cannabinoid_cbda_percent' : 'float16',
#'cannabinoid_cbda_mg_g' : 'float16',
#'cannabinoid_cbdv_percent' : 'float16',
#'cannabinoid_cbg_percent' : 'float16',
#'cannabinoid_cbg_mg_g' : 'float16',
#'terpenoid_pinene_percent' : 'float16',
#'terpenoid_pinene_mg_g' : 'float16',
#'microbial_status' : 'category',
#'microbial_editor' : 'string',
#'microbial_bile_tolerant_cfu_g' : 'float16',
#'microbial_pathogenic_e_coli_cfu_g' : 'float16',
#'microbial_salmonella_cfu_g' : 'float16',
#'mycotoxin_status' : 'category',
#'mycotoxin_editor' : 'string',
#'mycotoxin_aflatoxins_ppb' : 'float16',
#'mycotoxin_ochratoxin_ppb' : 'float16',
#'metal_status' : 'category',
#'metal_editor': 'string',
#'metal_arsenic_ppm' : 'float16',
#'metal_cadmium_ppm' : 'float16',
#'metal_lead_ppm' : 'float16',
#'metal_mercury_ppm' : 'float16',
#'pesticide_status' : 'category',
#'pesticide_editor' : 'string',
#'pesticide_abamectin_ppm' : 'float16',
#'pesticide_acequinocyl_ppm' : 'float16',
#'pesticide_bifenazate_ppm' : 'float16',
#'pesticide_cyfluthrin_ppm' : 'float16',
#'pesticide_cypermethrin_ppm' : 'float16',
#'pesticide_etoxazole_ppm' : 'float16',
#'pesticide_flonicamid_ppm' : 'float',
#'pesticide_fludioxonil_ppm' : 'float16',
#'pesticide_imidacloprid_ppm' : 'float16',
#'pesticide_myclobutanil_ppm' : 'float16',
#'pesticide_spinosad_ppm' : 'float16',
#'pesticide_spirotetramet_ppm' : 'float16',
#'pesticide_thiamethoxam_ppm' : 'float16',
#'pesticide_trifloxystrobin_ppm' : 'float16',
#'solvent_status' : 'category',
#'solvent_editor' : 'string',
#'solvent_butanes_ppm' : 'float16',
#'solvent_heptane_ppm' : 'float16',
#'solvent_propane_ppm' : 'float16',
#'notes' : 'float32',
#'thc_percent' : 'float16',
#'moisture_content_water_activity_rate' : 'float16',
#'solvent_acetone_ppm' : 'float16',
#'solvent_benzene_ppm' : 'float16',
#'solvent_cyclohexane_ppm' : 'float16',
#'solvent_chloroform_ppm' : 'float16',
#'solvent_dichloromethane_ppm' : 'float16',
#'solvent_ethyl_acetate_ppm' : 'float16',
#'solvent_hexanes_ppm' : 'float16',
#'solvent_isopropanol_ppm' : 'float16',
#'solvent_methanol_ppm' : 'float16',
#'solvent_pentanes_ppm' : 'float16',
#'solvent_toluene_ppm' : 'float16',
#'solvent_xylene_ppm' : 'float16',
#'pesticide_acephate_ppm' : 'float16',
#'pesticide_acetamiprid_ppm' : 'float16',
#'pesticide_aldicarb_ppm' : 'float16',
#'pesticide_azoxystrobin_ppm' : 'float16',
#'pesticide_bifenthrin_ppm' : 'float16',
#'pesticide_boscalid_ppm' : 'float16',
#'pesticide_carbaryl_ppm' : 'float16',
#'pesticide_carbofuran_ppm' : 'float16',
#'pesticide_chlorantraniliprole_ppm' : 'float16'
}
# Specify the date columns.
date_columns = ['created_at']
# Specify all of the columns.
columns = list(column_types.keys()) + date_columns
# Read in the lab result data.
shards = []
for dataset in lab_datasets:
lab_data = pd.read_csv(
f'../.datasets/{dataset}.csv',
sep='\t',
encoding='utf-16',
usecols=columns,
dtype=column_types,
parse_dates=date_columns,
# nrows=10000,
# skipinitialspace=True,
)
shards.append(lab_data)
# Aggregate lab data, remove shards to free up memory.
data = pd.concat(shards)
del shards
del lab_data
gc.collect()
# Begin cleaning the lab data.
data.dropna(subset=['global_id'], inplace=True)
data.index = data['global_id']
data = data.sort_index()
# Define lab ID for each observation.
data['lab_id'] = data['global_id'].map(lambda x: x[x.find('WAL'):x.find('.')])
# Remove attested lab results.
data = data.loc[data.lab_id != '']
# Identify all of the labs.
lab_ids = list(data['lab_id'].unique())
# Sort the alphanumeric lab IDs.
lab_ids = sorted_nicely(lab_ids)
#------------------------------------------------------------------------------
# Read in and clean the licensee data.
#------------------------------------------------------------------------------
# Specify the licensee fields
licensee_column_types = {
'global_id' : 'string',
'name': 'string',
'city': 'string',
'type': 'string',
'code': 'string',
}
# Read in the licensee data.
file_name = '../.datasets/Licensees_0.csv'
licensees = pd.read_csv(
file_name,
sep='\t',
encoding='utf-16',
usecols=list(licensee_column_types.keys()),
dtype=licensee_column_types,
)
#------------------------------------------------------------------------------
# Create day, month, year variables.
#------------------------------------------------------------------------------
def format_end_of_month(row):
"""Format a row with a 'date' column as an ISO formatted month."""
month = row['date'].month
if month < 10:
month = f'0{month}'
year = row['date'].year
day = row['date'] + MonthEnd(0)
return f'{year}-{month}-{day.day}'
def format_end_of_year(row):
"""Format a row with a 'date' column as an ISO formatted year."""
year = row['date'].year
return f'{year}-12-31'
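# Example (illustrative): a row whose 'date' is 2021-02-10 yields '2021-02-28' from
# format_end_of_month and '2021-12-31' from format_end_of_year.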
# Add a time column.
data['date'] = pd.to_datetime(data['created_at'])
# Assign day, month, year variables.
data = data.assign(
day=data['date'].dt.date,
month=data.apply(lambda row: format_end_of_month(row), axis=1),
year=data.apply(lambda row: format_end_of_year(row), axis=1),
)
#------------------------------------------------------------------------------
# Calculate interesting lab summary statistics.
#------------------------------------------------------------------------------
# Identify the number of samples tested by each lab.
stats = {}
total_tests = 0
for lab_id in lab_ids:
lab_samples = data.loc[data['lab_id'] == lab_id]
tested_samples = len(lab_samples)
if tested_samples > 0:
code = lab_id.replace('WA', '')
lab_data = licensees.loc[licensees['code'] == code].iloc[0]
stats[lab_id] = {
'name': lab_data['name'],
'city': lab_data['city'],
'total_samples': tested_samples,
}
total_tests += tested_samples
# Calculate the market share for each lab.
lab_stats = pd.DataFrame.from_dict(stats, orient='index')
lab_stats['market_share'] = lab_stats['total_samples'] / total_tests * 100
# Print lab statistics.
statistics = ['name', 'total_samples', 'market_share', 'city']
print(lab_stats[statistics])
# Print by market share.
print(lab_stats.sort_values(by='market_share', ascending=False)[statistics])
#------------------------------------------------------------------------------
# How many analyses are being conducted by each lab on a day-to-day,
# month-to-month, and year-to-year basis?
#------------------------------------------------------------------------------
def plot_samples_by_period(data, column, thousands=False):
"""Plot samples for each lab by a given period."""
lab_ids = sorted_nicely(list(data['lab_id'].unique()))
colors = sns.color_palette('tab20', n_colors=len(lab_ids))
fig, ax = plt.subplots(figsize=(14, 6))
for count, lab_id in enumerate(lab_ids):
lab_samples = data.loc[data['lab_id'] == lab_id]
timeseries = lab_samples.groupby(
column,
as_index=False
).size()
timeseries['date'] = pd.to_datetime(timeseries[column])
timeseries.set_index('date', inplace=True)
plt.plot(
timeseries.index,
timeseries['size'],
label=lab_id,
color=colors[count],
alpha=0.6,
)
plt.ylim(0)
plt.setp(ax.get_yticklabels()[0], visible=False)
if thousands:
ax.yaxis.set_major_formatter(FuncFormatter(format_thousands))
plt.title(f'Samples Tested per {column} by Labs in Washington'.title())
plt.legend(
ncol=5,
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
)
plt.savefig(f'figures/samples_tested_per_{column}_wa.png', dpi=300,
bbox_inches='tight', pad_inches=0.75, transparent=False)
plt.show()
# Plot daily samples tested by each lab.
plot_samples_by_period(data, 'day')
# Plot monthly samples tested by each lab.
plot_samples_by_period(data, 'month')
# Plot yearly samples tested by each lab.
plot_samples_by_period(data, 'year', thousands=True)
#------------------------------------------------------------------------------
# Bonus: Calculate even more lab statistics.
#------------------------------------------------------------------------------
# What is the break down of analyses by sample type? By lab?
# What is the overall failure rate? By lab?
# What is the failure rate by analysis? By lab?
# What is the failure rate day-to-day, month-to-month, and year-to-year? By lab?
#------------------------------------------------------------------------------
# Forecast samples tested by lab.
# How many samples will each lab test in 2022-2026?
#------------------------------------------------------------------------------
# Define forecast horizon and forecast fixed effects.
forecast_horizon = pd.date_range(
start=pd.to_datetime('2021-11-01'),
end=pd.to_datetime('2027-01-01'),
freq='M',
)
forecast_month_effects =
|
pd.get_dummies(forecast_horizon.month)
|
pandas.get_dummies
|
# -*- coding: utf-8 -*-
"""Functions from market data"""
__author__ = "<NAME>"
__version__ = "1"
import pandas as pd
import numpy as np
class TestResult:
def __init__(self, data_sim):
self.trade_list = None
self.long_system_results = pd.DataFrame(index=data_sim.index)
self.short_system_results = pd.DataFrame(index=data_sim.index)
self.system_results =
|
pd.DataFrame(index=data_sim.index)
|
pandas.DataFrame
|
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
load_from_disk = True
start_date = '2018-04-01 00:00:00'
end_date = '2018-05-01 23:59:59'
site = 'MWT2'
es = Elasticsearch(['atlas-kibana.mwt2.org:9200'], timeout=60)
indices = "traces"
print("start:", start_date, "end:", end_date)
start = int(pd.Timestamp(start_date).timestamp())
end = int(
|
pd.Timestamp(end_date)
|
pandas.Timestamp
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import math
import itertools
import pandas as pd
import datetime
from fclib.dataset.retail.benchmark_paths import DATA_DIR
import fclib.dataset.retail.benchmark_settings as bs
# Utility functions
def week_of_month(dt):
"""Get the week of the month for the specified date.
Args:
dt (Datetime): Input date
Returns:
wom (Integer): Week of the month of the input date
"""
from math import ceil
first_day = dt.replace(day=1)
dom = dt.day
adjusted_dom = dom + first_day.weekday()
wom = int(ceil(adjusted_dom / 7.0))
return wom
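# Example (illustrative): week_of_month(datetime.date(2020, 1, 15)) == 3, since Jan 1, 2020
# falls on a Wednesday and the weekday offset pushes the 15th into the third week of the month.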
def lagged_features(df, lags):
"""Create lagged features based on time series data.
Args:
df (Dataframe): Input time series data sorted by time
lags (List): Lag lengths
Returns:
fea (Dataframe): Lagged features
"""
df_list = []
for lag in lags:
df_shifted = df.shift(lag)
df_shifted.columns = [x + "_lag" + str(lag) for x in df_shifted.columns]
df_list.append(df_shifted)
fea = pd.concat(df_list, axis=1)
return fea
def moving_averages(df, start_step, window_size=None):
"""Compute averages of every feature over moving time windows.
Args:
df (Dataframe): Input features as a dataframe
Returns:
fea (Dataframe): Dataframe consisting of the moving averages
"""
if window_size is None:
# Use a large window to compute average over all historical data
window_size = df.shape[0]
fea = df.shift(start_step).rolling(min_periods=1, center=False, window=window_size).mean()
fea.columns = fea.columns + "_mean" + str(window_size)
return fea
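# Illustrative note: for a frame with a single 'move' column, lagged_features(df, [2, 3, 4]) adds
# move_lag2, move_lag3 and move_lag4, while moving_averages(df, 2, 10) adds move_mean10, the
# trailing 10-row mean of values shifted back by 2 steps.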
if __name__ == "__main__":
for submission_round in range(1, bs.NUM_ROUNDS + 1):
print("creating features for round {}...".format(submission_round))
# read in data
train_file = os.path.join(DATA_DIR, "train/train_round_{}.csv".format(submission_round))
aux_file = os.path.join(DATA_DIR, "train/aux_round_{}.csv".format(submission_round))
train_df = pd.read_csv(train_file, index_col=False)
aux_df = pd.read_csv(aux_file, index_col=False)
# calculate move
train_df["move"] = train_df["logmove"].apply(lambda x: round(math.exp(x)))
train_df = train_df[["store", "brand", "week", "profit", "move", "logmove"]]
# merge train_df with aux_df
all_df = pd.merge(train_df, aux_df, how="right", on=["store", "brand", "week"])
# fill missing datetime gaps
store_list = all_df["store"].unique()
brand_list = all_df["brand"].unique()
week_list = range(bs.TRAIN_START_WEEK, bs.TEST_END_WEEK_LIST[submission_round - 1] + 1)
item_list = list(itertools.product(store_list, brand_list, week_list))
item_df = pd.DataFrame.from_records(item_list, columns=["store", "brand", "week"])
all_df = item_df.merge(all_df, how="left", on=["store", "brand", "week"])
# calculate features
# (1) price and price ratio
# Create relative price feature
price_cols = [
"price1",
"price2",
"price3",
"price4",
"price5",
"price6",
"price7",
"price8",
"price9",
"price10",
"price11",
]
all_df["price"] = all_df.apply(lambda x: x.loc["price" + str(int(x.loc["brand"]))], axis=1)
all_df["avg_price"] = all_df[price_cols].sum(axis=1).apply(lambda x: x / len(price_cols))
all_df["price_ratio"] = all_df["price"] / all_df["avg_price"]
# (2) date time related features: week of month, year, month, day
all_df["week_start"] = all_df["week"].apply(
lambda x: bs.FIRST_WEEK_START + datetime.timedelta(days=(x - 1) * 7)
)
all_df["week_of_month"] = all_df["week_start"].apply(lambda x: week_of_month(x))
all_df["year"] = all_df["week_start"].apply(lambda x: x.year)
all_df["month"] = all_df["week_start"].apply(lambda x: x.month)
all_df["day"] = all_df["week_start"].apply(lambda x: x.day)
# (3) lag features and moving average features
# You could also change the lags to create your own lag features as
# needed
all_df = all_df.sort_values(by=["store", "brand", "week"])
lags = [2, 3, 4]
# fill the NA sales before calculating the lag and moving average features
filled_sales = all_df[["store", "brand", "move"]].apply(
lambda x: x.fillna(method="ffill").fillna(method="bfill")
)
lagged_fea = filled_sales.groupby(["store", "brand"]).apply(lambda x: lagged_features(x[["move"]], lags))
moving_avg = filled_sales.groupby(["store", "brand"]).apply(lambda x: moving_averages(x[["move"]], 2, 10))
all_df =
|
pd.concat([all_df, lagged_fea, moving_avg], axis=1)
|
pandas.concat
|
import csv
import itertools as it
import os
import pandas as pd
import numpy as np
import pathlib2
import sys
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
class BaseReporter(object):
"""
Base class for reporting results of baseline and EDA algorithms.
"""
metrics = [
('accuracy', accuracy_score),
('precision-micro', lambda y_true, y_pred: precision_score(y_true, y_pred, average='micro')),
('precision-macro', lambda y_true, y_pred: precision_score(y_true, y_pred, average='macro')),
('precision-weighted', lambda y_true, y_pred: precision_score(y_true, y_pred, average='weighted')),
('recall-micro', lambda y_true, y_pred: recall_score(y_true, y_pred, average='micro')),
('recall-macro', lambda y_true, y_pred: recall_score(y_true, y_pred, average='macro')),
('recall-weighted', lambda y_true, y_pred: recall_score(y_true, y_pred, average='weighted')),
('f1-micro', lambda y_true, y_pred: f1_score(y_true, y_pred, average='micro')),
('f1-macro', lambda y_true, y_pred: f1_score(y_true, y_pred, average='macro')),
('f1-weighted', lambda y_true, y_pred: f1_score(y_true, y_pred, average='weighted')),
]
def __init__(
self, Xs, ys,
n_classifiers, n_classes,
set_names, dataset_name, n_fold, n_run, output_path
):
self.Xs = Xs
self.ys = ys
self.set_sizes = map(len, self.ys)
self.set_names = set_names
self.dataset_name = dataset_name
self.n_run = n_run
self.n_fold = n_fold
self.n_classifiers = n_classifiers
self.n_classes = n_classes
self.output_path = output_path
@staticmethod
def get_output_file_name(output_path, dataset_name, n_fold, n_run, reason):
"""
Formats the name of the output file.
:param output_path: Path to output file (without the file name, obviously).
:param dataset_name: Name of the dataset to be tested.
:param n_fold: Current fold being tested.
:param n_run: Current run being tested.
:param reason: Any additional information regarding the file, e.g., use pop for population file or gm for
graphical model.
:return: The formatted name.
"""
name = os.path.join(
output_path,
'-'.join([dataset_name, str(n_fold), str(n_run), reason]) + '.csv'
)
return name
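# Example (illustrative): get_output_file_name('out', 'iris', 0, 1, 'pop') -> 'out/iris-0-1-pop.csv'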
@staticmethod
def generate_summary(path_read, path_out):
"""
Based on metadata collected at test time, generates a single csv file with the summary of results of all
collected metrics.
:param path_read: Path where all metadata files are.
:param path_out: Path to the file (with the file name and csv termination) to output summary.
"""
pass
class BaselineReporter(BaseReporter):
"""
A class for reporting the partial results of baseline algorithms.
"""
def __init__(self, Xs, ys, n_classifiers, n_classes, set_names, dataset_name, n_fold, n_run, output_path,
algorithm):
"""
Initializes a reporter, which will follow the EDA throughout evolution, reporting the performance of its
population.
:param Xs: Predictive attributes of subsets (e.g., train, validation, test).
:param ys: Labels of subsets (e.g., train, validation, test).
:param n_classifiers: Number of classifiers within the ensemble.
:param n_classes: Number of classes in the problem.
:param set_names: Name of the sets (e.g., train, validation, test).
:param dataset_name: Name of the tested dataset.
:param n_fold: Current fold index.
:param n_run: Current run index.
:param output_path: Path to output meta-files.
:param algorithm: the name of the algorithm being tested.
"""
super(BaselineReporter, self).__init__(
Xs=Xs,
ys=ys,
n_classifiers=n_classifiers,
n_classes=n_classes,
set_names=set_names,
dataset_name=dataset_name,
n_fold=n_fold,
n_run=n_run,
output_path=output_path
)
self.population_file = self.get_output_file_name(
output_path=self.output_path, dataset_name=self.dataset_name,
n_fold=self.n_fold, n_run=self.n_run,
reason=algorithm.__name__
)
with open(self.population_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(
['dataset', 'n_fold', 'n_run', 'set_name', 'set_size'] +
[a for a, b in self.metrics]
)
def save_baseline(self, ensemble):
with open(self.population_file, 'a') as f:
writer = csv.writer(f, delimiter=',')
counter = 0
for set_name, set_size, set_x, set_y in zip(self.set_names, self.set_sizes, self.Xs, self.ys):
preds = ensemble.predict(set_x)
results = []
for metric_name, metric_func in BaseReporter.metrics:
results += [metric_func(y_true=set_y, y_pred=preds)]
writer.writerow(
[self.dataset_name, self.n_fold, self.n_run, set_name, set_size] + results
)
counter += 1
@staticmethod
def generate_summary(path_read, path_out):
files = [xx for xx in pathlib2.Path(path_read).iterdir() if xx.is_file()]
files = list(map(lambda x: str(x).split('/')[-1].split('.')[0].split('-'), files))
summary = pd.DataFrame(files, columns=['dataset_name', 'n_fold', 'n_run', 'algorithm'])
summary['n_fold'] = summary['n_fold'].astype(np.int32)
summary['n_run'] = summary['n_run'].astype(np.int32)
algorithms = summary['algorithm'].unique()
datasets = summary['dataset_name'].unique()
n_folds = len(summary['n_fold'].unique())
n_runs = len(summary['n_run'].unique())
metric_names = [metric_name for metric_name, metric in BaseReporter.metrics]
result_df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(list(it.product(algorithms, datasets))),
columns=list(
it.chain(*zip(map(lambda x: x + ' mean', metric_names), map(lambda x: x + ' std', metric_names)))),
dtype=np.float32
)
for algorithm in algorithms:
for dataset in datasets:
dataset_size = None
__local_metrics = {k: dict() for k in metric_names}
for n_fold in range(n_folds):
for n_run in range(n_runs):
current = pd.read_csv(
os.path.join(
path_read,
'-'.join([dataset, str(n_fold), str(n_run), algorithm]) + '.csv',
),
sep=','
)
# gets dataset size
if dataset_size is None:
set_names = current['set_name'].unique()
dataset_size = 0.
for set_name in set_names:
dataset_size += int((current.loc[current['set_name'] == set_name].iloc[0])['set_size'])
current = current.loc[current['set_name'] == 'test']
for metric_name in metric_names:
try:
__local_metrics[metric_name][n_run] += float(current[metric_name]) * \
    (float(current['set_size']) / float(dataset_size))
except KeyError:
__local_metrics[metric_name][n_run] = float(current[metric_name]) * \
    (float(current['set_size']) / float(dataset_size))
metric_means = {k: np.mean(list(v.values())) for k, v in __local_metrics.items()}
metric_stds = {k: np.std(list(v.values())) for k, v in __local_metrics.items()}
for (metric_name, metric_mean), (metric_name, metric_std) in \
zip(metric_means.items(), metric_stds.items()):
result_df.loc[(algorithm, dataset)][metric_name + ' mean'] = metric_mean
result_df.loc[(algorithm, dataset)][metric_name + ' std'] = metric_std
result_df.to_csv(path_out, index=True, sep=',', float_format='%0.8f')
class EDAReporter(BaseReporter):
"""
A class for reporting the partial results of the Ensemble class.
"""
def __init__(self, Xs, ys, n_classifiers, n_classes, set_names, dataset_name, n_fold, n_run, output_path):
"""
Initializes a reporter, which will follow the EDA throughout evolution, reporting the performance of its
population.
:param Xs: Predictive attributes of subsets (e.g., train, validation, test).
:param ys: Labels of subsets (e.g., train, validation, test).
:param n_classifiers: Number of classifiers within the ensemble.
:param n_classes: Number of classes in the problem.
:param set_names: Name of the sets (e.g., train, validation, test).
:param dataset_name: Name of the tested dataset.
:param n_fold: Current fold index.
:param n_run: Current run index.
:param output_path: Path to output meta-files.
"""
super(EDAReporter, self).__init__(
Xs=Xs,
ys=ys,
n_classifiers=n_classifiers,
n_classes=n_classes,
set_names=set_names,
dataset_name=dataset_name,
n_fold=n_fold,
n_run=n_run,
output_path=output_path
)
self.population_file = self.get_output_file_name(
output_path=self.output_path, dataset_name=self.dataset_name,
n_fold=self.n_fold, n_run=self.n_run, reason='pop'
)
self.gm_file = self.get_output_file_name(
output_path=self.output_path, dataset_name=self.dataset_name,
n_fold=self.n_fold, n_run=self.n_run, reason='gm'
)
with open(self.population_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(
['dataset', 'n_fold', 'n_run', 'generation', 'set_name', 'set_size', 'elite', 'fitness'] +
[a for a, b in EDAReporter.metrics] +
['w_%d_%d' % (a, b) for a, b in list(it.product(np.arange(self.n_classifiers), np.arange(self.n_classes)))]
)
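# besides the metric columns, each population row will carry the individual's voting-weight matrix,
# raveled into one 'w_<classifier>_<class>' column per (classifier, class) pair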
with open(self.gm_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(
['dataset', 'n_fold', 'n_run', 'generation', 'scale'] +
['w_%d_%d' % (a, b) for a, b in list(it.product(np.arange(self.n_classifiers), np.arange(self.n_classes)))]
)
def save_population(self, generation, elite, ensembles, P_fitness):
"""
Saves population metadata to a file. Calculates metrics regarding each individual (for example, accuracy,
precision, etc).
:param generation: Current generation index.
:param elite: A boolean array denoting, for each individual, whether it belongs to the elite.
:param ensembles: A list of ensembles to be saved.
:param P_fitness: An array with the fitness value of each individual.
"""
with open(self.population_file, 'a') as f:
writer = csv.writer(f, delimiter=',')
counter = 0
for is_elite, ensemble, fitness in zip(elite, ensembles, P_fitness):
ravel_weights = ensemble.voting_weights.ravel().tolist()
for set_name, set_size, set_x, set_y in list(zip(self.set_names, self.set_sizes, self.Xs, self.ys)):
preds = ensemble.predict(set_x)
results = []
for metric_name, metric_func in EDAReporter.metrics:
results += [metric_func(y_true=set_y, y_pred=preds)]
writer.writerow(
[self.dataset_name, self.n_fold, self.n_run, generation, set_name, set_size, is_elite, fitness] +
results + ravel_weights
)
counter += 1
def save_gm(self, generation, loc, scale):
"""
Saves a probabilistic graphical model to a file.
:param generation: Current generation index.
:param loc: A matrix with the mean of each variable's distribution.
:param scale: A matrix with the standard deviation of each variable's distribution.
"""
with open(self.gm_file, 'a') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(
[self.dataset_name, self.n_fold, self.n_run, generation, scale] +
loc.ravel().tolist()
)
@staticmethod
def generate_summary(path_read, path_out):
files = [xx for xx in pathlib2.Path(path_read).iterdir() if (xx.is_file() and 'pop.csv' in str(xx))]
files = [x.name.split('.')[0].split('-') for x in files]
summary =
|
pd.DataFrame(files, columns=['dataset_name', 'n_fold', 'n_run', 'pop'])
|
pandas.DataFrame
|
import language_tool_python
from newspaper import Article
from newspaper import fulltext
from newspaper import build
import traceback
import sys
from pandas import DataFrame
import pandas as pd
import numpy as np
import praw
import csv
tool = language_tool_python.LanguageTool('en-US')
data = pd.DataFrame(columns = ['URL', 'Typos', 'Total Words', 'Date', 'Upvotes', 'Upvote Ratio'])
df =
|
pd.read_csv('data/news.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import pytest
from nltk.metrics.distance import masi_distance
from pandas.testing import assert_series_equal
from crowdkit.aggregation.utils import get_accuracy
from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty
from crowdkit.metrics.performers import accuracy_on_aggregates
def test_consistency(toy_answers_df):
assert consistency(toy_answers_df) == 0.9384615384615385
class TestUncertaintyMetric:
def test_uncertainty_mean_per_task_skills(self, toy_answers_df):
performers_skills = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
assert uncertainty(toy_answers_df, performers_skills) == 0.6308666201949331
def test_uncertainty_raises_wrong_compute_by(self, toy_answers_df):
performers_skills = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
with pytest.raises(KeyError):
uncertainty(toy_answers_df, performers_skills, compute_by='invalid')
def test_uncertainty_docstring_examples(self):
assert uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
]
)
) == 0.0
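# three mutually distinct answers form a uniform distribution over three labels,
# so the expected entropy is ln(3) ~ 1.0986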
assert uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'X', 'performer': 'C', 'label': 'Maybe'},
]
)
) == 1.0986122886681096
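# per-task, non-aggregated: task X splits Yes/No (entropy ln(2) ~ 0.693147), task Y is unanimous (entropy 0)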
np.testing.assert_allclose(
uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'Y', 'performer': 'A', 'label': 'Yes'},
{'task': 'Y', 'performer': 'B', 'label': 'Yes'},
]
),
compute_by="task",
aggregate=False
), pd.Series([0.693147, 0.0], index=['X', 'Y'], name='task'), atol=1e-3
)
np.testing.assert_allclose(
uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'Y', 'performer': 'A', 'label': 'Yes'},
{'task': 'Y', 'performer': 'B', 'label': 'Yes'},
]
),
compute_by="performer",
aggregate=False
), pd.Series([0.0, 0.693147], index=['A', 'B'], name='performer'), atol=1e-3
)
def test_uncertainty_raises_skills_not_found(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
]
)
performers_skills = pd.Series(
[1, 1],
index=pd.Index(['A', 'B'], name='performer'),
)
with pytest.raises(AssertionError):
uncertainty(answers, performers_skills)
def test_uncertainty_per_performer(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '3', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'C', 'label': frozenset(['dog'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'C', 'label': frozenset(['cat'])},
]
)
performers_skills = pd.Series(
[1, 1, 1],
index=pd.Index(['A', 'B', 'C'], name='performer'),
)
entropies = uncertainty(
answers,
performers_skills,
compute_by='performer',
aggregate=False
)
assert isinstance(entropies, pd.Series)
assert sorted(np.unique(entropies.index).tolist()) == ['A', 'B', 'C']
# B always answers the same, entropy = 0
np.testing.assert_allclose(entropies['B'], 0, atol=1e-6)
# A answers uniformly, entropy = max possible
np.testing.assert_allclose(entropies['A'], 0.693147, atol=1e-6)
# C answers non-uniformly, entropy = between B and A
assert entropies['A'] > entropies['C'] > entropies['B']
assert entropies.mean() == uncertainty(
answers,
performers_skills,
compute_by='performer',
aggregate=True
)
def test_uncertainty_per_task(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '3', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'C', 'label': frozenset(['dog'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '5', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '5', 'performer': 'B', 'label': frozenset(['dog'])},
]
)
performers_skills = pd.Series(
[1, 1, 1],
index=
|
pd.Index(['A', 'B', 'C'], name='performer')
|
pandas.Index
|
import os
import json
import numpy as np
import pandas as pd
def init_trial_path(args,is_save=True):
"""Initialize the path for a hyperparameter setting
Parameters
----------
args : dict
Settings
Returns
-------
args : dict
Settings with the trial path updated
"""
prename = args.dataset + '_' + str(args.test_dataset)+ '_' +str(args.n_shot_test) + '_' + args.enc_gnn
result_path = os.path.join(args.result_path, prename)
os.makedirs(result_path, exist_ok=True)
trial_id = 0
path_exists = True
while path_exists:
trial_id += 1
path_to_results = result_path + '/{:d}'.format(trial_id)
path_exists = os.path.exists(path_to_results)
args.trial_path = path_to_results
os.makedirs(args.trial_path)
if is_save:
save_args(args)
return args
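# Hedged usage sketch (the attribute values below are placeholders; only the attribute names are
# taken from how init_trial_path reads them to build the result directory name):
def _example_init_trial_path():
    """Illustration only: each call allocates the next unused numbered trial directory."""
    import argparse
    args = argparse.Namespace(dataset='tox21', test_dataset=0, n_shot_test=10,
                              enc_gnn='gin', result_path='results')
    args = init_trial_path(args, is_save=False)
    return args.trial_path  # e.g. 'results/tox21_0_10_gin/1' on the first call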
def save_args(args):
args = args.__dict__
with open(args['trial_path'] + '/args.json', 'w') as f:
    json.dump(args, f)
prename = f"upt{args['update_step']}-{args['inner_lr']}_mod{args['batch_norm']}-{args['rel_node_concat']}"
prename += f"-{args['rel_hidden_dim']}-{args['rel_res']}"
with open(args['trial_path'] + '/' + prename + '.json', 'w') as f:
    json.dump(args, f)
def count_model_params(model):
print(model)
param_size = {}
cnt = 0
for name, p in model.named_parameters():
k = name.split('.')[0]
if k not in param_size:
param_size[k] = 0
p_cnt = 1
# for j in p.size():
for j in p.shape:
p_cnt *= j
param_size[k] += p_cnt
cnt += p_cnt
for k, v in param_size.items():
print(f"Number of parameters for {k} = {round(v / 1024, 2)} k")
print(f"Total parameters of model = {round(cnt / 1024, 2)} k")
class Logger(object):
'''Save training process to log file with simple plot function.'''
def __init__(self, fpath, title=None, resume=False):
self.file = None
self.resume = resume
self.title = '' if title is None else title
self.fpath = fpath
if fpath is not None:
if resume:
self.file = open(fpath, 'r')
name = self.file.readline()
self.names = name.rstrip().split('\t')
self.numbers = {}
for _, name in enumerate(self.names):
self.numbers[name] = []
for numbers in self.file:
numbers = numbers.rstrip().split('\t')
for i in range(0, len(numbers)):
self.numbers[self.names[i]].append(numbers[i])
self.file.close()
self.file = open(fpath, 'a')
else:
self.file = open(fpath, 'w')
def set_names(self, names):
if self.resume:
pass
# initialize numbers as empty list
self.numbers = {}
self.names = names
for _, name in enumerate(self.names):
self.file.write(name)
self.file.write('\t')
self.numbers[name] = []
self.file.write('\n')
self.file.flush()
def append(self, numbers, verbose=True):
assert len(self.names) == len(numbers), 'Numbers do not match names'
for index, num in enumerate(numbers):
self.file.write("{0:.6f}".format(num))
self.file.write('\t')
self.numbers[self.names[index]].append(num)
self.file.write('\n')
self.file.flush()
if verbose:
self.print()
def print(self):
log_str = ""
for name, num in self.numbers.items():
log_str += f"{name}: {num[-1]}, "
print(log_str)
def conclude(self, avg_k=3):
avg_numbers={}
best_numbers={}
valid_name=[]
for name, num in self.numbers.items():
best_numbers[name] = np.max(num)
avg_numbers[name] = np.mean(num[-avg_k:])
if str.isdigit(name.split('-')[-1]):
valid_name.append(name)
vals=np.array([list(avg_numbers.values()),list(best_numbers.values())])
cols = list(self.numbers.keys())
rows = ['avg','best']
df =
|
pd.DataFrame(vals,index=rows, columns=cols)
|
pandas.DataFrame
|
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
assert "2017-09-01 00:00:01" in idx0
assert "2017-09" in idx0
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 =
|
pd.Period("2017-09-03")
|
pandas.Period
|
import csv
import logging
import os
import tempfile
from datetime import datetime, date
from io import StringIO
from typing import Dict, List, Any, TypeVar
import click
import pandas as pd
import s3fs
import sqlalchemy.engine
from requests import HTTPError
from snowflake.connector.pandas_tools import write_pandas
from sqlalchemy import Column, TEXT, TIMESTAMP, DATE, INT, FLOAT, BOOLEAN
from dbd.config.dbd_project import DbdProjectConfigException
from dbd.db.db_table import DbTable
from dbd.log.dbd_exception import DbdException
from dbd.tasks.db_table_task import DbTableTask
from dbd.utils.io_utils import download_file, url_to_filename, is_zip, extract_zip_file, zip_to_url_and_locator, \
is_kaggle, extract_kaggle_dataset_id_and_zip_name, download_kaggle
from dbd.utils.io_utils import is_url
from dbd.utils.sql_parser import SqlParser
log = logging.getLogger(__name__)
class DbdUnsupportedDataFile(DbdException):
pass
class DbdInvalidDataFileFormatException(DbdException):
pass
class DbdInvalidDataFileReferenceException(DbdException):
pass
class DbdDataLoadError(DbdException):
pass
DataTaskType = TypeVar('DataTaskType', bound='DataTask')
def psql_writer(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ', '.join('"{}"'.format(k) for k in keys)
if table.schema:
table_name = '{}.{}'.format(table.schema, table.name)
else:
table_name = table.name
sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
table_name, columns)
cur.copy_expert(sql=sql, file=s_buf)
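# Hedged usage sketch (table name and engine are placeholders): pandas invokes the callable passed
# through DataFrame.to_sql(method=...) with (table, conn, keys, data_iter), which is exactly the
# signature psql_writer implements, so it can be plugged in directly for PostgreSQL targets.
def _example_psql_writer_usage(df: pd.DataFrame, engine: sqlalchemy.engine.Engine) -> None:
    """Illustration only: stream a dataframe into PostgreSQL through COPY using psql_writer."""
    df.to_sql('example_table', engine, if_exists='append', index=False,
              chunksize=1024, method=psql_writer)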
class DataTask(DbTableTask):
"""
Data loading task. Loads data from a local data file (e.g. CSV) to database.
"""
def __init__(self, task_def: Dict[str, Any]):
"""
Data task constructor
:param Dict[str, Any] task_def: Target table definition
"""
super().__init__(task_def)
def data_files(self) -> List[str]:
"""
Task data files
:return: task data files
"""
return self.task_data()
def set_data_files(self, data_files: List[str]):
"""
Sets task data files
:param List[str] data_files: task data file
"""
self.set_task_data(data_files)
@classmethod
def from_code(cls, task_def: Dict[str, Any]) -> DataTaskType:
"""
Creates a new task from table definition (dict)
:param Dict[str, Any] task_def: table definition (dict)
:return: new EltTask instance
:rtype: EltTask
"""
return DataTask(task_def)
# noinspection PyMethodMayBeStatic
def __data_file_columns(self, data_frame: pd.DataFrame) -> List[sqlalchemy.Column]:
"""
Introspects data file columns
:param pd.DataFrame data_frame: Pandas dataframe with populated data
:return: list of data file columns (SQLAlchemy Column[])
:rtype: List[sqlalchemy.Column]
"""
columns = []
for column_name, column_type in data_frame.dtypes.iteritems():
if column_type.name == 'datetime64[ns]':
columns.append(Column(column_name, TIMESTAMP))
elif column_type.name == 'datetime64[D]':
columns.append(Column(column_name, DATE))
elif column_type.name == 'object':
columns.append(Column(column_name, TEXT))
elif column_type.name == 'int64':
columns.append(Column(column_name, INT))
elif column_type.name == 'float64':
columns.append(Column(column_name, FLOAT))
elif column_type.name == 'bool':
columns.append(Column(column_name, BOOLEAN))
else:
columns.append(Column(column_name, TEXT))
return columns
# noinspection DuplicatedCode
def __override_data_file_column_definitions(self, data_frame: pd.DataFrame) -> Dict[str, Any]:
"""
Merges the data file column definitions with the column definitions from the task_def.
The column definitions override the introspected data file types
:param pd.DataFrame data_frame: Pandas dataframe with populated data
:return: data file columns overridden with the task's explicit column definitions
:rtype: Dict[str, Any]
"""
table_def = self.table_def()
column_overrides = table_def.get('columns', {})
data_file_columns = self.__data_file_columns(data_frame)
ordered_columns = {}
for c in data_file_columns:
overridden_column = column_overrides.get(c.name)
if overridden_column:
if 'type' not in overridden_column:
overridden_column['type'] = c.type
ordered_columns[c.name] = overridden_column
else:
ordered_columns[c.name] = {"type": c.type}
table_def['columns'] = ordered_columns
return table_def
def create(self, target_alchemy_metadata: sqlalchemy.MetaData, alchemy_engine: sqlalchemy.engine.Engine,
**kwargs) -> None:
"""
Executes the task. Creates the target table and loads data
:param sqlalchemy.MetaData target_alchemy_metadata: MetaData SQLAlchemy MetaData
:param Dict[str, str] copy_stage_storage: copy stage storage parameters e.g. AWS S3 dict(url, access_key, secret_key)
:param sqlalchemy.engine.Engine alchemy_engine:
"""
try:
copy_stage_storage = kwargs.get('copy_stage_storage')
global_tmpdir = kwargs.get('global_tmpdir')
for data_file in self.data_files():
if len(data_file) > 0:
with tempfile.TemporaryDirectory() as local_tmpdir:
current_tmpdir = local_tmpdir
zip_locator = None
if is_zip(data_file):
data_file, zip_locator = zip_to_url_and_locator(data_file)
if is_url(data_file):
absolute_file_name = os.path.join(current_tmpdir, url_to_filename(data_file))
click.echo(f"\tDownloading file from URL: '{data_file}'.")
download_file(data_file, absolute_file_name)
data_file = absolute_file_name
if is_kaggle(data_file):
current_tmpdir = global_tmpdir
kaggle_dataset_id, kaggle_zip_name = extract_kaggle_dataset_id_and_zip_name(data_file)
absolute_file_name = os.path.join(current_tmpdir, f"{kaggle_zip_name}.zip")
click.echo(f"\tDownloading Kaggle dataset: '{data_file}'.")
download_kaggle(kaggle_dataset_id, current_tmpdir)
data_file = absolute_file_name
if zip_locator is not None and len(zip_locator) > 0:
absolute_file_name = os.path.join(current_tmpdir, os.path.basename(zip_locator))
click.echo(f"\tExtracting file from archive: '{data_file}'.")
extract_zip_file(data_file, zip_locator, current_tmpdir)
data_file = absolute_file_name
click.echo(f"\tProcessing local file: '{data_file}'.")
absolute_file_name = data_file
df = self.__read_file_to_dataframe(absolute_file_name)
mysql_bulk_load_config = alchemy_engine.url.query.get('local_infile') == '1'
if self.db_table() is None:
table_def = self.__override_data_file_column_definitions(df)
db_table = DbTable.from_code(self.target(), table_def, target_alchemy_metadata,
self.target_schema())
self.set_db_table(db_table)
db_table.create()
dtype = self.__adjust_dataframe_datatypes(df, alchemy_engine.dialect.name)
click.echo(f"\tLoading data to database.")
if alchemy_engine.dialect.name == 'snowflake':
self.__bulk_load_snowflake(df, alchemy_engine)
elif alchemy_engine.dialect.name == 'postgresql':
df.to_sql(self.target(), alchemy_engine, chunksize=1024, method=psql_writer,
schema=self.target_schema(), if_exists='append', index=False, dtype=dtype)
elif alchemy_engine.dialect.name == 'mysql' and mysql_bulk_load_config:
self.__bulk_load_mysql(df, alchemy_engine)
elif alchemy_engine.dialect.name == 'bigquery':
self.__bulk_load_bigquery(df, dtype, alchemy_engine)
elif alchemy_engine.dialect.name == 'redshift' and copy_stage_storage is not None:
self.__bulk_load_redshift(df, alchemy_engine, copy_stage_storage)
else:
if alchemy_engine.dialect.name == 'redshift':
log.warning(
"Using default SQLAlchemy writer for Redshift. Specify 'copy_stage' parameter "
"in your profile configuration file to make loading faster.")
if alchemy_engine.dialect.name == 'mysql':
log.warning(
"Using default SQLAlchemy writer for MySQL. Specify 'local_infile=1' parameter "
"in a query parameter of your MySQL connection string to make loading faster.")
df.to_sql(self.target(), alchemy_engine, chunksize=1024, method='multi',
schema=self.target_schema(), if_exists='append', index=False, dtype=dtype)
except sqlalchemy.exc.IntegrityError as e:
raise DbdDataLoadError(f" Referential integrity error: {e}")
except ValueError as e:
raise DbdInvalidDataFileFormatException(f"Error parsing file '{absolute_file_name}': {e}")
except (FileNotFoundError, HTTPError) as e:
raise DbdInvalidDataFileReferenceException(f"Referenced file '{absolute_file_name}' doesn't exist: {e}")
def __bulk_load_bigquery(self, df: pd.DataFrame, dtype: Dict[str, str], alchemy_engine: sqlalchemy.engine.Engine):
"""
Bulk load data to BigQuery
:param pd.DataFrame df: pandas dataframe
:param Dict[str, str] dtype: Data types for each column
:param sqlalchemy.engine.Engine alchemy_engine: SqlAlchemy engine
"""
table_schema = [dict(name=k, type=SqlParser.datatype_to_gbq_datatype(str(v))) for (k, v) in dtype.items()]
target_schema = self.target_schema()
dataset = target_schema if target_schema is not None and len(target_schema) > 0 \
else alchemy_engine.engine.url.database
df.to_gbq(f"{dataset}.{self.target()}", if_exists='append', table_schema=table_schema)
def __bulk_load_redshift(self, df: pd.DataFrame, alchemy_engine: sqlalchemy.engine.Engine,
copy_stage_storage: Dict[str, str]):
"""
Bulk load data to Redshift
:param pd.DataFrame df: pandas dataframe
:param sqlalchemy.engine.Engine alchemy_engine: SqlAlchemy engine
:param Dict[str, str] copy_stage_storage: copy stage storage parameters e.g. AWS S3 dict(url, access_key, secret_key)
"""
if copy_stage_storage is not None:
if 'url' in copy_stage_storage:
aws_stage_path = copy_stage_storage['url']
else:
raise DbdProjectConfigException(
"Missing 'url' key in the 'copy_stage' storage definition parameter in your profile file.")
if 'access_key' in copy_stage_storage:
aws_access_key = copy_stage_storage['access_key']
else:
raise DbdProjectConfigException(
"Missing 'access_key' key in the 'copy_stage' storage definition parameter in your profile file.")
if 'secret_key' in copy_stage_storage:
aws_secret_key = copy_stage_storage['secret_key']
else:
raise DbdProjectConfigException(
"Missing 'secret_key' key in the 'copy_stage' storage definition parameter in your profile file.")
temp_file_name = f"{aws_stage_path.rstrip('/')}/{self.target_schema()}/{self.target()}" \
f"_{datetime.now().strftime('%y%m%d_%H%M%S')}"
df.to_csv(f"{temp_file_name}.csv.gz", index=False, quoting=csv.QUOTE_NONNUMERIC, compression='gzip',
storage_options={"key": aws_access_key,
"secret": aws_secret_key})
with alchemy_engine.connect() as conn:
target_schema = self.target_schema()
target_schema_with_dot = f"{target_schema}." if target_schema else ''
conn.execute(f"copy {target_schema_with_dot}{self.target()} from '{temp_file_name}.csv.gz' "
f"CREDENTIALS 'aws_access_key_id={aws_access_key};aws_secret_access_key={aws_secret_key}' "
f"FORMAT CSV DELIMITER AS ',' DATEFORMAT 'YYYY-MM-DD' EMPTYASNULL IGNOREHEADER 1 GZIP")
conn.connection.commit()
file = s3fs.S3FileSystem(anon=False, key=aws_access_key, secret=aws_secret_key)
file.rm(f"{temp_file_name}.csv.gz")
else:
raise DbdProjectConfigException("Redshift requires 'copy_stage' parameter in your project file.")
def __bulk_load_snowflake(self, df: pd.DataFrame, alchemy_engine: sqlalchemy.engine.Engine):
"""
Bulk load data to snowflake
:param pandas.DataFrame df: DataFrame
:param sqlalchemy.engine.Engine alchemy_engine: SQLAlchemy engine
"""
#df.columns = map(str.upper, df.columns)
#table_name = self.target().upper()
table_name = self.target()
schema_name = self.target_schema()
#schema_name = schema_name.upper() if schema_name else None
with alchemy_engine.connect() as conn:
write_pandas(
conn.connection, df,
table_name=table_name,
schema=schema_name,
quote_identifiers=True)
conn.connection.commit()
def __bulk_load_mysql(self, df: pd.DataFrame, alchemy_engine: sqlalchemy.engine.Engine):
"""
Bulk load data to MySQL
:param pandas.DataFrame df: DataFrame
:param sqlalchemy.engine.Engine alchemy_engine: SQLAlchemy engine
"""
with tempfile.TemporaryDirectory() as tmp_dir_name:
temporary_file_name = f"{tmp_dir_name}/bulk.csv"
df.to_csv(temporary_file_name, index=False, na_rep='\\N')
target_schema = self.target_schema()
target_schema_with_dot = f"{target_schema}." if target_schema else ''
with alchemy_engine.connect() as conn:
query = f"LOAD DATA LOCAL INFILE '{temporary_file_name}' " \
f"INTO TABLE {target_schema_with_dot}{self.target()} " \
f"FIELDS TERMINATED BY ',' " \
f"OPTIONALLY ENCLOSED BY '\"' ESCAPED BY '\\\\' IGNORE 1 LINES"
conn.execute(query)
conn.connection.commit()
def __adjust_dataframe_datatypes(self, df, dialect_name: str):
"""
Adjusts the dataframe datatypes to match the target table
:param pd.DataFrame df: Pandas dataframe with populated data
:param str dialect_name: SQLAlchemy dialect name
:return: dtype for to_sql
"""
dtype = {}
for c in self.db_table().columns():
column_name = c.name()
column_type = c.type()
# Snowflake fix
if str(column_type).upper().startswith('TIMESTAMP_'):
python_type = datetime
else:
python_type = SqlParser.parse_alchemy_data_type(column_type).python_type
if isinstance(python_type, type) and issubclass(python_type, datetime):
if dialect_name in ['bigquery']:
df[column_name] = pd.to_datetime(df[column_name]).dt.strftime('%Y-%m-%d %H:%M:%S')
df[column_name] = df[column_name].astype('datetime64[ns]')
elif dialect_name in ['snowflake']:
df[column_name] = pd.to_datetime(df[column_name]).dt.strftime('%Y-%m-%d %H:%M:%S')
else:
df[column_name] = pd.to_datetime(df[column_name])
elif isinstance(python_type, type) and issubclass(python_type, date):
if dialect_name in ['bigquery']:
df[column_name] =
|
pd.to_datetime(df[column_name])
|
pandas.to_datetime
|
# coding=utf-8
import os
import unittest
from nose.tools import *
import pandas as pd
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.matcher.matcherutils as mu
from py_entitymatching.debugmatcher.debug_gui_decisiontree_matcher import _vis_debug_dt, \
vis_tuple_debug_dt_matcher
from py_entitymatching.debugmatcher.debug_decisiontree_matcher import visualize_tree, \
debug_decisiontree_matcher
from py_entitymatching.feature.autofeaturegen import get_features_for_matching
from py_entitymatching.feature.extractfeatures import extract_feature_vecs
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.matcher.dtmatcher import DTMatcher
from py_entitymatching.utils.generic_helper import get_install_path
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class VisDTDebugMatcherTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_vis_debug_matcher_dt_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
train_test = mu.split_train_test(feature_vectors)
train = train_test['train']
test = train_test['test']
_vis_debug_dt(dt, train, test,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels', show_window=False)
def test_vis_tuple_debug_dt_matcher_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
s = pd.DataFrame(feature_vectors.loc[0])
s1 = s.T
vis_tuple_debug_dt_matcher(dt, s1,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'])
def test_vis_tuple_debug_dt_matcher_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
s = pd.DataFrame(feature_vectors.loc[0])
s1 = s.T
vis_tuple_debug_dt_matcher(dt.clf, s1,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'])
def test_vis_tuple_debug_dt_matcher_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
feature_vectors.drop(['_id', 'ltable_ID', 'rtable_ID', 'labels'], axis=1, inplace=True)
s = pd.DataFrame(feature_vectors.loc[0])
s1 = s.T
vis_tuple_debug_dt_matcher(dt.clf, s1, exclude_attrs=None)
@raises(AssertionError)
def test_vis_debug_matcher_dt_invalid_df(self):
_vis_debug_dt(None, pd.DataFrame(),
|
pd.DataFrame()
|
pandas.DataFrame
|
# %load_ext autoreload
# %autoreload 2
from deepar.dataset.time_series import MockTs
from deepar.model.lstm import DeepAR
from numpy.random import normal
import tqdm
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
ts = MockTs()
dp_model = DeepAR(ts, epochs=50)
dp_model.instantiate_and_fit()
def get_sample_prediction(sample, fn):
sample = np.array(sample).reshape(1, 20, 1)
output = fn([sample])
samples = []
for mu, sigma in zip(output[0].reshape(20), output[1].reshape(20)):
samples.append(normal(loc=mu, scale=np.sqrt(sigma), size=1000)[0])
return np.array(samples)
batch = ts.next_batch(1, 20)
ress = []
for i in tqdm.tqdm(range(300)):
ress.append(get_sample_prediction(batch[0], dp_model.predict_theta_from_input))
res_df =
|
pd.DataFrame(ress)
|
pandas.DataFrame
|
#!/usr/bin/env python3
import re
import sys
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print("Usage: gemm.py outputFile")
sys.exit(-1);
print("This is the name of the script: ", sys.argv[0])
print("Number of arguments: ", len(sys.argv))
print("The arguments are: " , str(sys.argv))
rawfile=sys.argv[1]
print("Result file:", rawfile)
with open(rawfile) as f:
lines = f.readlines()
allres=[]
for (iline,line) in enumerate(lines):
if line.startswith("# MB:"):
res={}
res["mb"]=int(line.split(':')[1])
if line.startswith("# fixed acc:"):
res["acc"]=float(line.split(':')[1])
if line.startswith("# reorder inner products:"):
res["rip"]=int(line.split(':')[1])
for (pre,txt) in [
("i","Ainitial_ranks:"),
("f", "Cfinal_ranks:")]:
if line.startswith(txt):
res[pre+"avg"] = float(re.split(':| ',line)[2])
res[pre+"min"] = int(re.split(':| ',line)[4])
res[pre+"max"] = int(re.split(':| ',line)[6])
if line.startswith("ReShg"):
arr=re.split('\t', line);
res["flop"] = int(arr[1])
res["gflop"] = float(arr[2])
res["gflop/s"] = float(arr[3])
res["time"] = float(arr[4])
line=lines[iline+1]
arr=line.strip().split(' ');
#print(arr)
res["M"] = int(arr[0])
#res["N"] = int(arr[1])
#res["K"] = int(arr[2])
#res["time2"] = float(arr[3])
print(res)
allres.append(res)
df=pd.DataFrame.from_dict(allres)
print(df)
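# everything below this unconditional exit is currently skipped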
sys.exit(0)
df2 = df.loc[df.groupby(['M','rip'])['time'].idxmin()]
print(df2)
df3=df2[['M','rip','gflop','time']]
print(df3)
#plt.subplots()
#for val in ['time','gflop']:
val='gflop'
what='flops'
for (val, what, ylabel) in [
('gflop','flops','GFlop')
, ('time','time','Time(s)')
#, ('favg','final rank','Final Average Rank')
]:
table =
|
pd.pivot_table(df3, values=[val], index=['M'], columns=['rip'])
|
pandas.pivot_table
|
"""
Module report
================
A module with helper functions for computing pre-defined plots for the analysis
of fragment combinations.
"""
import warnings
import logging
import argparse
import sys
from shutil import rmtree
from datetime import datetime
import re
from pathlib import Path
from collections import OrderedDict
# data handling
import numpy as np
import json
import pandas as pd
from pandas import DataFrame
import networkx as nx
# data visualization
from matplotlib import pyplot as plt
import seaborn as sns
# from pylab import savefig
from adjustText import adjust_text
# chemoinformatics
import rdkit
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Mol
from rdkit.Chem import rdChemReactions
# docs
from typing import List
from typing import Tuple
from rdkit import RDLogger
# custom libraries
import npfc
from npfc import utils
from npfc import load
from npfc import save
from npfc import fragment_combination
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLOBALS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# test
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CLASSES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class ReporterProcess:
def __init__(self,
chunk_load: str,
chunk_std_passed: str,
chunk_std_filtered: str,
chunk_std_error: str,
chunk_dedupl: str,
WD_out: str,
max_examples: int = 1,
):
pass
class ReporterFragmentSearch:
def __init__(self):
pass
class ReporterFragmentCombination:
def __init__(self):
pass
class ReporterFragmentCombinationGraph:
def __init__(self):
pass
class ReporterPNP:
def __init__(self):
pass
def _parse_std_chunks(chunks: List[str]) -> DataFrame:
"""Parse all output files of a category (passed, filtered or error) for the std step and return a corresponding a results summary.
:param chunks: output files for a category of std results
:return: summary DF with counts
"""
# parse all files
dfs = []
for c in chunks:
df = pd.read_csv(c, sep="|", compression="gzip").groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
if len(df.index) > 0:
dfs.append(df)
# if no case was found, return an empty dataframe
if len(dfs) == 0:
df = pd.DataFrame([], columns=["Count", "Category"])
return df
# concatenate all dataframes and compute the sum of all counts
df = pd.concat(dfs)
df["Category"] = df.index # I don't know how to group by index!
df = df.groupby("Category").sum()
df["Category"] = df.index.map(lambda x: x.replace('filter_', ''))
# df['Perc_status'] = df['Count'].map(lambda x: f"{x/tot_mols:.2%}")
return df
def preprocess(input_load: str,
output_file: str,
input_std_passed: str = None,
input_std_filtered: str = None,
input_std_error: str = None,
input_dedupl: str = None,
input_depict: str = None,
num_examples: int = 0,
):
"""The information is not looked everywhere using the same logic:
- input_load: the log file from the chunk being loaded
"""
# load
df = pd.read_csv(input_load, sep="@", header=None) # char not found in the log file
records = df[df[0].str.contains("FAILURE")].iloc[0][0].split()
num_total = int(records[6])
num_errors = int(records[9])
num_passed = int(df[df[0].str.contains("SAVED")].iloc[0][0].split()[6])
if num_total != num_errors + num_passed:
raise ValueError(f"Error during parsing of log file: '{input_load}': {num_passed} + {num_errors} != {num_total}")
df_load = DataFrame({'Category': ['loaded', 'cannot_load'], 'Count': [num_passed, num_errors]})
# standardize
df_std_passed = load.file(input_std_passed, decode=False)[['task', 'status']].groupby("task").count()[['status']].reset_index().rename({'task': 'Category', 'status': 'Count'}, axis=1)
df_std_filtered = load.file(input_std_filtered, decode=False)[['task', 'status']].groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
df_std_error = load.file(input_std_error, decode=False)[['task', 'status']].groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
# dedupl
df =
|
pd.read_csv(input_dedupl, sep="@", header=None)
|
pandas.read_csv
|
import pandas as pd
import csv
df1 = pd.read_csv('data-scientist.csv')
df2 = pd.read_csv('software-engineer.csv')
df3 =
|
pd.read_csv('computer-systems.csv')
|
pandas.read_csv
|
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
|
pd.to_datetime("2020-01-03 00:00:00")
|
pandas.to_datetime
|
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
__copyright__ = "Copyright (c) 2017, Intel Research and Development Ireland Ltd."
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pandas
from analytics_engine.heuristics.beans.infograph import InfoGraphNode
from analytics_engine import common
LOG = common.LOG
class SnapUtils(object):
@staticmethod
def annotate_machine_pu_util(internal_graph, node):
source = InfoGraphNode.get_machine_name_of_pu(node)
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_compute_utilization(machine)
if 'intel/use/compute/utilization' not in machine_util.columns:
sum_util = None
cpu_metric = 'intel/procfs/cpu/utilization_percentage'
pu_util_df = InfoGraphNode.get_compute_utilization(node)
if cpu_metric in pu_util_df.columns:
pu_util = pu_util_df[cpu_metric]
pu_util = pu_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = pu_util
InfoGraphNode.set_compute_utilization(machine, machine_util)
else:
LOG.info('CPU utilization not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def annotate_machine_disk_util(internal_graph, node):
source = InfoGraphNode.get_attributes(node)['allocation']
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_disk_utilization(machine)
if 'intel/use/disk/utilization' not in machine_util.columns:
disk_metric = 'intel/procfs/disk/utilization_percentage'
disk_util_df = InfoGraphNode.get_disk_utilization(node)
if disk_metric in disk_util_df.columns:
disk_util = disk_util_df[disk_metric]
disk_util = disk_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = disk_util
InfoGraphNode.set_disk_utilization(machine, machine_util)
else:
LOG.info('Disk utilization not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use disk for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def annotate_machine_network_util(internal_graph, node):
source = InfoGraphNode.get_attributes(node)['allocation']
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_network_utilization(machine)
if 'intel/use/network/utilization' not in machine_util.columns:
net_metric = 'intel/psutil/net/utilization_percentage'
net_util_df = InfoGraphNode.get_network_utilization(node)
if net_metric in net_util_df.columns:
net_util = net_util_df[net_metric]
net_util = net_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = net_util
InfoGraphNode.set_network_utilization(machine, machine_util)
else:
LOG.info('Network utilization not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use network for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def utilization(internal_graph, node, telemetry):
# machine usage
telemetry_data = telemetry.get_data(node)
if 'intel/use/compute/utilization' in telemetry_data:
InfoGraphNode.set_compute_utilization(node,
pandas.DataFrame(telemetry_data['intel/use/compute/utilization'],
columns=['intel/use/compute/utilization']))
# pu usage
if 'intel/procfs/cpu/utilization_percentage' in telemetry_data:
InfoGraphNode.set_compute_utilization(node,
pandas.DataFrame(
telemetry_data['intel/procfs/cpu/utilization_percentage'],
columns=['intel/procfs/cpu/utilization_percentage']))
if 'intel/use/memory/utilization' in telemetry_data:
InfoGraphNode.set_memory_utilization(node, pandas.DataFrame(telemetry_data['intel/use/memory/utilization']))
if 'intel/use/disk/utilization' in telemetry_data:
InfoGraphNode.set_disk_utilization(node, pandas.DataFrame(telemetry_data['intel/use/disk/utilization']))
if 'intel/use/network/utilization' in telemetry_data:
InfoGraphNode.set_network_utilization(node,
pandas.DataFrame(telemetry_data['intel/use/network/utilization']))
# supporting not available /use/ metrics
if 'intel/procfs/meminfo/mem_total' in telemetry_data and 'intel/procfs/meminfo/mem_used' in telemetry_data:
# LOG.info('Found memory utilization procfs')
mem_used = telemetry_data['intel/procfs/meminfo/mem_used'].fillna(0)
mem_total = telemetry_data['intel/procfs/meminfo/mem_total'].fillna(0)
mem_util = mem_used * 100 / mem_total
mem_util.name = 'intel/procfs/memory/utilization_percentage'
InfoGraphNode.set_memory_utilization(node, pandas.DataFrame(mem_util))
if 'intel/procfs/disk/io_time' in telemetry_data:
io_time = telemetry_data['intel/procfs/disk/io_time'].fillna(0)
disk_util = io_time*100/1000
disk_util.name = 'intel/procfs/disk/utilization_percentage'
InfoGraphNode.set_disk_utilization(node, pandas.DataFrame(disk_util))
if 'intel/psutil/net/bytes_recv' in telemetry_data and 'intel/psutil/net/bytes_sent' in telemetry_data:
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
nic_speed = InfoGraphNode.get_nic_speed_mbps(machine) * 1000000
net_data = telemetry_data.filter(['timestamp', 'intel/psutil/net/bytes_recv','intel/psutil/net/bytes_sent'], axis=1)
net_data = net_data.fillna(0)
net_data['intel/psutil/net/bytes_total'] = net_data['intel/psutil/net/bytes_recv']+net_data['intel/psutil/net/bytes_sent']
net_data_interval = net_data.set_index('timestamp').diff()
net_data_interval['intel/psutil/net/utilization_percentage'] = net_data_interval['intel/psutil/net/bytes_total'] * 100 /nic_speed
net_data_pct = pandas.DataFrame(net_data_interval['intel/psutil/net/utilization_percentage'])
InfoGraphNode.set_network_utilization(node, net_data_pct)
elif 'intel/procfs/iface/bytes_recv' in telemetry_data and 'intel/procfs/iface/bytes_sent' in telemetry_data:
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
nic_speed = InfoGraphNode.get_nic_speed_mbps(machine) * 1000000
net_data = telemetry_data.filter(['timestamp', 'intel/procfs/iface/bytes_recv','intel/procfs/iface/bytes_sent'], axis=1)
net_data = net_data.fillna(0)
net_data['intel/psutil/net/bytes_total'] = net_data['intel/procfs/iface/bytes_recv']+net_data['intel/procfs/iface/bytes_sent']
net_data_interval = net_data.set_index('timestamp').diff()
net_data_interval['intel/psutil/net/utilization_percentage'] = net_data_interval['intel/psutil/net/bytes_total'] * 100 /nic_speed
net_data_pct = pandas.DataFrame(net_data_interval['intel/psutil/net/utilization_percentage'])
InfoGraphNode.set_network_utilization(node, net_data_pct)
if 'intel/docker/stats/cgroups/cpu_stats/cpu_usage/total' in telemetry_data:
# Container node
#cpu util
cpu_data = telemetry_data.filter(['timestamp', 'intel/docker/stats/cgroups/cpu_stats/cpu_usage/total'], axis=1)
cpu_data_interval = cpu_data.set_index('timestamp').diff()
#cpu usage counter is in nanoseconds; dividing the per-interval delta by 1e7 yields a percentage (assuming 1 s sampling)
cpu_data_interval['intel/docker/stats/cgroups/cpu_stats/cpu_usage/percentage'] = cpu_data_interval['intel/docker/stats/cgroups/cpu_stats/cpu_usage/total'] / 10000000
cpu_data_pct = pandas.DataFrame(cpu_data_interval['intel/docker/stats/cgroups/cpu_stats/cpu_usage/percentage'])
InfoGraphNode.set_compute_utilization(node, cpu_data_pct)
if "intel/docker/stats/cgroups/memory_stats/usage/usage" in telemetry_data:
#container mem util
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
local_mem = int(InfoGraphNode.get_attributes(machine).get("local_memory"))
mem_data = telemetry_data.filter(['timestamp', "intel/docker/stats/cgroups/memory_stats/usage/usage"], axis=1)
mem_data["intel/docker/stats/cgroups/memory_stats/usage/percentage"] = mem_data["intel/docker/stats/cgroups/memory_stats/usage/usage"]/local_mem * 100
mem_data_pct = pandas.DataFrame(mem_data["intel/docker/stats/cgroups/memory_stats/usage/percentage"])
InfoGraphNode.set_memory_utilization(node, mem_data_pct)
if "intel/docker/stats/network/tx_bytes" in telemetry_data:
#container network util
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
nic_speed = InfoGraphNode.get_nic_speed_mbps(machine) * 1000000
net_data = telemetry_data.filter(['timestamp', "intel/docker/stats/network/tx_bytes","intel/docker/stats/network/rx_bytes"], axis=1)
net_data = net_data.fillna(0)
net_data['intel/docker/stats/network/bytes_total'] = net_data["intel/docker/stats/network/tx_bytes"]+net_data["intel/docker/stats/network/rx_bytes"]
net_data_interval = net_data.set_index('timestamp').diff()
net_data_interval['intel/docker/stats/network/utilization_percentage'] = net_data_interval['intel/docker/stats/network/bytes_total'] * 100 /nic_speed
net_data_pct = pandas.DataFrame(net_data_interval['intel/docker/stats/network/utilization_percentage'])
InfoGraphNode.set_network_utilization(node, net_data_pct)
if "intel/docker/stats/cgroups/blkio_stats/io_time_recursive/value" in telemetry_data:
#container disk util
disk_data = telemetry_data.filter(['timestamp', "intel/docker/stats/cgroups/blkio_stats/io_time_recursive/value"], axis=1)
disk_data_interval = disk_data.set_index('timestamp').diff()
#util data in milliseconds
disk_data_interval["intel/docker/stats/cgroups/blkio_stats/io_time_recursive/percentage"] = \
disk_data_interval["intel/docker/stats/cgroups/blkio_stats/io_time_recursive/value"] / 1000000
disk_data_pct = pandas.DataFrame(disk_data_interval["intel/docker/stats/cgroups/blkio_stats/io_time_recursive/percentage"])
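# Illustrative sketch (not from the original source): the network branches above turn
# cumulative rx/tx byte counters into a per-interval utilization percentage with
# pandas diff(). Column names and the 1000 Mbps NIC speed below are assumptions for
# the example only; the scaling mirrors the code above.
def _example_net_counters_to_percentage():
    import pandas
    nic_speed = 1000 * 1000000  # assumed NIC speed, scaled as in the code above
    net_data = pandas.DataFrame({
        'timestamp': [0, 1, 2, 3],
        'bytes_recv': [0.0, 5e6, 12e6, 20e6],
        'bytes_sent': [0.0, 1e6, 2e6, 3e6]})
    net_data['bytes_total'] = net_data['bytes_recv'] + net_data['bytes_sent']
    # diff() converts cumulative counters into per-interval deltas (first row is NaN)
    interval = net_data.set_index('timestamp').diff()
    interval['utilization_percentage'] = interval['bytes_total'] * 100 / nic_speed
    return pandas.DataFrame(interval['utilization_percentage'])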
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dash
import pandas
from app import app
import cfg
import time
import pickle
import dash_html_components as html
from plotly import tools
import plotly.graph_objs as go
from iclip_tab import createGeneModelPlot
import plotly.utils as pu
@app.callback(
dash.dependencies.Output('rnaDescDiv', component_property='children'),
[dash.dependencies.Input('geneDrop', 'value')],
)
def rnaDesc(name):
if cfg.descAvail:
try:
return [
html.P(
cfg.geneDescriptions.loc[
cfg.geneDescriptions['ensembl_gene_id'] == name,
['description']
].iloc[0]
)
]
except IndexError:
return ['No description available']
except KeyError:
return ['No description available']
else:
return ['No description available']
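# Illustrative sketch (not wired into the app): the callback above reduces to a
# .loc filter on 'ensembl_gene_id' with IndexError/KeyError fallbacks. The tiny
# frame below is invented for the example.
def _example_gene_description_lookup(name):
    descs = pandas.DataFrame({'ensembl_gene_id': ['ENSG0001'],
                              'description': ['example gene']})
    try:
        return descs.loc[descs['ensembl_gene_id'] == name, ['description']].iloc[0]
    except (IndexError, KeyError):
        return 'No description available'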
@app.callback(
dash.dependencies.Output('spliceGraph', 'figure'),
[dash.dependencies.Input('spliceMem', 'data'),
dash.dependencies.Input('rnaParamList', 'values'),
dash.dependencies.Input('rnaRadio', 'value'),
dash.dependencies.Input('covColorFinal', 'data'),
dash.dependencies.Input('eventColorFinal', 'data'),
dash.dependencies.Input('legendSpacingDiv', 'data'),
dash.dependencies.Input('coverageScale', 'value'),
dash.dependencies.Input('sequenceRadio', 'value'),
dash.dependencies.Input('eventScale', 'value'),
dash.dependencies.Input('bsGraphMem', 'data')]
)
def showRNA(figData, dataSets, displayType, covColor, eventColor, legendSpacing, coverageScale, seqDisp, eventScale, bsMem):
"""Update callback that selects traces to be displayed based on settings.
Positional arguments:
figData -- Trace data from the data callback.
dataSets -- List of datasets to display.
displayType -- Type of splice event display.
covColor -- Colors for coverage traces.
eventColor -- Colors for splice events.
legendSpacing -- Specifies margin between colorbar and other legend items.
coverageScale -- Scaling factor for coverage plots.
eventScale -- Scaling factor for event plots.
seqDisp -- Selected sequence display mode.
bsMem -- Stored sequence traces used for the sequence row.
"""
legendColumnSpacing = legendSpacing
figData = figData
traces = figData['rnaTraces']
geneModels = figData['geneModels']
coverageColors = covColor
try:
seqTrace = bsMem[seqDisp]
if seqDisp == 'heatSeq':
for i in seqTrace:
i['showscale'] = False
except (KeyError, TypeError):
seqTrace = []
#print(seqTrace)
eventColors = eventColor
eventIndices = [] # Save indices of all elements that contain event traces
rnaDataSets = sorted(list(cfg.coverageData.keys()))
displayed_rnaDataSet = []
maxYDict = figData['maxYList']
axisTitles = []
yVals = []
for rm in sorted(dataSets):
for set in rnaDataSets:
if rm == set.split('_')[0]:
displayed_rnaDataSet.append(set)
finTraces = []
eventIndices = []
for index, t in enumerate(traces):
try:
if len(t[displayType]) > 1:
try:
if t[displayType][0]['meta'] in displayed_rnaDataSet:
if displayType == 'two':
for i in t[displayType]:
newColor = eventColors[i['name']]
i['marker'] = {'color' : newColor}
finTraces.append(t[displayType])
eventIndices.append(index//2)
axisTitles.append('')
except KeyError:
if t[displayType]['meta'] in displayed_rnaDataSet:
if displayType == 'two':
newColor = eventColors[t['name']]
t['marker'] = {'color' : newColor}
finTraces.append(t[displayType])
eventIndices.append(index//2)
axisTitles.append('')
else:
if t[displayType][0] in displayed_rnaDataSet:
finTraces.append([])
axisTitles.append('')
eventIndices.append(index//2)
except KeyError:
if t['meta'] in displayed_rnaDataSet:
newColor = coverageColors[t['meta'].split('_')[0]]
yVals.append(maxYDict[t['meta']])
axisTitles.append('')
t['fillcolor'] = newColor
finTraces.append(t)
numIsoforms = len(geneModels) # Number of isoforms in the gene model
numRows = len(finTraces)+numIsoforms+1#+1 for sequence trace
# Setup row heights based on available data
plotSpace = 0.9 # Space taken up by data tracks
spacingSpace = 1.0 - plotSpace # Space left for spacer tracks
rowHeight = plotSpace / numRows
if numRows > 1:
vSpace = spacingSpace / (numRows - 1)
else:
vSpace = spacingSpace
rowHeights = []
rowHeights.append(rowHeight/2)
eventHeights = []
eventMaxHeights = figData['maxHeights']
for index, i in enumerate(eventMaxHeights):
if index in eventIndices:
if i == 0:
eventHeights.append(0)
if i > 0 and i <= 5:
eventHeights.append(1)
if i >= 6 and i < 10:
eventHeights.append(2)
if i >= 10:
eventHeights.append(i % 5 +1)
if cfg.spliceEventAvail:
for i in range(1,numRows):
if i > len(finTraces):
rowHeights.append(0.5 * rowHeight) # Gene model row
elif (i % 2 == 0):
try:
rowHeights.append(eventHeights[(i//2)-1] * rowHeight * eventScale) # Splice event row
except IndexError:
rowHeights.append(0)
else:
rowHeights.append(3 * rowHeight * coverageScale) # Coverage row
else:
for i in range(1,numRows):
if i > len(finTraces): rowHeights.append(0.5 * rowHeight) # Gene model row
else:
rowHeights.append(3 * rowHeight * coverageScale) # Coverage row
fig = tools.make_subplots(print_grid=False, rows=numRows, cols=1,
shared_xaxes=True, row_width=rowHeights[::-1], vertical_spacing = vSpace)
# Layouting of the figure
eventIndicesDraw = [] # Save indices of all elements that contain event traces
for i in seqTrace:
fig.append_trace(i, 1, 1)
for index, t in enumerate(finTraces):
try:
fig.append_trace(t, index + 2, 1)
except ValueError:
eventIndicesDraw.append(index)
for i in eventIndicesDraw: # Add event traces after all coverage traces have been added for legend item positioning
for x in finTraces[i]:
fig.append_trace(x, i + 2, 1)
counter = len(finTraces)+1
for model in geneModels:
for part in model:
fig.append_trace(part, counter+1, 1)
counter += 1
fig['layout']['xaxis'].update(nticks=6)
fig['layout']['xaxis'].update(tickmode='array')
fig['layout']['xaxis'].update(showgrid=True)
fig['layout']['xaxis'].update(ticks='outside')
fig['layout']['xaxis'].update(ticksuffix='b')
fig['layout'].update(hovermode='closest')
fig['layout']['yaxis'].update(fixedrange=True)
fig['layout'].update(barmode='relative')
# Reverse x-axis if gene is on - strand to always show models in 3'->5'
if figData['strand'] == '-':
fig['layout']['xaxis'].update(autorange='reversed')
for i in range(1, numRows+1): # prevent zoom on y axis
fig['layout']['yaxis' + str(i)].update(fixedrange=True)
try:
maxYVal = max(yVals)
except ValueError:
maxYVal = 0
blockHeight = 0.4
for i in range(1, numRows):
if cfg.spliceEventAvail:
if i % 2 != 0 and i <= len(finTraces): # Coverage row
fig['layout']['yaxis' + str(i+1)].update(range=[0, maxYVal],title={'text': axisTitles[i-1]})
fig['layout']['yaxis' + str(i+1)].update(showticklabels=True, showgrid=True, zeroline=True)
else: # Event or gene model row
if i > len(finTraces): # Gene model row
fig['layout']['yaxis' + str(i+1)].update(showticklabels=False, showgrid=False, zeroline=False)
fig['layout']['yaxis' + str(i+1)].update(range=[-blockHeight, blockHeight])
else: # Event row
fig['layout']['yaxis' + str(i+1)].update(showticklabels=False, showgrid=False, zeroline=False, title={'text': axisTitles[i-1]})
else:
if i <= len(finTraces): # Coverage row
fig['layout']['yaxis' + str(i+1)].update(range=[0, maxYVal], title={'text': axisTitles[i-1]})
fig['layout']['yaxis' + str(i+1)].update(showticklabels=True, showgrid=True, zeroline=True)
else: # Gene model row
fig['layout']['yaxis' + str(i+1)].update(showticklabels=False, showgrid=False, zeroline=False)
fig['layout']['yaxis' + str(i+1)].update(range=[-blockHeight, blockHeight], )
# Setup plot height, add 85 to account for margins
fig['layout'].update(margin=go.layout.Margin(l=60, r=40, t=25, b=60),)
fig['layout']['yaxis'].update(visible = False, showticklabels=False, showgrid=False, zeroline=False)
rowScales = [x/rowHeight for x in rowHeights]
size = 0
for i in rowScales:
size += 50*i
fig['layout']['height'] = (size + 85)
# set spacing for the second legend column
fig['layout']['legend'].update(x = legendColumnSpacing)
#print('Showcallback: ' + str(end-start))
return fig
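# Illustrative sketch (not used by the app): the subplot sizing above reserves 90%
# of the figure for data rows, splits the remainder into inter-row spacing, and
# weights coverage rows 3x and gene-model rows 0.5x a base row height. This mirrors
# the simpler branch without splice-event rows; the arguments are invented inputs.
def _example_row_heights(numRows, numCoverage, coverageScale=1.0):
    plotSpace = 0.9
    rowHeight = plotSpace / numRows
    vSpace = (1.0 - plotSpace) / (numRows - 1) if numRows > 1 else (1.0 - plotSpace)
    heights = [rowHeight / 2]  # sequence row
    for i in range(1, numRows):
        if i > numCoverage:
            heights.append(0.5 * rowHeight)  # gene model row
        else:
            heights.append(3 * rowHeight * coverageScale)  # coverage row
    return heights, vSpace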
@app.callback(
dash.dependencies.Output('spliceMem', 'data'),
[dash.dependencies.Input('geneDrop', 'value')],
[dash.dependencies.State('rnaRadio', 'value'),
dash.dependencies.State('rnaParamList', 'values'),
dash.dependencies.State('covColorFinal', 'data'),
dash.dependencies.State('eventColorFinal', 'data'),
dash.dependencies.State('legendSpacingDiv', 'data'),
dash.dependencies.State('coverageScale', 'value'),
dash.dependencies.State('eventScale', 'value')]
)
def rnaCallback(geneName, displayMode,rnaParamList, colorsFinal, eventColorsFinal, legendSpacing,
coverageScale, eventScale):
"""Data callback that selects relevant data and creates all possible traces.
Positional arguments:
geneName -- Name of the selected gene in order to filter the data.
displayMode -- Determines how splice events will be visualized.
rnaParamList -- Selected RNA data sets to plot.
colorsFinal -- Last confirmed color.
eventColorsFinal -- Last confirmed colors for splice events.
legendSpacing -- Specifies margin between colorbar and other legend items.
coverageScale -- Scaling factor for coverage plots.
eventScale -- Scaling factor for event plots.
"""
colors = colorsFinal
figData = {}
# Select appropriate data from gene annotations
currentGene = pandas.DataFrame()
for index, elem in enumerate(cfg.geneAnnotations):
currentGene = elem[elem['geneID'].str.contains(geneName)]
if not currentGene.empty:
break
# Get axis minimum and maximum over all isoforms. Also get current chromosome
xAxisMax = currentGene['chromEnd'].max()
xAxisMin = currentGene['chromStart'].min()
chrom = currentGene['chrom'].iloc[0]
strand = currentGene['strand'].iloc[0]
figData.update({'strand': strand})
color_dict = colors # Color per mutant
figData.update({'covColors' : color_dict})
# Filter out needed datasets
rnaDataSets = sorted(list(cfg.coverageData.keys()))
displayed_rnaDataSet = rnaDataSets
# for rm in sorted(rnaParamList):
# for set in rnaDataSets:
# if rm == set.split('_')[0]:
# displayed_rnaDataSet.append(set)
# Dicts for lists of axis values
xVals = {}
yVals = {}
maxYVal = 0 # Used to scale y-axes later
maxYVals = {}
eventDict = {} # stores dataframes with relevant splice event data
iterTime = 0
evSel = 0
covSel = 0
for ds in sorted(displayed_rnaDataSet): # Select relevant coverage files from Index
covStart = time.time()
spliceSlice = coverageDataSelection(ds, xAxisMin, xAxisMax, chrom)
covEnd = time.time()
covSel += covEnd-covStart
# Pre-init y-value list
yVal = [0] * (len(range(xAxisMin, xAxisMax)))
organism = ds.split("_")[0] # Prefix of the current data frame, first filter
spliceEvents = pandas.DataFrame()
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
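# Small usage sketch (underscore-prefixed, so not collected by pytest): the `delta`
# fixture's four representations of "two hours" behave identically when added to a
# TimedeltaIndex, which is what the parametrized tests below rely on.
def _example_delta_equivalence():
    tdi = pd.TimedeltaIndex(['1 hours', '2 hours'])
    return {repr(d): tdi + d
            for d in [pd.offsets.Hour(2), timedelta(hours=2),
                      np.timedelta64(2, 'h'), Timedelta(hours=2)]}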
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
import pickle
#from .retrieve_marks import norm2
import os
import _pickle as cPickle
#import cache
from scipy.interpolate import interp1d
import pandas as pd
import numpy as np
import glob
import pprint
from scipy.signal import find_peaks
chromlength_human =[249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,
141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,
59128983,63025520,48129895,51304566]
chromlength_yeast =[230218,813184,316620,1531933,576874,270161,1090940,562643,439888,
745751,666816,1078177,924431,784333,1091291,948066]
try:
import pyBigWig
import gffutils
except ImportError:
print("You may need to install pyBigWig and gffutils")
pp = pprint.PrettyPrinter(indent=2)
def smooth(ser, sc):
return np.array(pd.Series(ser).rolling(sc, min_periods=1, center=True).mean())
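# Usage sketch (illustrative only): smooth() is a centred rolling mean; min_periods=1
# keeps the edges, so the output has the same length as the input.
def _example_smooth():
    noisy = np.sin(np.linspace(0, 6, 100)) + np.random.normal(0, 0.3, 100)
    smoothed = smooth(noisy, sc=10)  # 10-sample centred window
    assert len(smoothed) == len(noisy)
    return smoothed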
# string,string -> bool,[],res
# select a strain and an experimental value and return if available, the files and the resolution
# def is_available(strain, experiment):
# return True,[],1
ROOT = "../DNaseI/data/"
def nan_polate_c(A, kind="linear"):
ok = ~np.isnan(A)
x = np.arange(len(A))
f2 = interp1d(x[ok], A[ok], kind=kind, bounds_error=False)
# print(ok)
return f2(x)
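# Usage sketch (illustrative only): nan_polate_c() fills NaN gaps by 1-D linear
# interpolation between the surrounding valid samples.
def _example_nan_polate():
    a = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
    return nan_polate_c(a)  # -> [1., 2., 3., 4., 5.]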
def is_available_alias(strain, experiment):
alias = {"Hela": ["Hela", "HeLaS3", "Helas3"],
"Helas3": ["Helas3", "HeLaS3", "Hela"],
"GM12878": ["GM12878", "Gm12878"],
"Gm12878": ["GM12878", "Gm12878"]
}
# alias={"Helas3":["HeLaS3","Hela","Helas3"]}
if strain not in alias.keys():
avail, files, res = is_available(strain, experiment)
else:
for strain1 in alias[strain]:
avail, files, res = is_available(strain1, experiment)
if files != []:
if strain1 != strain:
print("Using alias %s" % strain1)
return avail, files, res
return avail, files, res
def is_available(strain, experiment):
avail_exp = ["MRT", "OKSeq", "OKSeqo", "DNaseI", "ORC2", "ExpGenes", "Faire", "Meth", "Meth450",
"Constant", "OKSeqF", "OKSeqR", "OKSeqS", "CNV", "NFR",
"MCM", "HMM", "GC", "Bubble","G4","G4p","G4m","Ini","ORC1","AT_20","AT_5","AT_30","RHMM","MRTstd",
"RNA_seq","MCMo","MCMp","MCM-beda","Mcm3","Mcm7","Orc2","Orc3"]
marks = ['H2az', 'H3k27ac', 'H3k27me3', 'H3k36me3', 'H3k4me1',
'H3k4me2', 'H3k4me3', 'H3k79me2', 'H3k9ac', 'H3k9me1',
'H3k9me3', 'H4k20me1', "SNS"]
marks_bw = [m + "wig" for m in marks]
Prot = ["Rad21","ORC2"]
#print("La")
if strain in ["Yeast-MCM"]:
lroot = ROOT+"/external/"+strain + "/"
resolutions = glob.glob(lroot + "/*")
#print(lroot + "/*")
resolutions = [r.split("/")[-1] for r in resolutions if "kb" in r]
#print(resolutions)
if len(resolutions) != 0:
exps = glob.glob(lroot + resolutions[0]+"/*")
files = []+exps
exps = [exp.split("/")[-1][:] for exp in exps if "csv" in exp]
print(exps)
for iexp,exp in enumerate(exps):
if exp == experiment:
return True,[files[iexp]],int(resolutions[0].replace("kb",""))
if strain in ["Cerevisae"] and experiment =="MCM-beda":
lroot = ROOT+"/external/Yeast-MCM-bedalov/"
return True,glob.glob(lroot+"/*"),0.001
if experiment not in avail_exp + marks + Prot + marks_bw:
print("Exp %s not available" % experiment)
print("Available experiments", avail_exp + marks + Prot)
return False, [], None
if experiment == "Constant":
return True, [], 1
if experiment == "MRT":
if strain == "Cerevisae":
return True, ["/home/jarbona/ifromprof/notebooks/exploratory/Yeast_wt_alvino.csv"], 1
elif strain == "Raji":
files = glob.glob(ROOT + "/external/timing_final//*_Nina_Raji_logE2Lratio_w100kbp_dw10kbp.dat" )
return True, files, 10
else:
root = ROOT + "/Data/UCSC/hsap_hg19/downloads/ENCODE/wgEncodeUwRepliSeq_V2/compute_profiles/timing_final/"
root = ROOT + "/external/timing_final/"
extract = glob.glob(root + "/*Rep1_chr10.dat")
cells = [e.split("_")[-3] for e in extract]
if strain in cells:
files = glob.glob(root + "/timing_final_W100kb_dx10kb_%s*" % strain)
return True, files, 10
if experiment == "MRTstd":
root = ROOT + "/external/Sfrac/"
extract = glob.glob(root + "/*Rep1_chr10.dat")
cells = [e.split("_")[-3] for e in extract]
if strain in cells:
files = glob.glob(root + "/Sfrac_HansenNormDenoised_W100kb_dx10kb_%s*" % strain)
return True, files, 10
if experiment == "ExpGenes":
root = ROOT + "/external/ExpressedGenes/"
extract = glob.glob(root + "/*ExpressedGenes_zero.txt")
# print(extract)
cells = [e.split("/")[-1].replace("ExpressedGenes_zero.txt", "") for e in extract]
print(cells)
if strain in cells:
files = glob.glob(root + "/%sExpressedGenes_zero.txt" % strain)
return True, files, 10
if experiment == "RNA_seq":
root = ROOT + "external//RNA_seq//"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*Tot")
# print(extract)
cells = [e.split("/")[-1].split("_")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + "_Tot/*")
files.sort()
return True, files, 1
if experiment == "NFR":
root = ROOT + "/external/NFR/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
return True, extract, 1
if experiment == "Bubble":
root = ROOT + "/external/Bubble/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bedgraph")
# print(extract)
cells = [e.split("/")[-1].split(".bedgraph")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".bedgraph")
files.sort()
return True, files, 1
#print("IRCRRRRRRRRRRRRRRRRRRRR")
if experiment == "ORC1":
#print("LA")
root = ROOT + "/external/ORC1/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
#print(extract)
cells = [e.split("/")[-1].split(".bed")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".bed")
files.sort()
return True, files, 1  # resolution assumed to be 1 (kb), matching the other .bed-based experiments; the original returned only two values
if (experiment in ["Mcm3","Mcm7","Orc2","Orc3"]) and strain =="Raji":
return True,glob.glob(ROOT+"/external/nina_kirstein/*_"+experiment+"_G1_1kbMEAN.txt") ,1
if experiment in ["MCM","MCMp"]:
#print("LA")
if strain != "Hela":
return False,[],1
root = ROOT + "/external/MCM2-bed/R1/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.txt")
#print(extract)
return True, extract, 1
if experiment == "Ini":
root = ROOT + "/external/ini/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.csv")
# print(extract)
cells = [e.split("/")[-1].split(".csv")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".csv")
files.sort()
return True, files, 1
if experiment == "GC":
root = ROOT + "/external//1ColProfiles/*1kbp*" # chr1_gc_native_w1kbp.dat
extract = glob.glob(root)
return True, extract, 1
if "AT" in experiment:
root = ROOT + "/external//AT_hooks/c__%s.csv"%experiment.split("_")[1] # chr1_gc_native_w1kbp.dat
extract = glob.glob(root)
return True, extract, 5
if experiment == "SNS":
root = ROOT + "/external/SNS/"
# root = ROOT + "/external/1kb_profiles//"
extract = []
if strain in ["K562"]:
extract = glob.glob(root + "*.bed")
elif strain in ["HeLaS3","Hela","HeLa"]:
extract=glob.glob(root + "*.csv")
#print("Strain",strain)
#print(extract, root)
if strain not in ["K562","HeLaS3"]:
print("Wrong strain")
print("Only K562")
return False, [], 1
return True, extract, 1
if experiment == "MCMo":
if strain not in ["HeLa", "HeLaS3","Hela"]:
print("Wrong strain")
print("Only", "HeLa", "HeLaS3")
raise ValueError("MCMo is only available for HeLa/HeLaS3")
root = ROOT + "/external/MCM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1
if experiment == "MCMbw":
if strain not in ["HeLa", "HeLaS3"]:
print("Wrong strain")
print("Only", "HeLa", "HeLaS3")
raise ValueError("MCMbw is only available for HeLa/HeLaS3")
"""
root = ROOT + "/external/SNS/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1"""
if "G4" in experiment:
root = ROOT + "/external/G4/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1
if experiment == "CNV":
root = ROOT + "/external/CNV/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.txt")
# print(extract)
cells = [e.split("/")[-1].split(".txt")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".txt")
files.sort()
#print(files)
return True, files, 10
if experiment == "HMM":
root = ROOT + "/external/HMM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
# print(extract)
cells = [e.split("/")[-1].replace("wgEncodeBroadHmm", "").replace("HMM.bed", "")
for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + "wgEncodeBroadHmm%sHMM.bed" % strain)
files.sort()
# print(files)
return True, files, 10
if experiment == "RHMM":
root = ROOT + "/external/RHMM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
#print(extract)
cells = [e.split("/")[-1].replace("RHMM.bed", "")
for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + "%sRHMM.bed" % strain)
files.sort()
# print(files)
return True, files, 1
if experiment.startswith("OKSeq"):
root = ROOT + "/Data/UCSC/hsap_hg19//local/Okazaki_Hyrien/1kb_profiles/"
root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*")
cells = [e.split("/")[-1] for e in extract]
cells.sort()
# print(cells)
if strain in cells:
if experiment == "OKSeqo":
files = glob.glob(root + strain + "/*pol*")
if experiment == "OKSeqF":
files = glob.glob(root + strain + "/*F*")
if experiment == "OKSeqR":
files = glob.glob(root + strain + "/*R*")
if experiment in ["OKSeqS", "OKSeq"]:
# print("La")
files = glob.glob(root + strain + "/*R*")
files += glob.glob(root + strain + "/*F*")
files.sort()
return True, files, 1
if experiment == "DNaseI":
root = ROOT + "/external/DNaseI//"
print(root)
if strain == "Cerevisae":
return True, [root + "/yeast.dnaseI.tagCounts.bed"], 0.001
else:
extract = glob.glob(root + "/*.narrowPeak")
cells = [e.split("/")[-1].replace("wgEncodeAwgDnaseUwduke",
"").replace("UniPk.narrowPeak", "") for e in extract]
extract2 = glob.glob(root + "../DNaseIK562/*.narrowPeak")
cells2 = [e.split("/")[-1].replace("wgEncodeOpenChromDnase",
"").replace("Pk.narrowPeak", "") for e in extract2]
extract3 = glob.glob(root + "../DNaseIK562/*.bigWig")
cells3 = [e.split("/")[-1].replace("wgEncodeUwDnase",
"").replace("Rep1.bigWig", "") for e in extract3]
# print(extract2, cells2)
extract += extract2
cells += cells2
extract += extract3
cells += cells3
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 0.001
if experiment == "Meth":
root = ROOT + "/external/methylation//"
extract = glob.glob(root + "*.bed")
cells = [e.split("/")[-1].replace(".bed", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment == "Meth450":
root = ROOT + "/external/methylation450//"
extract = glob.glob(root + "*.bed")
cells = [e.split("/")[-1].replace(".bed", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment == "Faire":
root = ROOT + "/external/Faire//"
extract = glob.glob(root + "*.pk")
cells = [e.split("/")[-1].replace("UncFAIREseq.pk", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment in Prot:
root = ROOT + "/external/DNaseI//"
extract = glob.glob(root + "/*.narrowPeak")
cells = [e.split("/")[-1].replace(experiment + "narrowPeak", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
root = ROOT + "/external/proteins//"
extract = glob.glob(root + "/*.csv")
cells = [e.split("/")[-1].replace("_ORC2_miotto.csv", "") for e in extract]
if strain in cells:
files = glob.glob(root + "/%s_ORC2_miotto.csv" % strain)
return True, files, 1
if experiment in marks:
root = ROOT + "/external/histones//"
if experiment == "H2az" and strain == "IMR90":
experiment = "H2A.Z"
extract = glob.glob(root + "/*%s*.broadPeak" % experiment)
#print(extract)
if strain not in ["IMR90"]:
cells = [e.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("Std", "").replace("%sPk.broadPeak" % experiment, "") for e in extract]
# print(extract,cells)
if strain in cells:
files = glob.glob(root + "/wgEncodeBroadHistone%s%sStdPk.broadPeak" %
(strain, experiment))
files += glob.glob(root + "/wgEncodeBroadHistone%s%sPk.broadPeak" %
(strain, experiment))
return True, files, 1
else:
cells = [e.split("/")[-1].split("-")[0] for e in
extract]
# print(extract,cells)
print("Larr")
if strain in cells:
files = glob.glob(root + "/%s-%s.broadPeak" %
(strain, experiment))
return True, files, 1
if experiment[:-3] in marks:
root = ROOT + "/external/histones//"
if strain not in ["IMR90"]:
extract = glob.glob(root + "/*%s*.bigWig" % experiment[:-3])
# print(extract)
cells = []
for c in extract:
if "StdSig" in c:
cells.append(c.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("%sStdSig.bigWig" % experiment[:-3], ""))
else:
cells.append(c.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("%sSig.bigWig" % experiment[:-3], ""))
# print(extract, cells)
if strain in cells:
files = glob.glob(root + "/wgEncodeBroadHistone%s%sStdSig.bigWig" %
(strain, experiment[:-3]))
if files == []:
#print("Warning using Sig")
files = glob.glob(root + "/wgEncodeBroadHistone%s%sSig.bigWig" %
(strain, experiment[:-3]))
# print(files)
return True, files, 1
else:
exp = experiment[:-3]
exp = exp.replace("k","K") # from roadmap epi
extract = glob.glob(root + "/IMR90_%s*wh.csv" % exp)
print(extract)
cells = []
return True, extract, 1
print("Available cells")
pp.pprint(cells)
return False, [], None
def re_sample(x, y, start, end, resolution=1000):
resampled = np.zeros(int(end / resolution - start / resolution)) + np.nan
# print(data)
# print(resampled.shape)
for p, v in zip(x, y):
#print(v)
if not np.isnan(v):
posi = int((p - start) / resolution)
if np.isnan(resampled[min(posi, len(resampled) - 1)]):
resampled[min(posi, len(resampled) - 1)] = 0
resampled[min(posi, len(resampled) - 1)] += v
if int(posi) > len(resampled) + 1:
print("resample", posi, len(resampled))
# raise "Problem"
return np.arange(len(resampled)) * resolution + start, resampled
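# Usage sketch (illustrative only): re_sample() sums the y values of all points
# falling into each `resolution`-wide bin between start and end (same units as x);
# bins that receive no point stay NaN.
def _example_re_sample():
    x = np.array([0, 500, 1500, 2600])   # positions
    y = np.array([1.0, 2.0, 4.0, 8.0])   # signal values
    xs, ys = re_sample(x, y, start=0, end=3000, resolution=1000)
    # xs -> [0, 1000, 2000]; ys -> [3., 4., 8.]
    return xs, ys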
def cut_path(start, end, res=1):
initpos = 0 + start
delta = end - start
path = [0 + initpos]
def cond(x): return x <= end
while (initpos + delta) != int(initpos) and cond(initpos):
ddelta = int(initpos) + res - initpos
initpos += ddelta
ddelta -= initpos
path.append(initpos)
path[-1] = end
if len(path) >= 2 and path[-1] == path[-2]:
path.pop(-1)
return path
def overlap(start, end, res):
r = cut_path(start / res, end / res)
return [ri * res for ri in r]
def overlap_fraction(start, end, res):
assert(start <= end)
v = np.array(overlap(start, end, res))
deltas = (v[1:] - v[:-1]) / res
indexes = np.array(v[:-1] / res, dtype=int)
return deltas, indexes
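# Worked example (illustrative only): overlap_fraction() splits [start, end] at
# multiples of `res` and returns, for each touched bin, the covered fraction of
# that bin together with the bin index.
def _example_overlap_fraction():
    deltas, indexes = overlap_fraction(start=1.5, end=3.0, res=1)
    # bins: [1, 2) is half covered, [2, 3) fully -> deltas=[0.5, 1.], indexes=[1, 2]
    return deltas, indexes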
def create_index_human(strain,exp,resolution=10,root="./"):
#chromlength = [248956422]
data = {iexp:[] for iexp in exp}
for chrom, length in enumerate(chromlength_human, 1):
for iexp in exp:
data[iexp].append(replication_data(strain, iexp,
chromosome=chrom, start=0,
end=length // 1000,
resolution=resolution)[1])
if iexp == "OKSeq":
data[iexp][-1] /= resolution
ran = [np.arange(len(dat)) * 1000 * resolution for dat in data[exp[0]]]
index = {"chrom": np.concatenate([["chr%i"%i]*len(xran) for i,xran in enumerate(ran,1)]),
"chromStart":np.concatenate(ran),
"chromEnd":np.concatenate(ran)}
print(root)
os.makedirs(root,exist_ok=True)
pd.DataFrame(index).to_csv(root+"/index.csv",index=False)
for iexp in exp:
index.update({"signalValue":np.concatenate(data[iexp])})
Df = pd.DataFrame(index)
Df.to_csv(root + "/%s.csv" % iexp, index=False)
def whole_genome(**kwargs):
data = []
def fl(name):
def sanit(z):
z = z.replace("/", "")
return z
if type(name) == dict:
items = list(name.items())
items.sort()
return "".join(["%s-%s" % (p, sanit(str(fl(value)))) for p, value in items])
else:
return name
redo = kwargs.pop("redo")
root = kwargs.get("root", "./")
# print("ic")
if "root" in kwargs.keys():
# print("la")
kwargs.pop("root")
name = root + "data/saved/"+fl(kwargs)
if os.path.exists(name) and not redo:
with open(name, "rb") as f:
return cPickle.load(f)
strain = kwargs.pop("strain")
experiment = kwargs.pop("experiment")
resolution = kwargs.pop("resolution")
for chrom, length in enumerate(chromlength_human, 1):
data.append(replication_data(strain, experiment,
chromosome=chrom, start=0,
end=length//1000,
resolution=resolution, **kwargs)[1])
if len(data[-1]) != int(length / 1000 / resolution - 0 / resolution):
print(strain, experiment, len(data[-1]),
int(length / 1000 / resolution - 0 / resolution))
raise ValueError("Unexpected profile length for %s %s" % (strain, experiment))
with open(name, "wb") as f:
cPickle.dump(data, f)
return data
def replication_data(strain, experiment, chromosome,
start, end, resolution, raw=False,
oData=False, bp=True, bpc=False, filename=None,
pad=False, smoothf=None, signame="signalValue"):
marks = ['H2az', 'H3k27ac', 'H3k27me3', 'H3k36me3', 'H3k4me1',
'H3k4me2', 'H3k4me3', 'H3k79me2', 'H3k9ac', 'H3k9me1',
'H3k9me3', 'H4k20me1']
if experiment != "" and os.path.exists(experiment):
filename = experiment
if os.path.exists(strain) and strain.endswith("csv"):
#print(strain)
data=pd.read_csv(strain)
#print(len(data))
sub = data[data.chrom==chromosome][experiment]
y = np.array(sub[int(start/resolution):int(end/resolution)])
print("Sizes",chromosome,len(sub),int(end/resolution))
return (np.arange(len(y))*resolution + start)*1000,y
#chn = list(set(data.chr))
if experiment.endswith("weight"):
from repli1d.retrieve_marks import norm2
with open(experiment, "rb") as f:
w = pickle.load(f)
if len(w) == 4:
[M, S, bestw, Exp] = w
normed = False
else:
[M, S, bestw, Exp, normed] = w
if normed:
smark = replication_data(chromosome=chromosome, start=start,
end=end, strain=strain, experiment="CNV",
resolution=resolution, raw=False, oData=False,
bp=True, bpc=False, filename=None)[1]
smark[smark == 0] = 4
smark[np.isnan(smark)] = 4
CNV = smark
Signals = {}
for mark in Exp:
if "_" in mark:
markn, smoothf = mark.split("_")
smoothf = int(smoothf)
else:
markn = mark
smark = replication_data(chromosome=chromosome, start=start,
end=end, strain=strain, experiment=markn,
resolution=resolution, raw=False, oData=False,
bp=True, bpc=False, filename=None)[1]
if normed:
smark /= CNV
if mark != "Constant":
Signals[mark] = norm2(smark, mean=M[mark], std=S[mark], cut=15)[0]
else:
Signals[mark] = smark
if smoothf is not None:
Signals[mark] = smooth(Signals[mark], smoothf)
# print(bestw)
if type(bestw[0]) in [list, np.ndarray]:
comp = [bestw[0][i]*(-1+2/(1+np.exp(-bestw[1][i]*(Signals[iexp]-bestw[2][i]))))
for i, iexp in enumerate(Exp)]
else:
comp = np.array([bestw[i] * Signals[iexp] for i, iexp in enumerate(Exp)])
y = np.sum(comp, axis=0)
y[y < 0] = 0
x = np.arange(len(y))*resolution + start
return x, y
# print(smark)
if filename is None:
avail, files, resolution_experiment = is_available_alias(strain, experiment)
if not avail:
return [], []
else:
print("Filename", filename)
avail = True
files = [filename]
resolution_experiment = 0.001
if filename.endswith("bigWig") or filename.endswith("bw"):
cell = pyBigWig.open(files[0])
if "chrI" in cell.chroms().keys():
print("Yeast")
#print(cell.chroms())
from repli1d.tools import int_to_roman
#print(int(chromosome))
chromosome = int_to_roman(int(chromosome))
if end is None:
end = int(cell.chroms()['chr%s' % str(chromosome)] / 1000)
#print(start * 1000, end * 1000, int((end - start) / (resolution)))
#Check the end:
endp = end
smaller =False
if end > cell.chroms()["chr%s" % str(chromosome)]/1000:
print("Warning index > end ch")
endp = int(cell.chroms()["chr%s" % str(chromosome)] /1000)
smaller = True
v = [np.nan if s is None else s for s in cell.stats(
"chr%s" % str(chromosome), start * 1000, endp * 1000, nBins=int((endp - start) / resolution))]
if not smaller:
return np.arange(start, end + 100, resolution)[: len(v)], np.array(v)
else:
x = np.arange(start, end + 0.1, resolution)
y = np.zeros_like(x) + np.nan
y[:len(v)] = np.array(v)
return x[:end], y[:end]
if filename.endswith("narrowPeak"):
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
return re_sample(x, y, start, end, resolution)
if filename.endswith("bed"):
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
if "chrI" in set(strain["chrom"]):
print("Yeast")
# print(cell.chroms())
from repli1d.tools import int_to_roman
# print(int(chromosome))
chro = int_to_roman(int(chromosome))
#print(strain)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
#print("La")
#print(data)
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
y = np.ones_like(x)
#print(y)
return re_sample(x, y, start, end, resolution)
if filename.endswith("tagAlign"):
index = ["chrom", "chromStart", "chromEnd", "N", "signalValue","pm"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue) / 1000 # because the value is 1000 ?
return re_sample(x, y, start, end, resolution)
if filename.endswith("csv"):
#index = ["chrom", "chromStart", "chromEnd", "signalValue"]
chro = str(chromosome)
# print(files[0])
strain = pd.read_csv(files[0], sep="\t")
#print(strain.mean())
#print(strain)
#tmpl = "chr%s"
f = 1000 #Needed because start is in kb
if "chrom" not in strain.columns:
strain = pd.read_csv(files[0], sep=",")
# print(strain)
#tmpl = "chrom%s"
f = 1000
# strain.chrom
# sanitize chrom:
def sanitize(ch):
if type(ch) == int:
return "chr%s"%ch
if type(ch) == str:
if "chrom" in ch:
return ch.replace("chrom","chr")
if (not "chr" in ch) and (not "chrom" in ch):
return "chr%s"%ch
return ch
strain["chrom"] = [sanitize(ch) for ch in strain["chrom"]]
#print(strain)
#print(strain.describe())
#print(strain.head())
#print( tmpl % chro)
#print("F",f)
data = strain[(strain.chrom == chro) & (
strain.chromStart >= f * start) & (strain.chromStart < f * end)]
#print("Warning coold shift one")
#print("Data",len(data))
#print(f)
#print(data)
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) #/ f # kb
if signame == "signalValue" and signame not in data.columns:
if "signal" in data.columns:
signame = "signal"
print("Warning changing signalValue to signal")
y = np.array(data[signame])
#print(y)
#print(x[:10])
#print(y[:10])
#print(start,end)
#print(chro,np.mean(y),len(y))
return re_sample(x, y, start * f, end * f, resolution*f)
# print(files)
assert(type(files) == list)
if strain in ["Yeast-MCM"]:
#print(files[0])
index = "/".join(files[0].split("/")[:-1]) + "/index.csv"
index = pd.read_csv(index,sep="\t")
strain = index
exp = pd.read_csv(files[0])
if len(index) != len(exp):
raise ValueError("Wrong size of indexing %i %i"%(len(index) , len(exp)))
strain["signal"] = exp
if "Yeast" in strain:
from repli1d.tools import int_to_roman
chro = int_to_roman(int(chromosome))
else:
chro = chromosome
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000*start) & (strain.chromStart < 1000*end)]
#print(data)
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signal)
if raw:
return x, y
else:
return re_sample(x, y, start, end, resolution)
if experiment in ["MCM","MCMp"]:
#print(chromosome)
files = [f for f in files if "chr%i."%chromosome in f]
#print(files)
files += [f.replace("R1","R2") for f in files]
#print(files)
data = np.sum([np.array(pd.read_csv(f))[:,0] for f in files],axis=0)
x = np.arange(len(data)) # kb
sub = (x> start) & (x < end)
x=x[sub]
y = np.array(data[sub],dtype=float)
x,y = re_sample(x, y, start, end, resolution)
if experiment == "MCMp":
print(np.nanpercentile(y,50))
peaks, _ = find_peaks(y / np.nanpercentile(y,50),width=1,prominence=1.)
peaksa = np.zeros_like(y,dtype=bool)
for p in peaks:
peaksa[p]=True
print(len(y),len(peaks),"Peaks")
y[~peaksa]=0
#raise "NT"
return x,y
if experiment == "DNaseI":
if strain == "Cerevisae":
index = ["chrom", "chromStart", "chromEnd", "name", "signalValue"]
print(files[0])
strain = pd.read_csv(files[0], sep="\t", names=index)
chro = str(chromosome)
if oData:
return strain
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
else:
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
if files[0].endswith("narrowPeak"):
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
else:
cell = pyBigWig.open(files[0])
if end is None:
end = cell.chroms()['chr%s' % str(chro)]
v = [np.nan if s is None else s for s in cell.stats(
"chr%s" % str(chro), start * 1000, end * 1000, nBins=int(end - start) // (resolution))]
return np.arange(start, end + 100, resolution)[: len(v)], np.array(v)
if experiment == "Faire":
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if "MCM-beda" in experiment:
#print(files[0])
strain = pd.read_csv(files[0],sep="\t")
#strain.MCM = smooth(strain.MCM2_ChEC)
chromosome = {1: "I", 2: "II", 3: "III", 4: "IV", 5: "V", 6: "VI", 7: "VII", 8: "VIII", 9: "IX", 10: "X",
11: "XI", 12: "XII", 13: "XIII", 14: "XIV", 15: "XV", 16: "XVI"}[chromosome]
#print(chromosome)
#print(strain)
data = strain[(strain.chr == "chr%s" % chromosome) & (
strain.coord > 1000 * start) & (strain.coord < 1000 * end)]
#print(data)
if oData:
return data
x = np.array(data.coord) / 1000 # kb
#y = np.array(data.cerevisiae_MCM2ChEC_rep1_library_fragment_size_range_51bpto100bp)
y = np.array(data.cerevisiae_MCM2ChEC_rep1_library_fragment_size_range_all)
if "G4" in experiment:
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
if "p" in experiment:
ip = np.argmax(["plus" in f for f in files])
print(files[ip],)
strain = pd.read_csv(files[ip], sep="\t", names=index)
elif "m" in experiment:
ip = np.argmax(["minus" in f for f in files])
print(files[ip])
strain = pd.read_csv(files[ip], sep="\t", names=index)
else:
strain = pd.concat([pd.read_csv(files[ip], sep="\t", names=index) for ip in [0,1]])
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.ones_like(x)
if experiment == "GC":
index = ["signalValue"]
chro = str(chromosome)
file = [f for f in files if "chr%s_" % chro in f]
strain = pd.read_csv(file[0], sep="\t", names=index)
strain["chromStart"] = np.arange(0, len(strain)*1000, 1000)
strain["chromEnd"] = np.arange(0, len(strain)*1000, 1000)
data = strain[(strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if "AT" in experiment :
index = ["signalValue"]
chro = str(chromosome)
#file = [f for f in files if "chr%s_" % chro in f]
strain = pd.read_csv(files[0], sep="\t")
#print(strain.head())
data = strain[(strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if experiment == "Ini":
filename = files[0]
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep=",", names=index)
data = strain[(strain.chrom == "%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
data.chromStart /= 1000
data.chromEnd /= 1000
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.ones_like(x)
return re_sample(x, y, start, end, resolution)
if experiment == "HMM" :
filename = files[0]
index = ["chrom", "chromStart", "chromEnd", "ClassName", "u1",
"u2", "u3", "u4", "color"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
data.chromStart /= 1000
data.chromEnd /= 1000
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.color)
return re_sample(x, y, start, end, resolution)
if experiment == "RHMM":
filename = files[0]
try:
index = ["chrom", "chromStart", "chromEnd", "ClassName", "u1",
"u2", "u3", "u4", "color"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
strain["ClassName"] = [int(class_n.split("_")[0]) for class_n in strain.ClassName]
except:
index = ["chrom", "chromStart", "chromEnd", "ClassName"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
strain["ClassName"] = [int(class_n[1:]) for class_n in strain.ClassName]
inac=3
trans=6
# class remapping restored from the commented-out original so that `r` is defined
# for the loop below: inactive classes collapse to `inac`, transcription-associated
# classes to `trans`
r = {2:inac,3:inac,4:inac,7:inac,8:inac,9:inac,10:inac,11:inac,
12:12,
13:13,
5:trans,6:trans
}
for k,v in r.items():
strain.ClassName[strain.ClassName==k]=v
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
data.chromStart /= 1000
data.chromEnd /= 1000
return data
#x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
#y = np.array(data.ClassName)
x = np.array([data.chromStart,data.chromEnd]).reshape((1, -1), order="F")[0] / 1000
print(x[:10])
y = np.array([data.ClassName, data.ClassName]).reshape((1, -1), order="F")[0]
return x,y #re_sample(x, y, start, end, resolution)
if experiment == "CNV":
index = ["chrom", "chromStart", "chromEnd", "CNV", "Sig"]
data = pd.read_csv(files[0], sep="\t", names=index)
#print(data)
# x = np.arange(start, end, resolution)
# y = np.zeros_like(x, dtype=np.float)
y = np.zeros(int(end / resolution - start / resolution))
x = np.arange(len(y)) * resolution + start
print(data.chrom.dtype)
if str(data.chrom.dtype) == "int64":
data = data[data.chrom == chromosome]
else:
data = data[data.chrom == str(chromosome)]
print(data)
for startv, endv, CNV in zip(data.chromStart, data.chromEnd, data.CNV):
startv /= 1000
endv /= 1000
# deltas, indexes = overlap_fraction(startv / 1000, endv / 1000, resolution)
# print(startv, endv, endv < start, startv > end)
if endv < start or startv > end:
continue
# print("la", start, end)
startr = max(start, startv)
endr = min(end, endv)
y[int((startr - start) / resolution):int((endr - start) / resolution)] = CNV
if oData:
return data
else:
return x, y
if experiment == "NFR":
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
print(files)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.ones_like(x)
if experiment == "Bubble":
index = ["chrom", "chromStart", "chromEnd", "signalValue"]
chro = str(chromosome)
print(files)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if experiment == "ORC1":
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
print(files)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.ones_like(x)
if (experiment in ["Mcm3","Mcm7","Orc2","Orc3"]) and strain =="Raji":
print(files)
for f in files:
if "chr%s_" % str(chromosome) in f:
# print(f)
data = pd.read_csv(f)
input = pd.read_csv(f.replace(experiment,"input"))
break
data=np.array(data[start:end],dtype=float)[::,0]
input = np.array(input[start:end],dtype=float)[::,0]
print(data.shape)
print(data)
x=np.arange(start,end)
x0,data = re_sample(x, data, start, end, resolution)
_,input = re_sample(x, input, start, end, resolution)
data = data/input
data[input<10]=np.nan
return x0,data
if experiment == "SNS":
if strain == "K562":
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
print(files)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.ones_like(x)
else:
index = ["chrom", "chromStart", "chromEnd","signalValue"]
chro = str(chromosome)
print(files)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = data.signalValue
if experiment == "MCMo":
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
print(files)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.ones_like(x)
print(sum(y), len(y))
if experiment == "Meth":
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "chromStart1", "chromEnd1", "bs", "signalValue", "p"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if experiment == "Meth450":
# chr16 53468112 53468162 cg00000029 721 + 53468112 53468162 255,127,0
index = ["chrom", "chromStart", "chromEnd", "name", "signalValue",
"strand", "chromStart1", "chromEnd1", "bs"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index, skiprows=1)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if experiment == "Constant":
y = np.zeros(int(end / resolution - start / resolution)) + 1
x = np.arange(len(y)) * resolution + start
return x, y
if experiment == "ORC2":
strain = pd.read_csv(files[0],
skiprows=2, names=["chrom", "chromStart", "chromEnd"])
chro = str(chromosome)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array([1 for _ in range(len(x))])
if experiment == "ExpGenes":
strain = pd.read_csv(files[0],
skiprows=1, names=["chrom", "chromStart", "chromEnd", "signalValue", "gene_id", "tss_id"], sep="\t")
chro = str(chromosome)
# print(strain)
# return strain
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
data["chromStart"] /= (1000 * resolution)
data["chromEnd"] /= (1000 * resolution)
db = gffutils.FeatureDB(
ROOT + "/external/ExpressedGenes/db_gtf.db", keep_order=True)
sens = [db[gene].strand for gene in data["gene_id"]]
data["strand"] = sens
if oData or raw:
return data
else:
raise "Only raw available"
# x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
# y = np.array([1 for _ in range(len(x))])
if experiment[:-3] in marks and strain == "IMR90":
strain = pd.read_csv(files[0],sep="\t")
chro = str(chromosome)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
return x,y
if experiment[:-3] in marks:
cell = pyBigWig.open(files[0])
if end is None:
end = cell.chroms()['chr%s' % str(chromosome)]
# check end of chromosome:
true_end = cell.chroms()['chr%s' % str(chromosome)]
true_end /= 1000
pad = False
if true_end < end:
print("Problem of size", true_end, end, chromosome)
pad = end + 0
end = int(true_end)
# print("Problem of size", true_end, end, chromosome)
#print(chromosome)
#print(cell.chroms())
#print(files[0])
#print(start,end)
v = [np.nan if s is None else s for s in cell.stats(
"chr%s" % str(chromosome), int(start * 1000), int(end * 1000), nBins=int(int(end - start) // (resolution)))]
if pad:
print("padding...", int(pad/resolution)-len(v))
v += [np.nan]*(int(pad/resolution)-len(v))
return np.arange(start, end + 100, resolution)[:len(v)], np.array(v)
if experiment in marks:
index = ["chrom", "chromStart", "chromEnd", "name",
"score", "strand", "signalValue", "pValue", "qValue"]
strain = pd.read_csv(files[0], sep="\t", names=index)
chro = str(chromosome)
if oData:
return strain
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromEnd < 1000 * end)]
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
xp, yp = re_sample(x, y, start, end, resolution)
if not bp:
return xp, yp
# if bpc:
# x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
# y = np.array(data.chromEnd - data.chromStart)
# xp, yp = re_sample(x, y, start, end, resolution)
# return xp, yp
yp = np.zeros_like(yp)
for startv, endv, v in zip(data["chromStart"], data["chromEnd"], data["signalValue"]):
# if endv < 6000000:
# print(startv, endv, v)
endv1 = min(endv, end * 1000)
if endv1 != endv:
print("End of peak outside", endv, end)
endv = endv1
print(endv, startv)
start1 = min(startv, end * 1000)
if start1 != startv:
print("Start of peak outside", startv, end)
startv = start1
# print(endv, startv)
deltas, indexes = overlap_fraction(startv / 1000, endv / 1000, resolution)
deltas /= np.sum(deltas)
if np.any(indexes - int(start / resolution) >= len(yp)):
print("Out of bound")
continue
if bpc:
yp[indexes - int(start / resolution)] += deltas * (endv - startv) # * v
else:
yp[indexes - int(start / resolution)] += deltas * \
v * (endv-startv)/(resolution*1000)
# print(startv, endv)
# print(deltas, indexes, start, indexes - int(start / resolution))
# return xp, yp
yp[yp == 0] = np.nan
return xp, yp
if experiment == "MRT":
if strain == "Cerevisae":
index = ["i", "chrom", "chromStart",
"%HL: tp10", "%HL: tp12.5A", "%HL: tp12.5B", "%HL: tp15A", "%HL: tp15B", "%HL: tp17.5A",
"%HL: tp17.5B", "%HL: tp25A", "%HL: tp25B", "%HL: tp40A", "%HL: tp40B", "TrepA", "TrepB"]
data =
|
pd.read_csv(files[0], sep=",", names=index, skiprows=1)
|
pandas.read_csv
|
from itertools import product
from pathlib import Path
from typing import Dict, Optional, Sequence
import numpy as np
import pandas as pd
from tqdm import tqdm
from adaptive.estimators import rollingOLS as run_regressions
from adaptive.model import Model, ModelUnit, gravity_matrix
from adaptive.plots import plot_simulation_range
from adaptive.policy import simulate_adaptive_control, simulate_lockdown
from adaptive.utils import cwd, days, weeks
from etl import download_data, district_migration_matrices, get_time_series, load_all_data
def get_model(districts, populations, timeseries, seed = 0):
units = [ModelUnit(
name = district,
population = populations[i],
I0 = timeseries[district].iloc[-1]['Hospitalized'] if not timeseries[district].empty and 'Hospitalized' in timeseries[district].iloc[-1] else 0,
R0 = timeseries[district].iloc[-1]['Recovered'] if not timeseries[district].empty and 'Recovered' in timeseries[district].iloc[-1] else 0,
D0 = timeseries[district].iloc[-1]['Deceased'] if not timeseries[district].empty and 'Deceased' in timeseries[district].iloc[-1] else 0,
) for (i, district) in enumerate(districts)]
return Model(units, random_seed = seed)
def run_policies(migrations, district_names, populations, district_time_series, Rm, Rv, gamma, seed, initial_lockdown = 13*days, total_time = 190*days):
# run various policy scenarios
lockdown = np.zeros(migrations.shape)
# 1. release lockdown 31 May
release_31_may = get_model(district_names, populations, district_time_series, seed)
simulate_lockdown(release_31_may,
lockdown_period = initial_lockdown + 4*weeks,
total_time = total_time,
RR0_mandatory = Rm, RR0_voluntary = Rv,
lockdown = lockdown.copy(), migrations = migrations)
# 3. adaptive release starting 31 may
adaptive = get_model(district_names, populations, district_time_series, seed)
simulate_adaptive_control(adaptive, initial_lockdown, total_time, lockdown, migrations, Rm, {district: R * gamma for (district, R) in Rv.items()}, {district: R * gamma for (district, R) in Rm.items()}, evaluation_period=1*weeks)
return (release_31_may, adaptive)
if __name__ == "__main__":
root = cwd()
data = root/"data"
# model details
gamma = 0.2
prevalence = 1
total_time = 90 * days
release_date = pd.to_datetime("May 31, 2020")
lockdown_period = (release_date -
|
pd.to_datetime("today")
|
pandas.to_datetime
|
"""Post process the resuls data
"""
# from tkinter import font
# from tkinter.font import _FontDict
# from tkinter.font import _FontDict
from audioop import avg
import pandas as pd
import matplotlib.pyplot as plt
from statistics import mean
import statistics as stat
import os
from pyparsing import FollowedBy
import para
import numpy as np
# root_folder = r"C:\Users\phdji\OneDrive - Danmarks Tekniske Universitet\JuanJuanLin\Tests2022/"
label_font = {'family': 'Times New Roman',
'weight': 'bold',
'size': 12
}
tick_font = {'family': 'Times New Roman',
'weight': 'bold',
'size': 10
}
def getBestSeed(_folder: str): # return the best seed number
"""Input is the folder
"""
fn = _folder+'/BestSeed.txt'
df = pd.read_csv(fn)
best_seed = df["Seed"][0]
print("Best Seed = {0}".format(best_seed))
return int(best_seed)
def updateOperatorName(fn: str):
"""
    Given the folder name, return the operator label
This is applied to test the effect of different operators
"""
print("input fn ={0}".format(fn))
if fn == "0_TestOp_0":
return "NH_1"
elif fn == "1_TestOp_1":
return "NH_2"
elif fn == "2_TestOp_2":
return "NH_3"
elif fn == "3_TestOp_3":
return "NH_4"
elif fn == "4_TestOp_4":
return "NH_5"
elif fn == "5_TestOp_5":
return "NH_6"
elif fn == "6_TestOp_6":
return "NH_7"
elif fn == "7_TestOp_7":
return "NH_8"
elif fn == "8_TestOp_8":
return "NH_9"
else:
return fn
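# Illustrative mapping (added, not part of the original script): the helper above
# renames the operator-test folders to the labels used in the plot legends, e.g.
#   updateOperatorName("0_TestOp_0")  -> "NH_1"
#   updateOperatorName("8_TestOp_8")  -> "NH_9"
#   any other folder name is returned unchanged.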
def CompareOneFolder(_folder: str, _name: str):
root, dirs, files = next(os.walk(_folder), ([], [], []))
print(dirs)
print("plot one folder case")
print("default number of iter is 200, need to check if it is not")
NumberOfIter = para.NumofIter
allGaps = []
for fo in range(0, len(dirs)):
f = _folder+"\\"+dirs[fo] + "\\ABC_Converge_ave.txt"
df = pd.read_csv(f, header=None)
gap = []
for j in range(0, NumberOfIter):
gap.append(df[1][j])
allGaps.append(gap)
# plt.plot(gap, label=dirs[fo])
this_label = updateOperatorName(dirs[fo])
plt.plot(gap, label=this_label)
plt.xlabel("No. of Iterations", fontdict=label_font)
plt.ylabel("Ave. Objective Value", fontdict=label_font)
plt.ion()
plt.legend()
plt.pause(1)
plt.savefig(_folder+"\\"+_name+"_ave.png", bbox_inches='tight', dpi=600)
plt.close()
d = []
for i in range(0, len(dirs)):
d.append([str(i), allGaps[i][NumberOfIter-1]])
df = pd.DataFrame(d, columns=['Exp', 'MinObj'])
with open(_folder+"min_ObjTable.txt", "w+") as f:
print("Id,Obj", file=f)
for r in range(0, len(df)):
print("{0},{1}", df['Exp'][r], df['MinObj'][r], file=f)
print(df)
allGaps = []
for fo in range(0, len(dirs)):
f = _folder+"\\"+dirs[fo] + "\\ABC_Converge_min.txt"
df = pd.read_csv(f, header=None)
gap = []
for j in range(0, NumberOfIter):
gap.append(df[1][j])
allGaps.append(gap)
# plt.plot(gap, label=dirs[fo])
this_label = updateOperatorName(dirs[fo])
plt.plot(gap, label=this_label)
plt.xlabel("No. of Iterations", fontdict=label_font)
plt.ylabel("Min. Objective Value", fontdict=label_font)
plt.ion()
plt.legend()
plt.pause(1)
plt.savefig(_folder+"\\"+_name+"_min.png", bbox_inches='tight', dpi=600)
plt.close()
# next is to output the value, std, and min value
allObjs = []
ave = []
minval = []
std = []
for fo in range(0, len(dirs)):
f = _folder+"\\"+dirs[fo] + "\\ABCPrintSeedBestSolVal.txt"
df = pd.read_csv(f)
print(df)
obj = []
for j in range(0, para.NumOfTestSeed):
obj.append(df["BestVal"][j])
allObjs.append(obj)
ave.append(mean(obj))
minval.append(min(obj))
std.append(stat.stdev(obj))
rowLabel = []
matrix = []
for i in range(0, len(dirs)):
matrix.append([-1]*3)
colLabel = ["Ave", "Min", "Std"]
for i in range(0, len(dirs)):
rowLabel.append(dirs[i])
matrix[i][0] = ave[i]
matrix[i][1] = minval[i]
matrix[i][2] = std[i]
print(matrix)
mydf = pd.DataFrame(matrix, columns=colLabel, index=rowLabel)
plotTable(mydf, "Summary")
with open(_folder+"Summary.txt", "w+") as f:
print(mydf, file=f)
# print("Id,Obj",file=f)
# for r in range(0,len(df)):
# print("{0},{1}",df['Exp'][r],df['MinObj'][r],file=f)
def TuneReward():
"""
test for tuning the reward parameters
"""
# improveFolder = r"C:\Users\phdji\OneDrive - Danmarks Tekniske Universitet\JuanJuanLin\\SiouxFall_TunePara\improve=3/"
# CompareOneFolder(_folder=improveFolder,_name="improve=3")
# exit()
root = root_folder
# root = r"C:\Users\phdji\OneDrive - Danmarks Tekniske Universitet\JuanJuanLin\\SiouxFall_TunePara\\"
folder = root + "j=0\\"
CompareOneFolder(_folder=folder, _name="j=0")
folder = root + "j=1\\"
CompareOneFolder(_folder=folder, _name="j=1")
folder = root + "j=2\\"
CompareOneFolder(_folder=folder, _name="j=2")
folder = root + "j=3\\"
CompareOneFolder(_folder=folder, _name="j=3")
folder = root + "j=4\\"
CompareOneFolder(_folder=folder, _name="j=4")
folder = root + "j=5\\"
CompareOneFolder(_folder=folder, _name="j=5")
folder = root + "j=6\\"
CompareOneFolder(_folder=folder, _name="j=6")
folder = root + "j=7\\"
CompareOneFolder(_folder=folder, _name="j=7")
folder = root + "j=8\\"
CompareOneFolder(_folder=folder, _name="j=8")
def effect_of_operators(_folder):
"""test the effect of operators
"""
# print("Now Plot the effect of operatros")
# OperatorFolder = root_folder + "Tests2022/EffectOfOperators/"
print("-----Now start to plot the effecct for each operators ------")
print("Read folder = {0}".format(_folder))
CompareOneFolder(_folder=_folder, _name="CompareOperator")
print("--------------------Complete operator effects--------------------------")
def get_files():
"""generate the list of files names
"""
pass
def plotTable(df, _name: str):
rcolors = plt.cm.BuPu(np.full(len(df.index), 0.1))
ccolors = plt.cm.BuPu(np.full(len(df.columns), 0.1))
the_table = plt.table(cellText=df.values,
rowLabels=df.index,
rowColours=rcolors,
colColours=ccolors,
colLabels=df.columns,
rowLoc='right',
loc='center')
plt.box(on=None)
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
the_table.scale(1, 1.5)
the_table.set_fontsize(12)
plt.ion()
plt.pause(2)
plt.savefig(_name+".png", bbox_inches='tight', dpi=600)
plt.close()
def plotRelation(_folder: str):
"""read and plot relationship
plot table reference
https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4
"""
# plot table
BestSeed = getBestSeed(_folder)
fn = _folder + "/DomRelation.txt"
data = pd.read_csv(fn)
num_row = data.shape[0]
print(num_row)
matrix_afterScore = []
matrix_BeforeScore = []
matrix_SameScore = []
matrix_Dom = []
for i in range(0, len(para.FailureLinks)):
matrix_afterScore.append([-1]*len(para.FailureLinks))
matrix_BeforeScore.append([-1]*len(para.FailureLinks))
matrix_SameScore.append([-1]*len(para.FailureLinks))
matrix_Dom.append([-1]*len(para.FailureLinks))
for i in range(0, num_row):
if (data["Seed"][i] == BestSeed):
RowLink = data["First"][i]
ColLink = data["Second"][i]
AfterScore = data["AferScore"][i]
BeforeScore = data["BeforeScore"][i]
SameScore = data["SameScore"][i]
DomVal = data["Dom"][i]
DomStatus = 'None'
if DomVal == 0:
DomStatus = "Equal"
if DomVal == 1:
DomStatus = "After"
if DomVal == 2:
DomStatus = "Before"
if DomVal == 3:
DomStatus = "Same"
RowNum = para.FailureLinks.index(RowLink)
ColNum = para.FailureLinks.index(ColLink)
matrix_afterScore[RowNum][ColNum] = AfterScore
matrix_BeforeScore[RowNum][ColNum] = BeforeScore
matrix_SameScore[RowNum][ColNum] = SameScore
matrix_Dom[RowNum][ColNum] = DomStatus
# print(matrix)
df = pd.DataFrame(matrix_afterScore,
columns=para.FailureLinks, index=para.FailureLinks)
plotTable(df, "AfterScore")
df = pd.DataFrame(matrix_BeforeScore,
columns=para.FailureLinks, index=para.FailureLinks)
plotTable(df, "BeforeScore")
df = pd.DataFrame(matrix_SameScore,
columns=para.FailureLinks, index=para.FailureLinks)
plotTable(df, "SameScore")
df = pd.DataFrame(matrix_Dom, columns=para.FailureLinks,
index=para.FailureLinks)
plotTable(df, "DomStatus")
def print_best_seed_sol(_folder,_best_seed):
"""
print the best solution of the best seed
"""
sol = pd.read_csv(_folder+"/ABCPrintSols.txt")
num_row = sol.shape[0]
link = []
st =[]
et = []
for i in range(0,num_row):
if sol["Seed"][i]==_best_seed:
link.append(sol["Link"][i])
st.append(sol["St"][i])
et.append(sol["Et"][i])
with open(_folder+"BestSol.txt","w+") as f:
print("Seed,Link,St,Et",file=f)
for r in range(0,len(link)):
print("{0},{1},{2},{3}".format(_best_seed,link[r],st[r],et[r]),file=f)
def print_best_seed_period(_folder,_best_seed):
"""
    print the period data of the best seed
"""
sol =
|
pd.read_csv(_folder+"/PrintPeriod.txt")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from numpy import complex128
import pandas as pd
from pandapower.auxiliary import _sum_by_group, sequence_to_phase, _sum_by_group_nvals
from pandapower.pypower.idx_bus import VM, VA, PD, QD, LAM_P, LAM_Q, BASE_KV,NONE
from pandapower.pypower.idx_gen import PG, QG
def _set_buses_out_of_service(ppc):
disco = np.where(ppc["bus"][:, 1] == NONE)[0]
ppc["bus"][disco, VM] = np.nan
ppc["bus"][disco, VA] = np.nan
ppc["bus"][disco, PD] = 0
ppc["bus"][disco, QD] = 0
def _get_bus_v_results(net, ppc, suffix=None):
ac = net["_options"]["ac"]
bus_idx = _get_bus_idx(net)
res_table = "res_bus" if suffix is None else "res_bus%s" % suffix
if ac:
net[res_table]["vm_pu"] = ppc["bus"][bus_idx][:, VM]
# voltage angles
net[res_table]["va_degree"] = ppc["bus"][bus_idx][:, VA]
def _get_bus_v_results_3ph(net, ppc0, ppc1, ppc2):
ac = net["_options"]["ac"]
V012_pu = _V012_from_ppc012(net, ppc0, ppc1, ppc2)
# Uncomment for results in kV instead of pu
# bus_base_kv = ppc0["bus"][:,BASE_KV]/np.sqrt(3)
# V012_pu = V012_pu*bus_base_kv
Vabc_pu = sequence_to_phase(V012_pu)
if ac:
net["res_bus_3ph"]["vm_a_pu"] = np.abs(Vabc_pu[0, :].flatten())
net["res_bus_3ph"]["vm_b_pu"] = np.abs(Vabc_pu[1, :].flatten())
net["res_bus_3ph"]["vm_c_pu"] = np.abs(Vabc_pu[2, :].flatten())
# voltage angles
net["res_bus_3ph"]["va_a_degree"] = np.angle(Vabc_pu[0, :].flatten())*180/np.pi
net["res_bus_3ph"]["va_b_degree"] = np.angle(Vabc_pu[1, :].flatten())*180/np.pi
net["res_bus_3ph"]["va_c_degree"] = np.angle(Vabc_pu[2, :].flatten())*180/np.pi
net["res_bus_3ph"]["unbalance_percent"] = np.abs(V012_pu[2, :]/V012_pu[1, :])*100
net["res_bus_3ph"].index = net["bus"].index
def _V012_from_ppc012(net, ppc0, ppc1, ppc2):
bus_idx = _get_bus_idx(net)
V012_pu = np.zeros((3, len(bus_idx)), dtype=complex128)
V012_pu[0, :] = ppc0["bus"][bus_idx][:, VM] * np.exp(1j * np.deg2rad(ppc0["bus"][bus_idx][:, VA]))
V012_pu[1, :] = ppc1["bus"][bus_idx][:, VM] * np.exp(1j * np.deg2rad(ppc1["bus"][bus_idx][:, VA]))
V012_pu[2, :] = ppc2["bus"][bus_idx][:, VM] * np.exp(1j * np.deg2rad(ppc2["bus"][bus_idx][:, VA]))
return V012_pu
def _get_bus_idx(net):
bus_lookup = net["_pd2ppc_lookups"]["bus"]
ppi = net["bus"].index.values
bus_idx = bus_lookup[ppi]
return bus_idx
def _get_opf_marginal_prices(net, ppc):
bus_idx = _get_bus_idx(net)
net["res_bus"]["lam_p"] = ppc["bus"][bus_idx][:, LAM_P]
net["res_bus"]["lam_q"] = ppc["bus"][bus_idx][:, LAM_Q]
def _get_bus_results(net, ppc, bus_pq):
ac = net["_options"]["ac"]
mode = net["_options"]["mode"]
# write sum of p and q values to bus
net["res_bus"]["p_mw"].values[:] = bus_pq[:, 0]
if ac:
net["res_bus"]["q_mvar"].values[:] = bus_pq[:, 1]
# opf variables
if mode == "opf":
_get_opf_marginal_prices(net, ppc)
# update index in res bus bus
net["res_bus"].index = net["bus"].index
def _get_bus_results_3ph(net, bus_pq):
ac = net["_options"]["ac"]
# write sum of p and q values to bus
net["res_bus_3ph"]["p_a_mw"] = bus_pq[:, 0]
net["res_bus_3ph"]["p_b_mw"] = bus_pq[:, 2]
net["res_bus_3ph"]["p_c_mw"] = bus_pq[:, 4]
if ac:
net["res_bus_3ph"]["q_a_mvar"] = bus_pq[:, 1]
net["res_bus_3ph"]["q_b_mvar"] = bus_pq[:, 3]
net["res_bus_3ph"]["q_c_mvar"] = bus_pq[:, 5]
# Todo: OPF
# update index in res bus bus
# net["res_bus"].index = net["bus"].index
net["res_bus_3ph"].index = net["bus"].index
def write_voltage_dependend_load_results(net, p, q, b):
l = net["load"]
_is_elements = net["_is_elements"]
if len(l) > 0:
load_is = _is_elements["load"]
scaling = l["scaling"].values
bus_lookup = net["_pd2ppc_lookups"]["bus"]
lidx = bus_lookup[l["bus"].values]
voltage_depend_loads = net["_options"]["voltage_depend_loads"]
cz = l["const_z_percent"].values / 100.
ci = l["const_i_percent"].values / 100.
cp = 1. - (cz + ci)
# constant power
pl = l["p_mw"].values * scaling * load_is * cp
net["res_load"]["p_mw"] = pl
p = np.hstack([p, pl])
ql = l["q_mvar"].values * scaling * load_is * cp
net["res_load"]["q_mvar"] = ql
q = np.hstack([q, ql])
b = np.hstack([b, l["bus"].values])
if voltage_depend_loads:
# constant impedance and constant current
vm_l = net["_ppc"]["bus"][lidx, 7]
volt_depend = ci * vm_l + cz * vm_l ** 2
pl = l["p_mw"].values * scaling * load_is * volt_depend
net["res_load"]["p_mw"] += pl
p = np.hstack([p, pl])
ql = l["q_mvar"].values * scaling * load_is * volt_depend
net["res_load"]["q_mvar"] += ql
q = np.hstack([q, ql])
b = np.hstack([b, l["bus"].values])
return p, q, b
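# Added worked example (illustration only, not part of pandapower): the split above
# implements a ZIP-style voltage-dependent load. With const_z_percent=30,
# const_i_percent=20, p_mw=1.0 and a solved bus voltage of vm=0.95 pu:
#   cz, ci = 0.3, 0.2          ->  cp = 1 - (cz + ci) = 0.5
#   constant-power part:           1.0 * 0.5                        = 0.500 MW
#   voltage-dependent part:        1.0 * (0.2*0.95 + 0.3*0.95**2)   = 0.461 MW
# so res_load.p_mw ends up at roughly 0.961 MW for that load.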
def write_pq_results_to_element(net, ppc, element, suffix=None):
"""
get p_mw and q_mvar for a specific pq element ("load", "sgen"...).
This function basically writes values element table to res_element table
:param net: pandapower net
:param element: element name (str)
:return:
"""
# info from net
_is_elements = net["_is_elements"]
ac = net["_options"]["ac"]
# info element
el_data = net[element]
res_ = "res_%s" % element
if suffix is not None:
res_ += "_%s"%suffix
ctrl_ = "%s_controllable" % element
is_controllable = False
if ctrl_ in _is_elements:
controlled_elements = net[element][net._is_elements[ctrl_]].index
gen_idx = net._pd2ppc_lookups[ctrl_][controlled_elements]
gen_sign = 1 if element == "sgen" else -1
is_controllable = True
# Wards and xwards have different names in their element table, but not in res table. Also no scaling -> Fix...
p_mw = "ps_mw" if element in ["ward", "xward"] else "p_mw"
q_mvar = "qs_mvar" if element in ["ward", "xward"] else "q_mvar"
scaling = el_data["scaling"].values if element not in ["ward", "xward"] else 1.0
element_in_service = _is_elements[element]
# P result in kw to element
net[res_]["p_mw"].values[:] = el_data[p_mw].values * scaling * element_in_service
if is_controllable:
net[res_]["p_mw"].loc[controlled_elements] = ppc["gen"][gen_idx, PG] * gen_sign
if ac:
# Q result in kvar to element
net[res_]["q_mvar"].values[:] = el_data[q_mvar].values * scaling * element_in_service
if is_controllable:
net[res_]["q_mvar"].loc[controlled_elements] = ppc["gen"][gen_idx, QG] * gen_sign
return net
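# Usage sketch (added for illustration; this helper is normally called internally
# during result extraction, not by user code): after running a power flow on any
# network, for instance
#   import pandapower as pp
#   net = pp.networks.example_simple()
#   pp.runpp(net)
# the element result tables net.res_load / net.res_sgen hold the p_mw / q_mvar
# values this function copies from the element tables (scaled and masked by the
# in-service flags), with controllable elements overwritten from ppc["gen"].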
def write_pq_results_to_element_3ph(net, element):
"""
get p_mw and q_mvar for a specific pq element ("load", "sgen"...).
    This function basically writes values from the element table to the res_element table
:param net: pandapower net
:param element: element name (str)
:return:
"""
# info from net
_is_elements = net["_is_elements"]
ac = net["_options"]["ac"]
# info element
el_data = net[element]
res_ = "res_" + element+"_3ph"
scaling = el_data["scaling"].values
element_in_service = _is_elements[element]
net[res_]["p_a_mw"] = pd.Series((el_data["p_mw"].values/3)\
* scaling * element_in_service) if element in[ "load","sgen"] else\
pd.Series(el_data["p_a_mw"].values * scaling * element_in_service)
net[res_]["p_b_mw"] = pd.Series((el_data["p_mw"].values/3) \
* scaling * element_in_service)if element in[ "load","sgen"] else\
pd.Series(el_data["p_b_mw"].values * scaling * element_in_service)
net[res_]["p_c_mw"] = pd.Series((el_data["p_mw"].values/3) \
* scaling * element_in_service) if element in[ "load","sgen"] else\
pd.Series(el_data["p_c_mw"].values * scaling * element_in_service)
if ac:
# Q result in kvar to element
net[res_]["q_a_mvar"] = pd.Series((el_data["q_mvar"].values/3)\
* scaling * element_in_service) if element in[ "load","sgen"] else\
|
pd.Series(el_data["q_a_mvar"].values * scaling * element_in_service)
|
pandas.Series
|
import pandas as pd
lst = [1, 2, 3, 4, 5] #creating a list
series =
|
pd.Series(lst)
|
pandas.Series
|
from __future__ import division, print_function
from datetime import timedelta
from jitterbug import *
from supervised_models import TM,SVM,RF,DT,NB,LR
from pdb import set_trace
import matplotlib.pyplot as plt
from os import listdir
from collections import Counter
import pandas as pd
import numpy as np
from demos import cmd
try:
import cPickle as pickle
except:
import pickle
import warnings
warnings.filterwarnings('ignore')
def parse(path = "../data/"):
for file in listdir(path):
df = pd.read_csv("../data/"+file)
df.rename(columns={'commenttext':'Abstract'}, inplace=True)
df['label'] = ["no" if type=="WITHOUT_CLASSIFICATION" else "yes" for type in df["classification"]]
df['ID'] = range(len(df))
df = df[["ID","projectname","classification","Abstract","label",]]
df.to_csv("../new_data/original/"+file, line_terminator="\r\n", index=False)
def find_patterns(target='apache-ant-1.7.0'):
data=load_csv(path="../new_data/original/")
jitterbug = Jitterbug(data,target)
patterns = jitterbug.find_patterns()
print("Patterns:")
print(patterns)
print("Precisions on training set:")
print({p: jitterbug.easy.precs[i] for i,p in enumerate(patterns)})
def validate_ground_truth(target='apache-ant-1.7.0'):
data=load_csv(path="../new_data/original/")
jitterbug = Jitterbug(data,target)
patterns = jitterbug.find_patterns()
jitterbug.easy_code(patterns)
jitterbug.output_conflicts(output="../new_data/conflicts/")
def summarize_validate(input = "../new_data/validate/",output="../results/"):
data=load_csv(input)
columns = ["Double Check"]+list(data.keys())
result = {}
result["Double Check"] = ["yes (Easy)","no (GT)"]
for project in data:
count = Counter(data[project]["validate"])
result[project]=[count["yes"],count["no"]]
df = pd.DataFrame(data=result,columns=columns)
df.to_csv(output+"validate_sum.csv", line_terminator="\r\n", index=False)
def correct_ground_truth(validated="../new_data/validate/", output="../new_data/corrected/"):
data = load_csv(path="../new_data/original/")
data_validated = load_csv(path=validated)
for project in data:
for id in data_validated[project][data_validated[project]["validate"]=="yes"]["ID"]:
data[project]["label"][id]="yes"
data[project].to_csv(output+project+".csv", line_terminator="\r\n", index=False)
stats = Counter(data_validated[project]["validate"])
ratio = float(stats["yes"])/(stats["yes"]+stats["no"])
print(project)
print(ratio)
def Easy_results(source="corrected",output="../results/"):
input = "../new_data/"+source+"/"
data=load_csv(path=input)
results = {"Metrics":["Precision","Recall","F1"]}
for target in data:
jitterbug = Jitterbug(data,target)
patterns = jitterbug.find_patterns()
print(patterns)
print(jitterbug.easy.precs)
stats = jitterbug.test_patterns(output=True)
stats["t"] = len(data[target][data[target]["label"]=="yes"])
prec = float(stats['tp'])/stats['p']
rec = float(stats['tp'])/stats['t']
f1 = 2*prec*rec/(prec+rec)
results[target]=[prec,rec,f1]
df = pd.DataFrame(data=results,columns=["Metrics"]+list(data.keys()))
df.to_csv(output+"step1_Easy_"+source+".csv", line_terminator="\r\n", index=False)
def MAT_results(source="corrected",output="../results/"):
input = "../new_data/"+source+"/"
data=load_csv(path=input)
results = {"Metrics":["Precision","Recall","F1"]}
for target in data:
mat = MAT(data,target)
mat.preprocess()
mat.find_patterns()
stats = mat.test_patterns()
stats["t"] = len(data[target][data[target]["label"]=="yes"])
prec = float(stats['tp'])/stats['p']
rec = float(stats['tp'])/stats['t']
f1 = 2*prec*rec/(prec+rec)
results[target]=[prec,rec,f1]
df = pd.DataFrame(data=results,columns=["Metrics"]+list(data.keys()))
df.to_csv(output+"step1_MAT_"+source+".csv", line_terminator="\r\n", index=False)
def fitness_pattern(pattern='xxx'):
data=load_csv(path="../new_data/original/")
fitness = {}
for target in data:
jitterbug = Jitterbug(data,target)
p_id = list(jitterbug.easy.voc).index(pattern)
poses = np.where(np.array(jitterbug.easy.y_label)== "yes")[0]
count_tp = np.array(np.sum(jitterbug.easy.test_data[poses], axis=0))[0][p_id]
count_p = np.array(np.sum(jitterbug.easy.test_data, axis=0))[0][p_id]
fitness[target] = np.nan_to_num(count_tp * (count_tp / count_p) ** 3)
print(fitness)
def rest_results(seed=0,input="../new_data/rest/",output="../results/"):
treatments = ["LR","DT","RF","SVM","NB","TM"]
data=load_csv(path=input)
columns = ["Treatment"]+list(data.keys())
# Supervised Learning Results
result = {target: [supervised_model(data,target,model=model,seed=seed) for model in treatments] for target in data}
result["Treatment"] = treatments
to_dump = {key: {"RF": result[key][2], "TM": result[key][5]} for key in data}
# Output results to tables
metrics = result[columns[-1]][0].keys()
for metric in metrics:
df = {key: (result[key] if key=="Treatment" else [dict[metric] for dict in result[key]]) for key in result}
pd.DataFrame(df,columns=columns).to_csv(output+"rest_"+metric+".csv", line_terminator="\r\n", index=False)
# Hard Results (continuous learning)
APFD_result = {}
AUC_result = {}
for target in data:
APFD_result[target] = []
AUC_result[target] = []
for model in treatments[:-1]:
jitterbug = Jitterbug_hard(data,target,model=model,seed=seed)
stats = jitterbug.eval()
APFD_result[target].append(stats['APFD'])
AUC_result[target].append(stats['AUC'])
if model=="RF":
to_dump[target]["Hard"] = stats
with open("../dump/rest_result.pickle","wb") as f:
pickle.dump(to_dump,f)
APFD_result["Treatment"] = treatments[:-1]
AUC_result["Treatment"] = treatments[:-1]
|
pd.DataFrame(APFD_result,columns=columns)
|
pandas.DataFrame
|
from .._common import *
import pandas as pd
import numpy as np
class ToDataframe(yo_fluq.agg.PushQueryElement):
def __init__(self, **kwargs):
self.kwargs = kwargs
def on_enter(factory,instance):
instance.lst = []
def on_process(factory, instance, element):
instance.lst.append(element)
def on_report(factory, instance):
return pd.DataFrame(instance.lst,**factory.kwargs)
class ToNDArray(yo_fluq.agg.PushQueryElement):
def on_enter(factory,instance):
instance.lst = []
def on_process(factory, instance, element):
instance.lst.append(element)
def on_report(factory, instance):
return np.array(instance.lst)
class ToSeries(yo_fluq.agg.PushQueryElement):
def __init__(self, value_selector : Optional[Callable] = None, key_selector : Optional[Callable] = None, **kwargs):
self.value_selector = value_selector
self.key_selector = key_selector
self.kwargs = kwargs
def on_enter(factory,instance):
instance.values = []
instance.keys = []
instance.first_time = True
instance.accepts_key_value_pair = None
instance.key_selector = factory.key_selector
instance.value_selector = factory.value_selector
instance.kwargs = factory.kwargs
def on_process(factory, instance, element):
if instance.first_time:
if isinstance(element, yo_fluq.KeyValuePair):
instance.accepts_key_value_pair = True
if instance.value_selector is None:
instance.value_selector = lambda z: z.value
if instance.key_selector is None:
instance.key_selector = lambda z: z.key
else:
instance.accepts_key_value_pair = False
if instance.value_selector is None:
instance.value_selector = lambda z: z
instance.first_time = False
else:
if isinstance(element, yo_fluq.KeyValuePair) != (instance.accepts_key_value_pair):
                raise ValueError('The sequence is a mixture of key-value pairs and other types, which is not allowed')
instance.values.append(instance.value_selector(element))
if instance.key_selector is not None:
instance.keys.append(instance.key_selector(element))
def on_report(factory, instance):
if instance.key_selector is None:
return pd.Series(instance.values,**instance.kwargs)
else:
return
|
pd.Series(instance.values, instance.keys, **instance.kwargs)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
SVM and GBDT algorithm module
@author 谢金豆
"""
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler  # z-score standardization
from sklearn.svm import SVC  # SVC from the svm package, used for classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import cross_validation, metrics
from sklearn.metrics import roc_curve, auc
from sklearn import metrics
L=["cr","in","pa","ps","rs","sc"]
L1=["cr","in","pa","ps","rs","sc","gg","rp","sp"]
def get_data(x,y):
    file_path='D:/NLS_data/'  # path to the source data
    file_path1='D:/train/'
    train_set = np.zeros(shape=[1,64*64])  # train_set collects the flattened samples
    train_set = pd.DataFrame(train_set)  # convert train_set to a DataFrame
    target=[]  # list of labels
for i in L:
for j in range(x,y):
target.append(i)
            img = cv2.imread(file_path+i+'/'+str(j)+'.jpg',\
                             cv2.IMREAD_GRAYSCALE)  # read the image; the second argument loads it as grayscale
img=img.reshape(1,img.shape[0]*img.shape[1])
img=pd.DataFrame(img)
train_set=pd.concat([train_set,img],axis=0)
train_set.index=list(range(0,train_set.shape[0]))
train_set.drop(labels=0,axis=0,inplace=True)
target=
|
pd.DataFrame(target)
|
pandas.DataFrame
|
import numpy as np
from at import *
from at.load import load_mat
from matplotlib import pyplot as plt
import matplotlib.pyplot as plt
import at.plot
import numpy as np
from pylab import *
import pandas as pd
import csv
from random import random
def plot_closedOrbit(ring, refpts):
elements_indexes = get_refpts(ring, refpts)
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x= lindata['beta'][:, 0]
beta_y= lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("s_pos")
# Label for y-axis
plt.ylabel("closed_orbit x")
# for display
i = 0
S_pos2 = []
plt.title("Closed orbit x")
plt.show()
plt.plot(s_pos, closed_orbity)
# Label for x-axis
plt.xlabel("s_pos")
# Label for y-axis
plt.ylabel("closed_orbit y")
# for display
i = 0
S_pos2 = []
plt.title("Closed orbit y")
plt.show()
def correctionType(alpha1,alpha2, alpha3):
if alpha1 == 1:
type = "optics correction"
if alpha2 == 1:
type = "dispersion correction"
if alpha3 == 1:
type = "optics and dispersion correction"
print("This code performs: ", type)
#return type
def func(j, mylist):
# dedup, preserving order (dict is insertion-ordered as a language guarantee as of 3.7):
deduped = list(dict.fromkeys(mylist))
# Slice off all but the part you care about:
return deduped[::j]
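# Illustrative self-check (added, not in the original): dedup keeps the first
# occurrence of each value, then the [::j] slice keeps every j-th survivor.
assert func(2, [3, 1, 3, 2, 1, 4]) == [3, 2]  # deduped -> [3, 1, 2, 4]; [::2] -> [3, 2]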
def defineMatrices_w_eta(W, alpha1, alpha2,alpha3, C0x, C0y, C0xy, C0yx, Cxx_err, Cyy_err, Cxy_err, Cyx_err, dCx, dCy, dCxy,dCyx):
Nk = len(dCx) # number of free parameters
Nm = len(dCx) # number of measurements
print('NK:', Nk)
print('Nm:', Nm)
Ax = np.zeros([Nk, Nk])
Ay = np.zeros([Nk, Nk])
Axy = np.zeros([Nk, Nk])
Ayx = np.zeros([Nk, Nk])
A = np.zeros([4 * Nk, Nk])
##
Bx = np.zeros([Nk, 1])
By = np.zeros([Nk, 1])
Bxy = np.zeros([Nk, 1])
Byx = np.zeros([Nk, 1])
B = np.zeros([4 * Nk, 1])
##
Dx = (Cxx_err[:, :] - C0x[:, :] )#- error_variance) ### dk ?
Dy = (Cyy_err[:, :] - C0y[:, :] )
Dxy = (Cxy_err[:, :] - C0xy[:, :])
Dyx = (Cyx_err[:, :] - C0yx[:, :] )
##
for i in range(Nk): ## i represents each quad
# print('done A:', 100.* i ,'%')
for j in range(Nk):
Ax[i, j] = np.sum(np.dot(np.dot(dCx[i][0: -2, :],W*alpha1), dCx[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCx[i][ -2 ::, :],W*alpha2), dCx[j][ -2 ::, :].T)) + np.sum(np.dot(np.dot(dCx[i],W*alpha3), dCx[j].T))
Ay[i, j] = np.sum(np.dot(np.dot(dCy[i][0: -2, :],W*alpha1), dCy[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCy[i][ -2 ::, :],W*alpha2), dCy[j][ -2 ::, :].T))+ np.sum(np.dot(np.dot(dCy[i],W*alpha3), dCy[j].T))
Axy[i, j] = np.sum(np.dot(np.dot(dCxy[i][0: -2, :],W*alpha1), dCxy[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCxy[i][ -2 ::, :],W*alpha2), dCxy[j][ -2 ::, :].T))+ np.sum(np.dot(np.dot(dCxy[i],W*alpha3), dCxy[j].T))
Ayx[i, j] = np.sum(np.dot(np.dot(dCyx[i][0: -2, :],W*alpha1), dCyx[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCyx[i][ -2 ::, :],W*alpha2), dCyx[j][ -2 ::, :].T))+ np.sum(np.dot(np.dot(dCyx[i],W*alpha3), dCyx[j].T))
A[i, :] = Ax[i, :]
A[i + Nk, :] = Ay[i, :]
A[i + 2 * Nk, :] = Axy[i, :]
A[i + 3 * Nk, :] = Ayx[i, :]
##
for i in range(Nk):
Bx[i] = np.sum(np.dot(np.dot(dCx[i][0: -2, :],W*alpha1), Dx[0: -2, :].T))+ np.sum(np.dot(np.dot(dCx[i][ -2 ::, :],W*alpha2), Dx[ -2 ::, :].T)) + np.sum(np.dot(np.dot(dCx[i],W*alpha3), Dx.T))
By[i] = np.sum(np.dot(np.dot(dCy[i][0: -2, :],W*alpha1), Dy[0: -2, :].T)) + np.sum(np.dot(np.dot(dCy[i][ -2 ::, :],W*alpha2), Dy[ -2 ::, :].T))+np.sum(np.dot(np.dot(dCy[i],W*alpha3), Dy.T))
Bxy[i] = np.sum(np.dot(np.dot(dCxy[i][0: -2, :],W*alpha1), Dxy[0: -2, :].T))+ np.sum(np.dot(np.dot(dCxy[i][ -2 ::, :],W*alpha2), Dxy[ -2 ::, :].T))+np.sum(np.dot(np.dot(dCxy[i],W*alpha3), Dxy.T))
Byx[i] = np.sum(np.dot(np.dot(dCyx[i][0: -2, :],W*alpha1), Dyx[0: -2, :].T))+ np.sum(np.dot(np.dot(dCyx[i][ -2 ::, :],W*alpha2), Dyx[ -2 ::, :].T))+np.sum(np.dot(np.dot(dCyx[i],W*alpha3), Dyx.T))
B[i] = Bx[i]
B[i + Nk] = By[i]
B[i + 2 * Nk] = Bxy[i]
B[i + 3 * Nk] = Byx[i]
return A, B,
def getInverse(A, B,Nk, sCut):
u, s, v = np.linalg.svd(A, full_matrices=True)
smat = 0.0 * A
si = s ** -1
n_sv = sCut
si[n_sv:] *= 0.0
print("number of singular values {}".format(len(si)))
smat[:Nk, :Nk] = np.diag(si)
print('A' + str(A.shape), 'B' + str(B.shape), 'U' + str(u.shape), 'smat' + str(smat.shape), 'v' + str(v.shape))
plt.plot(np.log(s), 'd--')
plt.title('singular value')
plt.show()
plt.plot(si, 'd--')
plt.title('singular value inverse')
plt.show()
Ai = np.dot(v.transpose(), np.dot(smat.transpose(), u.transpose()))
###
r = (np.dot(Ai, B)).reshape(-1)
plot(r, 'd')
plt.show()
# error
e = np.dot(A, r).reshape(-1) - B.reshape(-1)
plt.plot(e)
plt.show()
plt.plot(B)
plt.show()
return Ai, r, e
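# Minimal sketch (added for illustration, not part of the original): the truncated
# pseudo-inverse built above, reproduced with plain numpy on a tiny matrix with all
# singular values kept (i.e. sCut equal to the rank):
#   A = np.array([[2.0, 0.0], [0.0, 0.5]])
#   u, s, v = np.linalg.svd(A, full_matrices=True)
#   Ai = v.T @ np.diag(s ** -1) @ u.T
#   np.allclose(Ai, np.linalg.inv(A))   # -> True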
def compare_orm(Cxy, Cxy_err, Cxy_corr, no):
# plot the 3 sets
plt.plot(Cxy[no], label='C')
plt.plot(Cxy_err[no], label='C_err')
plt.plot(Cxy_corr[no], label='C_corr')
# call with no parameters
plt.legend()
plt.show()
def compare_drm(Cxy, Cxy_err, Cxy_corr):
# plot the 3 sets
plt.plot(Cxy, label='$\eta$')
plt.plot(Cxy_err, label='$\eta_{err}$')
plt.plot(Cxy_corr, label='$\eta_{corr}$')
# call with no parameters
plt.legend()
plt.show()
def generatingQuadsResponse1(ring, Cxx, Cyy,Cxy, Cyx , used_correctors):
# %%time
quads_info = quad_info(ring)
quad_dict, quad_vals = getQuadFamilies(quads_info)
quads = [k for k in quad_dict.keys()]
quad_names = quads
dk = 0.0001
qxx = []
qxy = []
qyy = []
qyx = []
quad_names = quads
for qname in quad_names:
print('generating response to {}, n={}'.format(qname, quad_dict[qname]))
t0 = time.time()
nq = quad_dict[qname] + 1
for i in range(0, nq):
Qxx, Qxy, Qyy, Qyx = computeOpticsD1(ring, qname, i, dk, quad_vals, used_correctors)
qxx.append(Qxx)
qxy.append(Qxy)
qyy.append(Qyy)
qyx.append(Qyx)
t1 = time.time()
print(f"Execution time: {t1 - t0} sec")
C0x = Cxx
C0y = Cyy
C0xy = Cxy
C0yx = Cyx
dCx = []
dCy = []
dCxy = []
dCyx = []
quad_names = quads
for qname in quad_names:
# nquad = quad_dict[qname]
print('loading response to:', qname)
i = 0
while (i < len(qxx)):
C1x = qxx[i]
C1y = qyy[i]
C1xy = qxy[i]
C1yx = qyx[i]
dcxx = ((C1x - C0x) / dk)
dcyy = ((C1y - C0y) / dk)
dCxy.append((C1xy - C0xy) / dk)
dCyx.append((C1yx - C0yx) / dk)
dCx.append(dcxx)
dCy.append(dcyy)
i += 1
return C0x, C0y, C0xy, C0yx, dCx, dCy, dCxy,dCyx
def setCorrection(ring, quads_info_error,quad_names, r , quads_info,n_list, used_quads):
quad_dict, quad_vals = getQuadFamilies(quads_info_error)
n_list = len(quads_info_error.s_pos)
# print(n_list)
quad_names = quad_names
iq = 0
frac = 1.0
cor_dict = {}
DK = []
for qname in quad_names:
if qname in used_quads:
cor_dict[qname] = -r[iq] * frac
iq += 1
print("define correction : Done")
quads_indexes = get_refpts(ring, elements.Quadrupole)
for qname in quads_indexes:
if ring[qname].FamName in used_quads:
dk1 = cor_dict[ring[qname].FamName]
DK.append(dk1)
else:
DK.append(0)
quads_indexes = get_refpts(ring, elements.Quadrupole)
i = 0
while (i < len(quads_indexes)):
ring[quads_indexes[i]].K += DK[i]
i += 1
print("set correction : Done")
def setCorrection1(ring, quads_info_error,quad_names, r , quads_info,n_list):
quad_dict, quad_vals = getQuadFamilies(quads_info_error)
n_list = len(quads_info_error.s_pos)
# print(n_list)
quad_names = quad_names
iq = 0
frac = 1.0
cor_dict = {}
for qname in quad_names:
nquad = quad_dict[qname]
# print(qname, quad_dict[qname])
for i in range(0, nquad):
cor_dict[qname, i + 1] = -r[iq] * frac
iq += 1
print("define correction : Done")
DK = []
for idx in range(n_list):
qname_ = quads_info.elements_name[idx] # ElementName
occ = quads_info_error.occ[idx]
dk = cor_dict[qname_, occ]
DK.append(dk)
quads_indexes = get_refpts(ring, elements.Quadrupole)
i = 0
while (i < len(quads_indexes)):
ring[quads_indexes[i]].K += DK[i]
i += 1
print("set correction : Done")
def plotORM(orm):
plt.figure()
imshow(orm)
plt.show()
def getBetaBeat(twiss, twiss_error):
print("getBetaBeat bx and by: ")
bxi =[]
for i in range(len(twiss.betax)):
bxx = (twiss_error.betax[i] - twiss.betax[i]) / twiss.betax[i]
bxi.append(bxx)
byi =[]
for i in range(len(twiss.betay)):
byy = (twiss_error.betay[i] - twiss.betay[i]) / twiss.betay[i]
byi.append(byy)
bxx = np.array((twiss_error.betax - twiss.betax) / twiss.betax)
byy = np.array((twiss_error.betay - twiss.betay) / twiss.betay)
bx = np.sqrt(np.mean(bxx ** 2))
by = np.sqrt(np.mean(byy ** 2))
#bx = np.std((twiss_error.betax - twiss.betax) / twiss.betax)
#by = np.std((twiss_error.betay - twiss.betay) / twiss.betay)
print("Simulated beta beat, x:" + str(bx * 100) + "% y: " + str(by* 100) + "%")
def used_elements_plot(lattice, elements_indexes, used_quad):
elements_indexes = get_refpts(lattice, '*')
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x = lindata['beta'][:, 0]
beta_y = lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
#plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("closed_orbit_x")
# for display
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
plt.plot(s_pos, beta_x)
#plt.plot(s_pos, beta_x)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("beta_x")
# for display
S_pos2 = []
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
def used_elements_plot1(lattice, s_poss, used_quad):
elements_indexes = get_refpts(lattice, '*')
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x = lindata['beta'][:, 0]
beta_y = lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
#plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("closed_orbit_x")
# for display
i = 0
for i in s_poss:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
plt.plot(s_pos, beta_x)
#plt.plot(s_pos, beta_x)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("beta_x")
# for display
S_pos2 = []
i = 0
S_pos2 = []
for i in s_poss:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
def getDispersion(twiss, twiss_error, twiss_corrected):
plt.plot(twiss.dx, label='$\eta_x$')
    plt.plot(twiss_error.dx, label=r'$\eta_{x,err}$')
    plt.plot(twiss_corrected.dx, label=r'$\eta_{x,corr}$')
plt.legend()
plt.show()
plt.plot(twiss.dy, label='$\eta_y$')
    plt.plot(twiss_error.dy, label=r'$\eta_{y,err}$')
    plt.plot(twiss_corrected.dy, label=r'$\eta_{y,corr}$')
plt.legend()
plt.show()
def make_plot(twiss, plot_name):
from mpl_toolkits.axes_grid1 import host_subplot
import matplotlib.pyplot as plt
host = host_subplot(111)
par = host.twinx()
host.set_xlabel("s_pos")
host.set_ylabel(r'$\beta_x$')
host.set_ylabel(r'$\beta_y$')
par.set_ylabel("dx")
p1, = host.plot(twiss.s_pos, twiss.betax, label=r'$\beta_x$')
p2, = host.plot(twiss.s_pos, twiss.betay, label=r'$\beta_y$')
p3, = par.plot(twiss.s_pos, twiss.dx, label=r'$\eta_x$')
p4, = par.plot(twiss.s_pos, twiss.dy, label=r'$\eta_y$')
leg = plt.legend()
host.yaxis.get_label().set_color(p1.get_color())
leg.texts[0].set_color(p1.get_color())
host.yaxis.get_label().set_color(p2.get_color())
leg.texts[1].set_color(p2.get_color())
par.yaxis.get_label().set_color(p3.get_color())
leg.texts[2].set_color(p3.get_color())
plt.title(plot_name)
plt.show()
def used_quads_f1(ring, used_correctors_list, quad_dict):
# elements_name = used_correctors_list
correctors_indexes = []
quad_dict_ = []
elements_name = []
quads = pd.DataFrame()
s_pos =[]
for i in used_correctors_list:
# quad_dict_.append(int(quad_dict[i]))
quad_dict_ = int(quad_dict[i])
elements_numbers = quad_dict_
corrector_indexx = get_refpts(ring, i)
# print(corrector_index)
element_name = ring[corrector_indexx[0]].FamName
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=corrector_indexx)
s_poss = lindata['s_pos']
s_pos.append(s_poss)
df1 = {
str(i) + str("=") + str(" ") + str(quad_dict_) + str(" ") + str('quads'): corrector_indexx,
}
df2 = pd.concat([pd.DataFrame(v, columns=[k]) for k, v in df1.items()], axis=1)
quads = pd.concat([quads, df2], axis=1)
for j in range(len(s_pos)):
        array1 = np.append(s_pos[0], s_pos[j])
return quads, s_pos
def used_quads_f(ring, used_correctors_list, quad_dict):
#elements_name = used_correctors_list
correctors_indexes = []
quad_dict_ = []
elements_name =[]
quads = pd.DataFrame()
for i in used_correctors_list:
#quad_dict_.append(int(quad_dict[i]))
quad_dict_= int(quad_dict[i])
elements_numbers = quad_dict_
corrector_index = get_refpts(ring, i)
#print(corrector_index)
element_name = ring[corrector_index[0]].FamName
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=corrector_index)
s_poss = lindata['s_pos']
#print(element_name)
df1 = {
str(i) + str("=") + str(" ")+ str( quad_dict_)+ str(" ")+ str('quads'): s_poss,
}
df2 = pd.concat([pd.DataFrame(v, columns=[k]) for k, v in df1.items()], axis=1)
correctors_indexes.append(np.squeeze(corrector_index))
elements_name.append(element_name)
quads = pd.concat([quads, df2], axis=1)
return quads
def used_elements_plot(lattice, used_quad):
elements_indexes = get_refpts(lattice, '*')
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x = lindata['beta'][:, 0]
beta_y = lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
#plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("closed_orbit_x")
# for display
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
plt.plot(s_pos, beta_x)
#plt.plot(s_pos, beta_x)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("beta_x")
# for display
S_pos2 = []
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df =
|
pd.concat([S_pos1])
|
pandas.concat
|
import os
import sys
if not os.path.join('..','..') in sys.path: sys.path.append(os.path.join('..','..'))
import nibabel as nib
import numpy as np
import pandas as pd
import re
import matplotlib as mpl
if os.getenv('DISPLAY') is None: mpl.use('Agg')
import matplotlib.pyplot as py
import seaborn as sns
import pickle
from glob import glob
from skimage.metrics import structural_similarity as ssim
from pymirc.viewer import ThreeAxisViewer
from scipy.ndimage import find_objects, zoom
py.rcParams.update({'mathtext.default': 'regular' })
#------------------------------------------------------------------------------------------------------------
def read_nii(fname):
nii = nib.load(fname)
nii = nib.as_closest_canonical(nii)
vol = nii.get_data()
return vol, nii.header['pixdim'][1:4]
#------------------------------------------------------------------------------------------------------------
def regional_statistics(vol, ref_vol, labelimg):
#_,ss_img = ssim(vol.astype(np.float32), ref_vol.astype(np.float32), full = True)
_,ss_img = ssim(vol.astype(np.float32), ref_vol.astype(np.float32), full = True, data_range = 2*ref_vol.max(),
gaussian_weights = True)
df = pd.DataFrame()
for roinum in np.unique(labelimg):
roiinds = np.where(labelimg == roinum)
x = vol[roiinds]
y = ref_vol[roiinds]
data = {'roinum': roinum,
'mean': x.mean(),
'rc_mean': x.mean()/y.mean(),
'ssim': ss_img[roiinds].mean(),
'rmse': np.sqrt(((x - y)**2).mean())/y.mean(),
'nvox': len(roiinds[0])}
df = df.append([data], ignore_index = True)
return df
#------------------------------------------------------------------------------------------------------------
def roi_to_region(roi):
if '-Cerebral-White-Matter' in roi: region = 'white matter'
elif '-Ventricle' in roi: region = 'ventricle'
elif '-Cerebellum-White-Matter' in roi: region = 'cerebellum'
elif '-Cerebellum-Cortex' in roi: region = 'cerebellum'
elif '-Thalamus' in roi: region = 'thalamus'
elif '-Caudate' in roi: region = 'basal ganglia'
elif '-Putamen' in roi: region = 'basal ganglia'
elif '-Pallidum' in roi: region = 'basal ganglia'
elif '3rd-Ventricle' in roi: region = 'ventricle'
elif '4th-Ventricle' in roi: region = 'ventricle'
elif '-Hippocampus' in roi: region = 'hippocampus'
elif '-Amygdala' in roi: region = 'temporal cortex'
elif '-Insula' in roi: region = 'temporal cortex'
elif '-Accumbens-area' in roi: region = 'basal ganglia'
elif roi == 'Unknown': region = 'background'
elif bool(re.match(r'ctx-.*-corpuscallosum',roi)): region = 'corpuscallosum'
elif bool(re.match(r'ctx-.*-cuneus',roi)): region = 'occipital cortex'
elif bool(re.match(r'ctx-.*-entorhinal',roi)): region = 'temporal cortex'
elif bool(re.match(r'ctx-.*-fusiform',roi)): region = 'temporal cortex'
elif bool(re.match(r'ctx-.*-paracentral',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*-parsopercularis',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*-parsorbitalis',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*-parstriangularis',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*-pericalcarine',roi)): region = 'occipital cortex'
elif bool(re.match(r'ctx-.*-postcentral',roi)): region = 'parietal cortex'
elif bool(re.match(r'ctx-.*-precentral',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*-precuneus',roi)): region = 'parietal cortex'
elif bool(re.match(r'ctx-.*-supramarginal',roi)): region = 'parietal cortex'
elif bool(re.match(r'ctx-.*-frontalpole',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*-temporalpole',roi)): region = 'temporal cortex'
elif bool(re.match(r'ctx-.*-insula',roi)): region = 'temporal cortex'
elif bool(re.match(r'ctx-.*frontal',roi)): region = 'frontal cortex'
elif bool(re.match(r'ctx-.*parietal',roi)): region = 'parietal cortex'
elif bool(re.match(r'ctx-.*temporal',roi)): region = 'temporal cortex'
elif bool(re.match(r'ctx-.*cingulate',roi)): region = 'cingulate cortex'
elif bool(re.match(r'ctx-.*occipital',roi)): region = 'occipital cortex'
elif bool(re.match(r'ctx-.*lingual',roi)): region = 'occipital cortex'
elif bool(re.match(r'ctx-.*hippocampal',roi)): region = 'temporal cortex'
else: region = 'other'
return region
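# Quick self-check (added for illustration, not part of the original analysis):
# a few FreeSurfer ROI names and the region the mapping above assigns to them.
assert roi_to_region('Left-Hippocampus') == 'hippocampus'
assert roi_to_region('ctx-lh-superiorfrontal') == 'frontal cortex'
assert roi_to_region('ctx-rh-lateraloccipital') == 'occipital cortex'
assert roi_to_region('Unknown') == 'background'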
#------------------------------------------------------------------------------------------------------------
from argparse import ArgumentParser
parser = ArgumentParser(description = 'boxplots of CNN Bowsher models')
parser.add_argument('model_name', help = 'model to analyze')
parser.add_argument('--tracer', default = 'FDG', choices = ['FDG','PE2I','FET'], help = 'data set to analyze')
parser.add_argument('--osem_sdir', default = '20_min', help = 'osem count level')
parser.add_argument('--osem_file', default = 'osem_psf_4_5.nii', help = 'osem file to use')
parser.add_argument('--bow_file', default = 'bow_bet_1.0E+01_psf_4_5.nii', help = 'bowsher file to use')
args = parser.parse_args()
model_name = args.model_name
tracer = args.tracer
osem_sdir = args.osem_sdir
osem_file = args.osem_file
bow_file = args.bow_file
model_dir = '../../data/trained_models'
recompute = False
#------------------------------------------------------------------------------------------------------------
mr_file = 'aligned_t1.nii'
aparc_file = 'aparc+aseg_native.nii'
roilut = pd.read_table('FreeSurferColorLUT.txt', comment = '#', sep = '\s+',
names = ['num','roi','r','g','b','a'])
reg_results = pd.DataFrame()
lps_flip = lambda x: np.flip(np.flip(x,0),1)
if tracer == 'FDG':
mdir = '../../data/test_data/mMR/Tim-Patients'
pdirs = glob(os.path.join(mdir,'Tim-Patient-*'))
elif tracer == 'PE2I':
mdir = '../../data/test_data/signa/signa-pe2i'
pdirs = glob(os.path.join(mdir,'ANON????'))
elif tracer == 'FET':
mdir = '../../data/test_data/signa/signa-fet'
pdirs = glob(os.path.join(mdir,'ANON????'))
else:
raise ValueError(f'Invalid tracer: {tracer}')
for pdir in pdirs:
print(pdir)
output_dir = os.path.join(pdir,'predictions',osem_sdir)
prediction_file = os.path.join(output_dir, '___'.join([os.path.splitext(model_name)[0],osem_file]))
# read the prediction
cnn_bow, voxsize = read_nii(prediction_file)
bbox_data = pickle.load(open(os.path.splitext(prediction_file)[0] + '_bbox.pkl','rb'))
bow, _ = read_nii(os.path.join(pdir,'20_min',bow_file))
osem, _ = read_nii(os.path.join(pdir,osem_sdir,osem_file))
aparc, _ = read_nii(os.path.join(pdir,aparc_file))
mr, _ = read_nii(os.path.join(pdir,mr_file))
# crop and interpolate
bow = bow[bbox_data['bbox']]
bow = zoom(bow, bbox_data['zoomfacs'], order = 1, prefilter = False)
osem = osem[bbox_data['bbox']]
osem = zoom(osem, bbox_data['zoomfacs'], order = 1, prefilter = False)
mr = mr[bbox_data['bbox']]
mr = zoom(mr, bbox_data['zoomfacs'], order = 1, prefilter = False)
aparc = aparc[bbox_data['bbox']]
aparc = zoom(aparc, bbox_data['zoomfacs'], order = 0, prefilter = False)
df_file = os.path.splitext(prediction_file)[0] + '_regional_stats.csv'
if (not os.path.exists(df_file)) or recompute:
df = regional_statistics(cnn_bow, bow, aparc)
df["subject"] = os.path.basename(pdir)
df['roiname'] = df['roinum'].apply(lambda x: roilut[roilut.num == x].roi.to_string(index = False).strip())
df['region'] = df["roiname"].apply(roi_to_region)
df['bow_file'] = bow_file
df = df.reindex(columns=['subject','roinum','roiname','region','bow_file','nvox',
'mean','rc_mean','rmse','ssim'])
df.to_csv(df_file)
print('wrote: ', df_file)
# plot the results
lputamen_bbox = find_objects(lps_flip(aparc) == 12)
sl0 = int(0.5*(lputamen_bbox[0][0].start + lputamen_bbox[0][0].stop))
sl1 = int(0.5*(lputamen_bbox[0][1].start + lputamen_bbox[0][1].stop))
sl2 = int(0.5*(lputamen_bbox[0][2].start + lputamen_bbox[0][2].stop))
mr_imshow_kwargs = {'vmin':0, 'vmax':np.percentile(mr,99.99), 'cmap':py.cm.Greys_r}
pet_imshow_kwargs = {'vmin':0, 'vmax':np.percentile(cnn_bow[aparc>0],99.99)}
vi = ThreeAxisViewer([lps_flip(mr),lps_flip(osem),lps_flip(bow),lps_flip(cnn_bow)],
sl_x = sl0, sl_y = sl1, sl_z = sl2, ls = '', rowlabels = ['T1 MR','OSEM','BOW','$BOW_{CNN}$'],
imshow_kwargs = [mr_imshow_kwargs] + 3*[pet_imshow_kwargs])
vi.fig.savefig(os.path.splitext(prediction_file)[0] + '.png')
py.close(vi.fig)
else:
print('reading : ', df_file)
df = pd.read_csv(df_file)
df['tracer'] = tracer
reg_results = pd.concat([reg_results, df], ignore_index = True)
#---------------------------------------------------------------------------------
# filter background ROIs
reg_results = reg_results.loc[(reg_results['region'] != 'other') &
(reg_results['region'] != 'background')]
#---------------------------------------------------------------------------------
# make plots
order = ['frontal cortex','temporal cortex','occipital cortex','parietal cortex',
'hippocampus','cingulate cortex','thalamus','basal ganglia',
'cerebellum','white matter','ventricle']
fp = dict(marker = 'o', markerfacecolor = '0.3', markeredgewidth = 0, markersize = 2.5)
fig, ax = py.subplots(2,1, figsize = (12,6), sharex = True)
bplot1 = sns.boxplot(x='region', y ='rc_mean', data = reg_results, ax = ax[0], hue = 'tracer', flierprops = fp,
order = order)
bplot2 = sns.boxplot(x='region', y ='ssim', data = reg_results, ax = ax[1], hue = 'tracer', flierprops = fp,
order = order)
# make better legend
for plot in [bplot1, bplot2]:
handles, labels = plot.axes.get_legend_handles_labels()
plot.legend().remove()
bplot1.legend(handles, labels, ncol=len(tracer), loc='upper center')
bplot2.legend(handles, labels, ncol=len(tracer), loc='lower right')
ax[0].set_ylabel('RC_mean')
ax[1].set_ylabel('SSIM_mean')
for axx in ax:
axx.set_xticklabels(axx.get_xticklabels(),rotation=15)
axx.grid(ls = ':')
fig.tight_layout()
fig.savefig(os.path.join('figs', f'regions_{model_name}_{tracer}.pdf'))
fig.show()
fig2, ax2 = py.subplots(2,1, figsize = (12,6), sharex = True)
sns.boxplot(x='subject', y ='rc_mean', data = reg_results, ax = ax2[0], hue = 'tracer', flierprops = fp)
sns.boxplot(x='subject', y ='ssim', data = reg_results, ax = ax2[1], hue = 'tracer', flierprops = fp)
for axx in ax2:
axx.set_xticklabels(axx.get_xticklabels(),rotation=90)
axx.grid(ls = ':')
fig2.tight_layout()
fig2.savefig(os.path.join('figs', f'subjects_{model_name}_{tracer}.pdf'))
fig2.show()
# make the data tables
sum_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
def get_number_of_cell_types_with_results(cfg):
n_cell_types_list = []
for dataset in cfg['datasets']:
df =
|
pd.read_csv(dataset['summary'], sep='\t')
|
pandas.read_csv
|
import pandas as pd
import re
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns; sns.set_style('whitegrid')
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
#matplotlib.rc('text', usetex = True)
def plot_likelihoods(df, ax):
for c in df.columns:
ax.plot(df[c], label=c)
ax.set_xlabel("Number $\it{K}$ Signatures Used", fontsize=14)
ax.margins(y=.5)
ax.legend()
ax.set_ylabel("Held-out Log Likelihood", fontsize=14)
return ax
def combine_likelihood_files(files):
output = []
for f in files:
# get the covariates used from the filename
out = re.split(r"_|\.", f)
covariates = out[1]
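# e.g. for a hypothetical file 'll_age.tsv', re.split(r"_|\.", f) yields
# ['ll', 'age', 'tsv'], so covariates == 'age'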
# stored as one column with one row for each K
df =
|
pd.read_csv(f, sep="\t", index_col=0)
|
pandas.read_csv
|
import json
import pandas as pd
import time
#################################
#
#with open('logs.json', 'r') as data:
# data = data.read()
#
#logs = json.loads(data)
#
########################
def get_data(file):
with open(file, 'r') as data:
data = data.read()
logs = json.loads(data)
#s = Sender('Test', '192.168.1.214')
#logs = s.list_logs()
df = pd.DataFrame(columns=['acquired_time'])
length = len(logs)
i = 0
while i < length:
for x in logs[i]:
if x == "create_params_file:output":
stats = logs[i][6]
stats = stats.split(',')
acquired_time = stats[3].split('"')
acquired_time = acquired_time[3]
print(acquired_time)
df_temp = pd.DataFrame({'acquired_time': [acquired_time]})
df =
|
pd.concat([df, df_temp])
|
pandas.concat
|
#!/usr/bin/env python
#This program is for testing a trained BTD
# By <NAME>
# 9/9/2021
# Running this will test on a f
import awkward as ak
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Data is stored in pandas -> Each
from sklearn.model_selection import train_test_split
import xgboost as xgb
from numpy.random import choice
import argparse
parser = argparse.ArgumentParser(description='run boosted decision tree on data, note this file grabs only the data not validation this is an experimental set')
parser.add_argument('file', metavar='f', type=str)
parser.add_argument('BTD',metavar='d', type=str)
parser.add_argument('result',metavar='d', type=str)
args=parser.parse_args()
xg_reg=xgb.Booster()
xg_reg.load_model(args.BTD)
rawdata=pd.read_csv(args.file)
cleandata=rawdata.drop(["event"],axis=1)
Dexp=xgb.DMatrix(data=cleandata)
predictions=xg_reg.predict(Dexp)
preddf=
|
pd.Series(predictions)
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
# # Monte-carlo simulations
# In[1]:
# %load imports.py
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('reload_kedro', '')
get_ipython().run_line_magic('config', 'Completer.use_jedi = False ## (To fix autocomplete)')
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
|
pd.set_option('display.max_rows', 500)
|
pandas.set_option
|
""" ecospold2matrix - Class for recasting ecospold2 dataset in matrix form.
The module provides function to parse ecospold2 data, notably ecoinvent 3, as
Leontief A-matrix and extensions, or alternatively as supply and use tables for
the unallocated version of ecoinvent.
:PythonVersion: 3
:Dependencies: pandas 0.14.1 or more recent, scipy, numpy, lxml and xml
License: BSD
Authors:
<NAME>
<NAME>
<NAME>
<NAME>
Credits:
This module re-uses/adapts code from brightway2data, more specifically the
Ecospold2DataExtractor class in import_ecospold2.py, changeset:
271:7e67a75ed791; Wed Sep 10; published under BSD-license:
Copyright (c) 2014, <NAME> and ETH Zürich
Neither the name of ETH Zürich nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE
COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pdb
import os
import glob
import io
import pkgutil
import subprocess
from lxml import objectify
import xml.etree.ElementTree as ET
from lxml import etree
import pandas as pd
_df = pd.DataFrame
import numpy as np
import scipy.sparse
import scipy.io
import logging
import pickle
import gzip
import csv
import shelve
import hashlib
import sqlite3
try:
import IPython
except:
pass
import re
import xlrd
import xlwt
import copy
# pylint: disable-msg=C0103
class Ecospold2Matrix(object):
"""
Defines a parser object that holds all project parameters and processes the
ecospold-formatted data into matrices of choice.
The two main functions of this class are ecospold_to_Leontief() and
ecospold_to_sut()
"""
# Some hardcoded stuff
__PRE = '{http://www.EcoInvent.org/EcoSpold02}'
__ELEXCHANGE = 'ElementaryExchanges.xml'
__INTERMEXCHANGE = 'IntermediateExchanges.xml'
__ACTIVITYINDEX = 'ActivityIndex.xml'
__DB_CHARACTERISATION = 'characterisation.db'
rtolmin = 1e-16 # 16 significant digits being roughly the limit of float64
__TechnologyLevels = pd.Series(
['Undefined', 'New', 'Modern', 'Current', 'Old', 'Outdated'],
index=[0, 1, 2, 3, 4, 5])
def __init__(self, sys_dir, project_name, out_dir='.', lci_dir=None,
positive_waste=False, prefer_pickles=False, nan2null=False,
save_interm=True, PRO_order=['ISIC', 'activityName'],
STR_order=['comp', 'name', 'subcomp'],
verbose=True, version_name='ecoinvent31',
unlinked = True, remove_markets=True):
""" Defining an ecospold2matrix object, with key parameters that
determine how the data will be processed.
Args:
-----
* sys_dir: directory containing the system description, i.e., ecospold
dataset and master XML files
* project_name: Name used to log progress and save results
* out_dir: Directory where to save result matrices and logs
* lci_dir: Directory where official cumulative LCI ecospold files are
* positive_waste: Whether or not to change sign convention and make
waste flows positive
[default false]
* prefer_pickles: If sys_dir contains pre-processed data in form of
pickle-files, whether or not to use those
[Default: False, don't use]
* nan2null: Whether or not to replace Not-a-Number by 0.0
[Default: False, don't replace anything]
* save_interm: Whether or not to save intermediate results as pickle
files for potential re-use
[Default: True, do it]
* PRO_order: List of meta-data used for sorting processes in the
different matrices.
[Default: first sort by order of ISIC code, then, within
each code, by order of activity name]
* STR_order: List of meta-data used for sorting stressors (elementary
flows) in the different matrices.
[Default: first sort by order of compartment,
subcompartment and then by name]
* unlinked: Whether or not the datasets are linked/allocated.
[Default: True, the data are unlinked]
Main functions and workflow:
---------------------------
self.ecospold_to_Leontief(): Turn ecospold files into Leontief matrix
representation
* Parse ecospold files, get products, activities, flows, emissions
* If need be, correct inconsistencies in system description
* After corrections, create "final" labels for matrices
* Generate symmetric, normalized system description (A-matrix,
extension F-matrix)
* Save to file (many different formats)
* Optionally, read cumulative lifecycle inventories (slow) and
compare to calculated LCI for sanity testing
self.ecospold_to_sut(): Turn unallocated ecospold into Supply and Use
Tables
* Parse ecospold files, get products, activities, flows, emissions
* Organize in supply and use
* optionally, aggregate sources to generate a fully untraceable SUT
* Save to file
"""
# INTERMEDIATE DATA/RESULTS, TO BE GENERATED BY OBJECT METHODS
self.products = None # products, with IDs and descriptions
self.activities = None # activities, w IDs and description
self.inflows = None # intermediate-exchange input flows
self.outflows = None # intermediate-exchange output flows
self.prices = None
self.elementary_flows = None # elementary flows
self.q = None # total supply of each product
self.PRO_old=None
self.STR_old = None
self.IMP_old=None
# FINAL VARIABLES: SYMMETRIC SYSTEM, NORMALIZED AND UNNORMALIZED
self.PRO = None # Process labels, rows/cols of A-matrix
self.STR = None # Factors labels, rows extensions
self.IMP =
|
pd.DataFrame([])
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generates the data needed for Supplementary Figure 3.
The figure is generated by the routine fig_2d_age_bdi.py
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy.stats import median_test
ref = datetime.date(2019, 12, 31)
max_dur = 90
data0 = pd.read_csv('../Data/SRAG_filtered_morb.csv')
data0 = data0[(~pd.isna(data0.NU_IDADE_N))&(~pd.isna(data0.ibp))]
saida_H = {'mean_all':[], 'stdm_all':[], 'median_all':[], \
'mean_death':[], 'stdm_death':[], 'median_death':[], 'mean_cure':[],\
'stdm_cure':[], 'median_cure':[], 'n_death':[], 'n_cure':[],\
'age_min':[], 'age_mean':[], 'age_max':[], \
'ibp_min':[], 'ibp_mean':[], 'ibp_max':[]}
saida_U = {'mean_all':[], 'stdm_all':[], 'median_all':[], \
'mean_death':[], 'stdm_death':[], 'median_death':[], 'mean_cure':[],\
'stdm_cure':[], 'median_cure':[], 'n_death':[], 'n_cure':[],\
'age_min':[], 'age_mean':[], 'age_max':[],
'ibp_min':[], 'ibp_mean':[], 'ibp_max':[]}
data0 = data0[~(data0.UTI_dur<0)]
data0 = data0[~(data0.HOSP_dur<0)]
data0 = data0[~((data0.UTI_dur>max_dur)|(data0.HOSP_dur>max_dur))]
ages = [0, 18, 30, 40, 50, 65, 75, 85, np.inf]
nsep = len(ages) - 1
nsep2 = 10
ibps = np.linspace(data0.ibp.min(), data0.ibp.max(), nsep2+1)
#%%
for j in range(nsep2):
for i in range(nsep):
print(i,j)
if i == nsep-1:
data = data0[(data0.NU_IDADE_N>=ages[i])]
else:
data = data0[(data0.NU_IDADE_N>=ages[i])&(data0.NU_IDADE_N<ages[i+1])]
if j == nsep2-1:
data = data[data.ibp>=ibps[j]]
else:
data = data[(data.ibp>=ibps[j])&(data.ibp<ibps[j+1])]
agem = data.NU_IDADE_N.mean()
agei = [data.NU_IDADE_N.min(), data.NU_IDADE_N.max()]
saida_H['age_mean'].append(agem)
saida_U['age_mean'].append(agem)
saida_H['age_max'].append(agei[1])
saida_U['age_max'].append(agei[1])
saida_H['age_min'].append(agei[0])
saida_U['age_min'].append(agei[0])
ibpm = data.ibp.mean()
ibpi = [data.ibp.min(), data.ibp.max()]
saida_H['ibp_mean'].append(ibpm)
saida_U['ibp_mean'].append(ibpm)
saida_H['ibp_max'].append(ibpi[1])
saida_U['ibp_max'].append(ibpi[1])
saida_H['ibp_min'].append(ibpi[0])
saida_U['ibp_min'].append(ibpi[0])
hU, b_edg = np.histogram(data.UTI_dur, bins=np.arange(0, max_dur+1))
hH, b_edg = np.histogram(data.HOSP_dur[
|
pd.isna(data.UTI_dur)
|
pandas.isna
|
"""
This is a translation of Carl-Johans implementation in Matlab to Python
"""
import os.path
import numpy as np
from numpy import tanh, exp, sqrt, pi, sin, cos, arccos, min, max
import pandas as pd
import scipy.interpolate
from copy import copy
import matplotlib.pyplot as plt
dir_path = os.path.dirname(__file__)
base_path = os.path.split(dir_path)[0]
data_path_S175 = os.path.join(base_path, 'rolldecayestimators', 'Bw0_S175.csv')
data_S175 =
|
pd.read_csv(data_path_S175, sep=';')
|
pandas.read_csv
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""
Helper functions for extracting and storing useful information for analysis
"""
import itertools
import numpy as np
import pandas as pd
from emukit.core.interfaces import IModel
from emukit.model_wrappers import GPyModelWrapper
from functools import singledispatch
from gp.model_wrapper import GPyTorchModelWrapper, get_constrained_named_parameters
@singledispatch
def get_model_hyperparam(model: IModel) -> pd.DataFrame:
raise NotImplementedError(f"Can't get parameters for type {model}")
@get_model_hyperparam.register
def _(model: GPyModelWrapper) -> np.ndarray:
parameter_names_list = [name.split(".", 1)[1] for name in model.model.parameter_names_flat(include_fixed=True)]
return
|
pd.DataFrame(model.model.param_array[None, :], columns=parameter_names_list)
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
def check(result, expected=None):
if expected is not None:
tm.assert_frame_equal(result, expected)
result.dtypes
str(result)
class TestDataFrameNonuniqueIndexes:
def test_setattr_columns_vs_construct_with_columns(self):
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = list(range(2))
df = DataFrame(arr, columns=["A", "A"])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
def test_setattr_columns_vs_construct_with_columns_datetimeindx(self):
idx = date_range("20130101", periods=4, freq="Q-NOV")
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
)
df.columns = idx
expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
def test_insert_with_duplicate_columns(self):
# insert
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["string"] = "bah"
expected = DataFrame(
[[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
columns=["foo", "bar", "foo", "hello", "string"],
)
check(df, expected)
with pytest.raises(ValueError, match="Length of value"):
df.insert(0, "AnotherColumn", range(len(df.index) - 1))
# insert same dtype
df["foo2"] = 3
expected = DataFrame(
[[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# set (non-dup)
df["foo2"] = 4
expected = DataFrame(
[[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
df["foo2"] = 3
# delete (non dup)
del df["bar"]
expected = DataFrame(
[[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
columns=["foo", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# try to delete again (its not consolidated)
del df["hello"]
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# insert
df.insert(2, "new_col", 5.0)
expected = DataFrame(
[[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],
columns=["foo", "foo", "new_col", "string", "foo2"],
)
check(df, expected)
# insert a dup
with pytest.raises(ValueError, match="cannot insert"):
df.insert(2, "new_col", 4.0)
df.insert(2, "new_col", 4.0, allow_duplicates=True)
expected = DataFrame(
[
[1, 1, 4.0, 5.0, "bah", 3],
[1, 2, 4.0, 5.0, "bah", 3],
[2, 3, 4.0, 5.0, "bah", 3],
],
columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],
)
check(df, expected)
# delete (dup)
del df["foo"]
expected = DataFrame(
[[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],
columns=["new_col", "new_col", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
def test_dup_across_dtypes(self):
# dup across dtypes
df = DataFrame(
[[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],
columns=["foo", "bar", "foo", "hello"],
)
check(df)
df["foo2"] = 7.0
expected = DataFrame(
[[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
result = df["foo"]
expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])
check(result, expected)
# multiple replacements
df["foo"] = "string"
expected = DataFrame(
[
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
del df["foo"]
expected = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]
)
check(df, expected)
def test_column_dups_indexes(self):
# check column dups with index equal and not equal to df's index
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "A"],
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
expected_ser = Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"], "A": expected_ser},
columns=["A", "B", "A"],
)
this_df["A"] = index
check(this_df, expected_df)
def test_changing_dtypes_with_duplicate_columns(self):
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=["that", "that"])
expected = DataFrame(1.0, index=range(5), columns=["that", "that"])
df["that"] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=["that", "that"])
expected = DataFrame(1, index=range(5), columns=["that", "that"])
df["that"] = 1
check(df, expected)
def test_dup_columns_comparisons(self):
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])
# not-comparing like-labelled
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame(
[[False, True], [True, False], [False, False], [True, False]],
columns=["A", "A"],
)
tm.assert_frame_equal(result, expected)
def test_mixed_column_selection(self):
# mixed column selection
# GH 5639
dfbool = DataFrame(
{
"one": Series([True, True, False], index=["a", "b", "c"]),
"two": Series([False, False, True, False], index=["a", "b", "c", "d"]),
"three": Series([False, True, True, True], index=["a", "b", "c", "d"]),
}
)
expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)
result = dfbool[["one", "three", "one"]]
check(result, expected)
def test_multi_axis_dups(self):
# multi-axis dups
# GH 6121
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]].copy()
expected = z.loc[["a", "c", "a"]]
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]]
result = z.loc[["a", "c", "a"]]
check(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["a", "a.1"]
str(df)
expected = DataFrame([[1, 2]], columns=["a", "a.1"])
|
tm.assert_frame_equal(df, expected)
|
pandas._testing.assert_frame_equal
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.COMPARE.GG.compare_models
from .hyper_parameters import DA_HP
from .hyper_parameters import GB_HP
from .hyper_parameters import FF_HP
from .hyper_parameters import INF_HP
from .hyper_parameters import NN_HP
from .hyper_parameters import PIVOT_HP
from .hyper_parameters import REG_HP
from .hyper_parameters import REG_M_HP
from .hyper_parameters import TP_HP
from .hyper_parameters import Likelihood_HP
from ..loader import DALoader
from ..loader import GBLoader
from ..loader import FFLoader
from ..loader import INFLoader
from ..loader import NNLoader
from ..loader import PIVOTLoader
from ..loader import REGLoader
from ..loader import TPLoader
from ..loader import LikelihoodLoader
from .visual.common import hp_kwargs_generator
import pandas as pd
import os
from visual.misc import set_plot_config
set_plot_config()
import matplotlib.pyplot as plt
from config import DEFAULT_DIR
from config import SAVING_DIR
from .visual import compare
BENCHMARK_NAME = "COMPARE"
def load_all_evaluation(TheLoader, hp_args, data_name='GG', benchmark_name='GG-marginal'):
all_evaluation = []
for kwargs in hp_kwargs_generator(hp_args):
loader = TheLoader(data_name, benchmark_name, **kwargs)
try:
evaluation = loader.load_evaluation_config()
except FileNotFoundError:
print(f"Missing results for {loader.model_full_name}")
else:
all_evaluation.append(evaluation)
return all_evaluation
def load_all_data(all_hp, all_loader_classes, all_code_names, data_name='GG', benchmark_name='GG-calib'):
all_data = []
for hp_args, TheLoader, name in zip(all_hp, all_loader_classes, all_code_names):
all_evaluation = load_all_evaluation(TheLoader, hp_args, data_name=data_name, benchmark_name=benchmark_name)
print(f" found {len(all_evaluation)} completed runs for {name} in {benchmark_name}")
if all_evaluation :
all_evaluation = pd.concat(all_evaluation)
all_evaluation['code_name'] = name
all_data.append(all_evaluation)
return all_data
def load_all_estimation_evaluation(TheLoader, hp_args, data_name='GG', benchmark_name='HIGGTES-marginal'):
all_evaluation = []
for kwargs in hp_kwargs_generator(hp_args):
loader = TheLoader(data_name, benchmark_name, **kwargs)
try:
config_table = loader.load_config_table()
evaluation = loader.load_estimation_evaluation()
except FileNotFoundError:
try:
evaluation = loader.load_evaluation()
except FileNotFoundError:
print(f"[MISSING] estimation results for {loader.model_full_name}")
else:
print(f"[SUCCESS] load for {loader.model_full_name}")
evaluation = evaluation.join(config_table, rsuffix='_')
all_evaluation.append(evaluation)
else:
print(f"[SUCCESS] load for {loader.model_full_name}")
evaluation = evaluation.join(config_table, rsuffix='_')
all_evaluation.append(evaluation)
return all_evaluation
def load_all_conditional_evaluation(TheLoader, hp_args, data_name='GG', benchmark_name='HIGGTES-marginal'):
all_evaluation = []
for kwargs in hp_kwargs_generator(hp_args):
loader = TheLoader(data_name, benchmark_name, **kwargs)
try:
config_table = loader.load_config_table()
evaluation = loader.load_estimation_evaluation()
conditional_evaluation = loader.load_conditional_evaluation()
except FileNotFoundError:
print(f"[MISSING] conditional estimation results for {loader.model_full_name}")
else:
print(f"[SUCCESS] load for {loader.model_full_name}")
evaluation = evaluation.join(config_table, rsuffix='_')
evaluation = evaluation.join(conditional_evaluation, rsuffix='__')
all_evaluation.append(evaluation)
return all_evaluation
def load_all_estimation_data(all_hp, all_loader_classes, all_code_names, data_name='GG', benchmark_name='GG-prior'):
all_data = []
for hp_args, TheLoader, name in zip(all_hp, all_loader_classes, all_code_names):
all_evaluation = load_all_estimation_evaluation(TheLoader, hp_args, data_name=data_name, benchmark_name=benchmark_name)
print(f" found {len(all_evaluation)} completed estimation runs for {name} in {benchmark_name}")
if all_evaluation :
all_evaluation = pd.concat(all_evaluation)
all_evaluation['code_name'] = name
all_data.append(all_evaluation)
return all_data
def load_all_conditional_data(all_hp, all_loader_classes, all_code_names, data_name='GG', benchmark_name='GG-prior'):
all_data = []
for hp_args, TheLoader, name in zip(all_hp, all_loader_classes, all_code_names):
all_evaluation = load_all_conditional_evaluation(TheLoader, hp_args, data_name=data_name, benchmark_name=benchmark_name)
print(f" found {len(all_evaluation)} completed conditional runs for {name} in {benchmark_name}")
if all_evaluation :
all_evaluation = pd.concat(all_evaluation)
all_evaluation['code_name'] = name
all_data.append(all_evaluation)
return all_data
def make_common_estimation_plots(data_and_marginal, benchmark_name):
directory = os.path.join(SAVING_DIR, BENCHMARK_NAME, benchmark_name, "BEST_MSE")
os.makedirs(directory, exist_ok=True)
compare.min_avg_mse_mse_box_plot(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_avg_mse_mse_err_plot(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_avg_mse_sigma_mean_box_plot(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_avg_mse_true_mu_mse(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_avg_mse_true_mu_sigma_mean(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_avg_mse_true_mu_target_std(data_and_marginal, title=benchmark_name, directory=directory)
directory = os.path.join(SAVING_DIR, BENCHMARK_NAME, benchmark_name, "BEST_MEDIAN")
os.makedirs(directory, exist_ok=True)
compare.min_median_mse_mse_box_plot(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_median_mse_mse_err_plot(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_median_mse_sigma_mean_box_plot(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_median_mse_true_mu_mse(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_median_mse_true_mu_sigma_mean(data_and_marginal, title=benchmark_name, directory=directory)
compare.min_median_mse_true_mu_target_std(data_and_marginal, title=benchmark_name, directory=directory)
def make_common_conditional_plots(data, benchmark_name):
directory = os.path.join(SAVING_DIR, BENCHMARK_NAME, benchmark_name, "BEST_MSE")
os.makedirs(directory, exist_ok=True)
compare.min_avg_mse_v_stat_box_plot(data, title=benchmark_name, directory=directory)
compare.min_avg_mse_v_syst_box_plot(data, title=benchmark_name, directory=directory)
compare.min_avg_mse_v_stat_err_plot(data, title=benchmark_name, directory=directory)
compare.min_avg_mse_v_syst_err_plot(data, title=benchmark_name, directory=directory)
directory = os.path.join(SAVING_DIR, BENCHMARK_NAME, benchmark_name, "BEST_MEDIAN")
os.makedirs(directory, exist_ok=True)
compare.min_median_mse_v_stat_box_plot(data, title=benchmark_name, directory=directory)
compare.min_median_mse_v_syst_box_plot(data, title=benchmark_name, directory=directory)
compare.min_median_mse_v_stat_err_plot(data, title=benchmark_name, directory=directory)
compare.min_median_mse_v_syst_err_plot(data, title=benchmark_name, directory=directory)
def work(ALL_HP, ALL_LOADER, ALL_NAME, data_name, benchmark_name, marginal_eval):
print()
print("="*15, benchmark_name, "="*15)
all_estimation_data = load_all_estimation_data(ALL_HP, ALL_LOADER, ALL_NAME, data_name=data_name, benchmark_name=benchmark_name)
all_conditional_data = load_all_conditional_data(ALL_HP, ALL_LOADER, ALL_NAME, data_name=data_name, benchmark_name=benchmark_name)
if all_estimation_data :
all_estimation_data = all_estimation_data + [marginal_eval]
data_estimation_and_marginal = pd.concat(all_estimation_data, sort=False)
make_common_estimation_plots(data_estimation_and_marginal, benchmark_name)
else:
print(f"WARNING : FOUND NO ESTIMATION FOR {benchmark_name}")
if all_conditional_data:
data_conditional = pd.concat(all_conditional_data, sort=False)
make_common_conditional_plots(data_conditional, benchmark_name)
else:
print(f"WARNING : FOUND NO CONDITIONAL ESTIMATION FOR {benchmark_name}")
def main():
print("hello")
os.makedirs(DEFAULT_DIR, exist_ok=True)
ALL_HP = [
DA_HP
, GB_HP
, FF_HP
, INF_HP
, NN_HP
, PIVOT_HP
, REG_HP
, TP_HP
, Likelihood_HP
]
ALL_LOADER = [
DALoader
, GBLoader
, FFLoader
, INFLoader
, NNLoader
, PIVOTLoader
, REGLoader
, TPLoader
, LikelihoodLoader
]
ALL_NAME = [
"DA"
, "GB"
, "FF"
, "INF"
, "NN"
, "PIVOT"
, "Param-REG"
, "TP"
, "Likelihood"
]
data_name = 'GG'
marginal_eval = load_all_evaluation(REGLoader, REG_M_HP, data_name=data_name, benchmark_name='GG-marginal')
if marginal_eval :
marginal_eval = pd.concat(marginal_eval, sort=False)
marginal_eval['base_name'] = "Marginal"
marginal_eval['code_name'] = "Blind-REG"
else:
marginal_eval =
|
pd.DataFrame()
|
pandas.DataFrame
|
#python code for the plot
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from scipy.stats import lognorm
mu=0.06
n=500
dt=0.1
x0=1
x=
|
pd.DataFrame()
|
pandas.DataFrame
|
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
#Code starts here
data =
|
pd.read_csv(path)
|
pandas.read_csv
|
""" test indexing with ix """
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.types.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.core.common import PerformanceWarning
class TestIX(tm.TestCase):
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] =
|
pd.to_datetime(df['timestamp'], unit='s')
|
pandas.to_datetime
|
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import itertools
import operator
import sys
import warnings
from numpy import nan, ndarray
import numpy as np
from pandas.core.common import (isnull, notnull, _ensure_index,
_is_bool_indexer, _default_index)
from pandas.core.daterange import DateRange
from pandas.core.generic import PandasObject
from pandas.core.index import Index, MultiIndex
from pandas.core.indexing import _SeriesIndexer, _maybe_droplevels
import pandas.core.datetools as datetools
import pandas._tseries as _tseries
__all__ = ['Series', 'TimeSeries']
def _numpy_lt_151():
return np.__version__ < '1.5.1'
#-------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
from pandas.core.frame import DataFrame
if isinstance(other, Series):
if self.index.equals(other.index):
return Series(op(self.values, other.values), index=self.index)
new_index = self.index + other.index
this_reindexed = self.reindex(new_index)
other_reindexed = other.reindex(new_index)
arr = op(this_reindexed.values, other_reindexed.values)
return Series(arr, index=new_index)
elif isinstance(other, DataFrame):
return NotImplemented
else:
# scalars
return Series(op(self.values, other), index=self.index)
return wrapper
def _flex_method(op, name):
def f(self, other, fill_value=None):
return self._binop(other, op, fill_value=fill_value)
f.__doc__ = """
Binary operator %s with support to substitute a fill_value for missing data
in one of the inputs
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
Returns
-------
result : Series
""" % name
f.__name__ = name
return f
#-------------------------------------------------------------------------------
# Series class
class Series(np.ndarray, PandasObject):
"""
Generic indexed (labeled) vector, including time series
Contains values in a numpy-ndarray with an optional bound index
(also an array of dates, strings, or whatever you want the 'row
names' of your series to be)
Rows can be retrieved by index value (date, string, etc.) or
relative position in the underlying array.
Operations between Series (+, -, /, *, **) align values based on
their associated index values-- they need not be the same length.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like
Index object (or other iterable of same length as data)
Must be input if first argument is not a dict. If both a dict
and index sequence are used, the index will override the keys
found in the dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
Notes
-----
If you combine two series, all values for an index position must
be present or the value for that index position will be nan. The
new index is the sorted union of the two Series indices.
Data is *not* copied from input arrays by default
"""
_AXIS_NUMBERS = {
'index' : 0
}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
def __new__(cls, data, index=None, dtype=None, name=None, copy=False):
if isinstance(data, Series):
if index is None:
index = data.index
elif isinstance(data, dict):
if index is None:
index = Index(sorted(data.keys()))
data = [data[idx] for idx in index]
# Create array, do *not* copy data by default, infer type
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except ValueError:
if dtype:
raise
subarr = np.array(data, dtype=object)
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
if isinstance(value, basestring) and dtype is None:
dtype = np.object_
if dtype is None:
subarr = np.empty(len(index), dtype=type(value))
else:
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
else:
return subarr.item()
elif subarr.ndim > 1:
raise Exception('Data must be 1-dimensional')
if index is None:
index = _default_index(len(subarr))
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, basestring):
subarr = np.array(data, dtype=object, copy=copy)
# Change the class of the array to be the subclass type.
subarr = subarr.view(cls)
subarr.index = index
subarr.name = name
if subarr.index.is_all_dates():
subarr = subarr.view(TimeSeries)
return subarr
def __init__(self, *args, **kwargs):
pass
def __hash__(self):
raise TypeError('unhashable type')
_index = None
def _get_index(self):
return self._index
def _set_index(self, index):
indexTypes = ndarray, Index, list, tuple
if not isinstance(index, indexTypes):
raise TypeError("Expected index to be in %s; was %s."
% (indexTypes, type(index)))
if len(self) != len(index):
raise AssertionError('Lengths of index and values did not match!')
self._index = _ensure_index(index)
index = property(fget=_get_index, fset=_set_index)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self._index = getattr(obj, '_index', None)
def toDict(self):
return dict(self.iteritems())
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind, fill_value=fill_value)
def __contains__(self, key):
return key in self.index
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = (self.index, )
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
index, = own_state
self.index = index
def __getitem__(self, key):
"""
Returns item(s) for requested index/sequence, overrides default behavior
for series[key].
Logic is as follows:
- If key is in the index, return the value corresponding
to that index
- Otherwise, use key (presumably one integer or a sequence
of integers) to obtain values from the series. In the case
of a sequence, a 'slice' of the series (with corresponding dates)
will be returned, otherwise a single value.
"""
try:
if isinstance(self.index, MultiIndex):
return self._multilevel_index(key)
else:
values = self.values
try:
return values[self.index.get_loc(key)]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise
except TypeError:
pass
def _index_with(indexer):
return Series(self.values[indexer],
index=self.index[indexer])
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if _is_bool_indexer(key):
self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return _index_with(key)
# TODO: [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
try:
return _index_with(key)
except Exception:
key = np.asarray(key)
return _index_with(key)
def _multilevel_index(self, key):
values = self.values
try:
loc = self.index.get_loc(key)
if isinstance(loc, slice):
# TODO: what if a level contains tuples??
new_index = self.index[loc]
new_index = _maybe_droplevels(new_index, key)
return Series(values[loc], index=new_index)
else:
return values[loc]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise Exception('Requested index not in this series!')
def get(self, key, default=None):
"""
Returns value occupying requested index, default to specified
missing value if not present
Parameters
----------
key : object
Index value looking for
default : object, optional
Value to return if key not in index
Returns
-------
y : scalar
"""
if key in self.index:
return self._get_val_at(self.index.get_loc(key))
else:
return default
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def __getslice__(self, i, j):
"""
Returns a slice of the Series.
Note that the underlying values are COPIES.
The reason that the getslice returns copies is that otherwise you
will have a reference to the original series which could be
inadvertently changed
"""
return Series(self.values[i:j].copy(), index=self.index[i:j])
def __setitem__(self, key, value):
values = self.values
try:
loc = self.index.get_loc(key)
values[loc] = value
return
except KeyError:
if isinstance(key, (int, np.integer)):
values[key] = value
return
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item
pass
self._check_bool_indexer(key)
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if isinstance(key, np.ndarray) and key.dtype == np.object_:
mask =
|
isnull(key)
|
pandas.core.common.isnull
|
from collections import namedtuple
import os
import re
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
import h5py
import pandas as pd
import numpy as np
import numpy.ma as ma
from numpy.random import default_rng
from desc.skycatalogs.utils.common_utils import print_dated_msg
__all__ = ['LookupInfo', 'Cmp', 'MagNorm', 'convert_tophat_sed',
'write_sed_file', 'NORMWV_IX', 'get_star_sed_path',
'create_cosmology']
# Index for tophat bin containing 500 nm
NORMWV_IX = 13
def convert_tophat_sed(a_bins, f_nu_input, mag_norm_f, redshift=0,
wavelen_step=0.1):
'''
Given a tophat SED and redshift, produce an equivalent SED as lists of
wavelength and f_lambda. Also compute magnorm
Parameters
----------
a_bins: list of Tophat [tuples (start, width)] in Angstroms
f_nu: list of values for the tophats
mag_norm_f: an instance of MagNorm
redshift: needed for computing distance modulus. Should be
cosmoDC2 redshiftHubble, aka redshift_hubble in sky catalogs
wavelen_step: Re-cast tophat seds to use this bin width in nm (keeping
same step function in f_nu space).
return
------
arrays lambda, f_lambda where lambda is in nm and f_lambda is in
erg / (cm**2 * s * nm)
Also return final magnorm (including redshift adjustment) and f_nu value
at 500 nm
'''
lam_nm = 0.1 * np.array([b.start + 0.5 * b.width for b in a_bins])
lam_width_nm = 0.1 * np.array([b.width for b in a_bins])
f_nu = 1.0 * np.array(f_nu_input)
val_500nm = f_nu[NORMWV_IX]
# Convert from f_nu to f_lambda:
# In earlier versions tophats were in decreasing lambda order
if (lam_nm[0] > lam_nm[1]): # reverse
lam_nm[:] = lam_nm[::-1]
lam_width_nm[:] = lam_width_nm[::-1]
f_nu[:] = f_nu[::-1]
lam_min = lam_nm[0]
lam_max = lam_nm[-1] + lam_width_nm[-1]
# Keep the same step function but use fine bins instead of the
# original tophat widths.
n_bins = int((lam_max - lam_min) / wavelen_step)
lam_fine = np.empty(n_bins)
f_nu_fine = np.empty(n_bins)
boundaries = list(lam_nm)
boundaries.append(lam_max)
b_ix = 0
for i in range(n_bins):
lam_fine[i] = lam_min + wavelen_step * i
if (lam_fine[i] > boundaries[b_ix + 1]) :
b_ix = b_ix + 1
f_nu_fine[i] = f_nu[b_ix]
# Convert fnu to flambda, ignoring constant factors.
flambda = f_nu_fine/lam_fine**2
# Normalize so flambda value at 500 nm is 1.0
nm500_ix = int((500 - lam_min) / wavelen_step) + 1
flambda_norm = flambda / flambda[nm500_ix]
return lam_fine, flambda_norm, mag_norm_f(f_nu[NORMWV_IX],
redshift), val_500nm
def write_sed_file(path, wv, f_lambda, wv_unit=None, f_lambda_unit=None):
'''
Write a two-column text file. First column is wavelength,
second is luminosity value
If units are supplied, write a comment line at the top
Parameters
----------
path Where to write the file and what to call it
wv List or array of wavelength values
f_lambda List or array of luminosities. Must be the same length as wv
wv_unit String describing units for first column
f_lambda_unit String describing units for second column
'''
header = '# '
if wv_unit:
header += wv_unit + ' '
else:
header += ' lambda unit unknown '
if f_lambda_unit:
header += f_lambda_unit
else:
header += ' f_lambda unit unknown'
header += '\n'
with open(path, mode="w") as f:
f.write(header)
for i in range(len(wv)):
line = '{:8.2f} {:g}\n'.format(wv[i], f_lambda[i])
f.write(line)
f.close()
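# A minimal sketch of the file written above, assuming wv_unit='nm' and
# f_lambda_unit='erg / (cm**2 * s * nm)'; the numbers are illustrative only:
#     # nm erg / (cm**2 * s * nm)
#       305.50 1.2e-16
#       305.60 1.3e-16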
_standard_dict = {'lte' : 'starSED/phoSimMLT',
'bergeron' : 'starSED/wDs',
'km|kp' : 'starSED/kurucz'}
def get_star_sed_path(filename, name_to_folder=_standard_dict):
'''
Return numpy array of full paths relative to SIMS_SED_LIBRARY_DIR,
given filenames
Parameters
----------
filename list of strings. Usually full filename but may be missing final ".gz"
name_to_folder dict mapping regular expression (to be matched with
filename) to relative path for containing directory
Returns
-------
Full path for file, relative to SIMS_SED_LIBRARY_DIR
'''
compiled = { re.compile(k) : v for (k, v) in name_to_folder.items()}
path_list = []
for f in filename:
m = None
matched = False
for k,v in compiled.items():
f = f.strip()
m = k.match(f)
if m:
p = os.path.join(v, f)
if not p.endswith('.gz'):
p = p + '.gz'
path_list.append(p)
matched = True
break
if not matched:
raise ValueError(f'get_star_sed_path: Filename {f} does not match any known patterns')
return np.array(path_list)
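# A minimal usage sketch; the filename below is hypothetical but matches the
# 'km|kp' (Kurucz) pattern in _standard_dict:
#     get_star_sed_path(['km30_5000.fits_g10_5040'])
#     # -> array(['starSED/kurucz/km30_5000.fits_g10_5040.gz'])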
def create_cosmology(config):
"""
Create a FlatLambdaCDM cosmology from a dictionary of input parameters.
This code is based on/borrowed from
https://github.com/LSSTDESC/gcr-catalogs/blob/master/GCRCatalogs/cosmodc2.py#L128
"""
cosmo_astropy_allowed = FlatLambdaCDM.__init__.__code__.co_varnames[1:]
cosmo_astropy = {k: v for k, v in config.items()
if k in cosmo_astropy_allowed}
cosmology = FlatLambdaCDM(**cosmo_astropy)
return cosmology
class MagNorm:
def __init__(self, cosmology):
"""
Parameters
----------
cosmology : astropy.cosmology
Cosmology object created from the gcr-catalogs galaxy catalog
cosmology specification.
"""
self.cosmology = cosmology
def dl(self, z):
"""
Return the luminosity distance in units of meters.
"""
# Conversion factor from Mpc to meters (obtained from pyccl).
MPC_TO_METER = 3.085677581491367e+22
return self.cosmology.luminosity_distance(z).value*MPC_TO_METER
def __call__(self, tophat_sed_value, redshift_hubble, one_maggy=4.3442e13):
one_Jy = 1e-26 # W/Hz/m**2
Lnu = tophat_sed_value*one_maggy # convert from maggies to W/Hz
Fnu = Lnu/4/np.pi/self.dl(redshift_hubble)**2
return -2.5*np.log10(Fnu/one_Jy) + 8.90
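# A minimal usage sketch; the cosmology parameters and SED value are illustrative only:
#     cosmology = create_cosmology({'H0': 71.0, 'Om0': 0.265, 'Ob0': 0.0448})
#     mag_norm_f = MagNorm(cosmology)
#     mag_norm_f(1.0e-7, 0.5)   # AB magnitude via -2.5*log10(Fnu / 1 Jy) + 8.90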
class LookupInfo(object):
'''
Stash information from the lookup file for a particular hp which
will be useful for Cmp class
Also save tophat scale
'''
def __init__(self, sed_library_dir, hp):
self.sed_lookup_file = os.path.join(sed_library_dir,
f'sed_fit_{hp}.h5')
self.cached = False
def cache_info(self):
if self.cached: return
with h5py.File(self.sed_lookup_file) as f:
# Make a copy which will exist after file is closed
self.sed_names = np.array(f['sed_names'])
self.disk_sed = np.array(f['disk_sed'])
self.bulge_sed = np.array(f['bulge_sed'])
self.galaxy_id = np.array(f['galaxy_id'])
self.cached = True
def get_orig_sed_file(self, cmp, galaxy_id, min_ix=0):
# Start searching for galaxy_id starting with min_ix
the_ix = -1
if cmp not in ['bulge', 'disk']:
raise ValueError(f'Unknown component type "{cmp}" ')
for i in range(min_ix, len(self.galaxy_id)):
if self.galaxy_id[i] == galaxy_id:
the_ix = i
break
if the_ix == -1:
raise ValueError(f'Galaxy {galaxy_id} not found')
if cmp == 'bulge':
return (self.sed_names[self.bulge_sed[the_ix]]).decode("utf-8")
else:
return (self.sed_names[self.disk_sed[the_ix]]).decode("utf-8")
# This class is no longer used. Consider deleting
class Cmp(object):
'''
Handle writing of SED files and booking for either disk or bulge
'''
def __init__(self, cmp_name, obj_coll, output_dir, hp, n_seds, bins,
lookup_info, mag_norm_f):
'''
Parameters
----------
cmp_name string one of 'bulge', 'disk'
obj_coll object collection coming from sky catalog, typically all
galaxies belonging to a particular pixel
output_dir string where to write output SED files
hp int in case we decide to embed in output filename
n_seds int how many SED files to write
bins list list of (start, width) tuples describing bins.
lookup_info LookupInfo information pertaining to a particular hp
mag_norm_f MagNorm Used for computing mag norm
'''
self.cmp_name = cmp_name
self.output_dir = output_dir
self.hp = hp
self.coll = obj_coll
self.n_seds = n_seds
self.n_seds_done = 0
self.bins = bins
lookup_info.cache_info()
self.lookup_info = lookup_info
self.mag_norm_f = mag_norm_f
def _write_sed(self, outpath, sed_list, bins, redshift,
wavelen_step=5.0, summary_only=False):
'''
Convert cosmoDC2-style tophat SEDs to a file of the form expected by
ImSim.
Parameters
----------
outpath string full path of output file
sed_list list of floats list of values as they appear in
cosmoDC2 catalog
bins list((start,width)) bin definitions
redshift -- for the object the sed file is associated with
Return
------
(magnorm, val_500nm) magnorm is our computed magnorm value,
including adjustment for redshift.
val_500nm is the sed value at or near 500 nm
'''
(lmbda, f_lambda,
magnorm, val_500nm) = convert_tophat_sed(bins, sed_list,
self.mag_norm_f,
redshift=redshift,
wavelen_step=wavelen_step)
if not summary_only:
write_sed_file(outpath, lmbda, f_lambda, wv_unit='nm')
start = (min([b.start for b in bins]))/10.0 # A to nm
return (magnorm, val_500nm) # for now
def _write_summary(self, ix, gal, sed, redshift, orig_magnorm, our_magnorm,
val_500nm, orig_sed_file, tp_sed_file):
# Filepath. Use same output dir.
print_dated_msg(f'Entered _write_summary for component {self.cmp_name}')
basename_csv = f'{self.cmp_name}_sed_hp{self.hp}_summary.csv'
outpath_csv = os.path.join(self.output_dir, basename_csv)
basename_csv_brief = f'{self.cmp_name}_sed_hp{self.hp}_brief.csv'
outpath_csv_brief = os.path.join(self.output_dir, basename_csv_brief)
basename_pq = f'{self.cmp_name}_sed_hp{self.hp}_summary.parquet'
outpath_pq = os.path.join(self.output_dir, basename_pq)
out_dict = {}
out_dict['chosen_ix'] = ix
out_dict['gal_id'] = gal
out_dict['redshift'] = redshift
out_dict['orig_magnorm'] = orig_magnorm
out_dict['our_magnorm'] = our_magnorm
out_dict['val_500nm'] = val_500nm
df =
|
pd.DataFrame(data=out_dict)
|
pandas.DataFrame
|
import pickle
from pathlib import Path
import os
import pandas as pd
from openpyxl import load_workbook
from tensorflow.keras.models import load_model
class DLConfig:
def __init__(self, path=None, model_name=None, tokenizer=None,max_nb_words=None, max_sent_len=None,
max_nb_sentences=None, embeddings=None, embedding_dim=None, embedding_path=None, embedding_format = None,
stop_words=None, embedding_matrix=None, padding=None, truncating=None, oov_token=None, lower=None,
remove_punctuation=None, split_by_hyphen=None, lemmatization=None, stems=None, nmr_sentences=None,
seed_value=None, epochs=None, batch_size=None, learning_rate=None, validation_percentage=None,
patience = None, keras_callbacks=False, model_id = None):
self.model_name = model_name
if self.model_name:
self.create_model_folder()
self.model_id=model_id
self.model_id_path = None
if self.model_id is None and self.model_name:
self.set_model_id()
self.path = path
self.tokenizer=tokenizer
self.max_sent_len = max_sent_len
self.max_nb_words = max_nb_words
self.max_nb_sentences = max_nb_sentences
self.nmr_sentences = nmr_sentences
self.embeddings = embeddings
self.embedding_dim = embedding_dim
self.embedding_path = embedding_path
self.embedding_format = embedding_format
self.stop_words = stop_words
self.embedding_matrix = embedding_matrix
self.padding = padding
self.truncating = truncating
self.oov_token = oov_token
self.lower = lower
self.remove_punctuation = remove_punctuation
self.split_by_hyphen = split_by_hyphen
self.lemmatization = lemmatization
self.stems = stems
self.seed_value = seed_value
self.epochs = epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
self.validation_percentage = validation_percentage
self.patience = patience
self.keras_callbacks = keras_callbacks
self.train_acc = None
self.train_f1_score = None
self.test_avg_prec = None
self.test_acc = None
self.test_prec = None
self.test_recall = None
self.test_f1_score = None
self.test_roc_auc = None
self.test_pr_auc = None
self.test_kappa = None
self.test_mcc = None
self.test_true_neg = None
self.test_false_pos = None
self.test_false_neg = None
self.test_true_pos = None
def create_model_folder(self):
if not os.path.exists('models'):
os.mkdir('models')
directory = Path('models/' + self.model_name)
if not os.path.exists(directory):
os.mkdir(directory)
def set_model_id(self):
output_path = Path('models/' + self.model_name + '/' + 'results_'+ self.model_name + '.xlsx')
if os.path.exists(output_path):
reader = pd.read_excel(output_path)
self.model_id = self.model_name + '_' + str(len(reader))
else:
self.model_id = self.model_name + '_0'
self.model_id_path = Path('models/' + self.model_name + '/' + self.model_id)
if not os.path.exists(self.model_id_path):
os.mkdir(self.model_id_path)
def save(self, path=None):
if path:
with open(path, 'wb') as config_path:
pickle.dump(self, config_path)
else:
self.path = self.model_id_path / 'config.txt'
with open(self.path, 'wb') as config_path:
pickle.dump(self, config_path)
def load(self, path):
with open(path, 'rb') as config_path:
return pickle.load(config_path)
def write_report(self):
attrib_dict = self.__dict__.items()
to_remove = ['model_name', 'model_id_path', 'path', 'tokenizer', 'embedding_path', 'embedding_matrix']
data = {}
for tup in attrib_dict:
if tup[0] not in to_remove:
if tup[0] == 'stop_words':
if tup[1] is None:
data[tup[0]] = ['No']
else:
data[tup[0]] = ['Removed']
else:
if tup[1] == False:
data[tup[0]] = ['False']
elif tup[1] == True:
data[tup[0]] = ['True']
else:
data[tup[0]] = [tup[1]]
print(data)
df = pd.DataFrame(data)
if not os.path.exists('../pipelines/models/' + self.model_name):
os.mkdir('../pipelines/models/' + self.model_name)
excel_path = Path('../pipelines/models/' + self.model_name + '/' + 'results_' + self.model_name + '.xlsx')
if not os.path.exists(excel_path):
writer = pd.ExcelWriter(excel_path, engine='xlsxwriter')
df.to_excel(writer, sheet_name=self.model_name, index=False)
writer.save()
else:
writer =
|
pd.ExcelWriter(excel_path, engine='openpyxl')
|
pandas.ExcelWriter
|
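A hedged sketch of pandas.ExcelWriter, the API completed above (the file name here is hypothetical): a writer is opened for a path and engine, DataFrame.to_excel targets a sheet inside it, and using the writer as a context manager replaces the explicit writer.save() call from older pandas.
import pandas as pd

metrics = pd.DataFrame({"acc": [0.9], "f1": [0.88]})                # made-up metrics
with pd.ExcelWriter("results_demo.xlsx", engine="openpyxl") as writer:
    metrics.to_excel(writer, sheet_name="model_0", index=False)
# with engine="openpyxl", mode="a" can append new sheets to an existing workbook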
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])),
(timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)),
(date_range("20170101", periods=3), date_range("20170102", periods=3)),
(
date_range("20170101", periods=3, tz="US/Eastern"),
date_range("20170102", periods=3, tz="US/Eastern"),
),
],
ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
class TestAttributes:
@pytest.mark.parametrize(
"left, right",
[
(0, 1),
(
|
Timedelta("0 days")
|
pandas.Timedelta
|
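A small illustrative sketch of pandas.Timedelta, the constructor completed above: it accepts a string, keyword arguments, or the result of Timestamp arithmetic.
import pandas as pd

td1 = pd.Timedelta("0 days")
td2 = pd.Timedelta(days=1, hours=6)
td3 = pd.Timestamp("2017-01-02") - pd.Timestamp("2017-01-01")
assert td3 == pd.Timedelta("1 day") and td2 > td1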
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
def test_to_numpy(idx):
result = idx.to_numpy()
exp = idx.values
tm.assert_numpy_array_equal(result, exp)
def test_to_frame():
tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")]
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ["first", "second"]
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
# See GH-22580
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False, name=["first", "second"])
expected = DataFrame(tuples)
expected.columns = ["first", "second"]
tm.assert_frame_equal(result, expected)
result = index.to_frame(name=["first", "second"])
expected.index = index
expected.columns = ["first", "second"]
tm.assert_frame_equal(result, expected)
msg = "'name' must be a list / sequence of column names."
with pytest.raises(TypeError, match=msg):
index.to_frame(name="first")
msg = "'name' should have same length as number of levels on index."
with pytest.raises(ValueError, match=msg):
index.to_frame(name=["first"])
# Tests for datetime index
index = MultiIndex.from_product([range(5), pd.date_range("20130101", periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{
0: np.repeat(np.arange(5, dtype="int64"), 3),
1: np.tile(pd.date_range("20130101", periods=3), 5),
}
)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
# See GH-22580
result = index.to_frame(index=False, name=["first", "second"])
expected = DataFrame(
{
"first": np.repeat(np.arange(5, dtype="int64"), 3),
"second": np.tile(pd.date_range("20130101", periods=3), 5),
}
)
tm.assert_frame_equal(result, expected)
result = index.to_frame(name=["first", "second"])
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_frame_dtype_fidelity():
# GH 22420
mi = MultiIndex.from_arrays(
[
pd.date_range("19910905", periods=6, tz="US/Eastern"),
[1, 1, 1, 2, 2, 2],
pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
["x", "x", "y", "z", "x", "y"],
],
names=["dates", "a", "b", "c"],
)
original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
expected_df = DataFrame(
{
"dates": pd.date_range("19910905", periods=6, tz="US/Eastern"),
"a": [1, 1, 1, 2, 2, 2],
"b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
"c": ["x", "x", "y", "z", "x", "y"],
}
)
df = mi.to_frame(index=False)
df_dtypes = df.dtypes.to_dict()
|
tm.assert_frame_equal(df, expected_df)
|
pandas._testing.assert_frame_equal
|
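A minimal sketch of pandas._testing.assert_frame_equal, the helper completed above to check dtype fidelity: it passes silently when two frames match and raises AssertionError on any difference in shape, index, dtypes, or values.
import pandas as pd
import pandas._testing as tm

left = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
right = left.copy()
tm.assert_frame_equal(left, right)                                  # passes silently
# tm.assert_frame_equal(left, right.astype({"a": "float64"}))       # would raise: dtype differs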
# -*- coding: utf-8 -*-
import numpy as np
from pandas import Series, DataFrame, Index, Float64Index
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
class TestFloatIndexers(tm.TestCase):
def check(self, result, original, indexer, getitem):
"""
comparator for results
we need to take care if we are indexing on a
Series or a frame
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
|
assert_almost_equal(result, expected)
|
pandas.util.testing.assert_almost_equal
|
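A hedged note on pandas.util.testing.assert_almost_equal, completed above: pandas.util.testing was the pre-1.0 location of the test helpers and has since been removed; the public pandas.testing module offers the same tolerant comparisons, sketched below.
import pandas as pd
import pandas.testing as pdt

s1 = pd.Series([1.0, 2.0])
s2 = pd.Series([1.0, 2.0 + 1e-9])
pdt.assert_series_equal(s1, s2, check_exact=False)                  # passes within the default tolerance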
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SVMSMOTE
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler
features =
|
pd.read_pickle("model_files/preprocessed_dataset.pkl")
|
pandas.read_pickle
|
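A minimal sketch of pandas.read_pickle, the completion above (file name is hypothetical): to_pickle/read_pickle round-trip a DataFrame with its dtypes intact, which is why the pipeline reloads its preprocessed features this way; pickles should only be loaded from trusted sources.
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3], "y": [0.1, 0.2, 0.3]})
df.to_pickle("preprocessed_demo.pkl")
restored = pd.read_pickle("preprocessed_demo.pkl")
assert restored.equals(df)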
#!/usr/bin/env python
# coding: utf-8
# # Wasserstein Pareto Frontier Experiment on COMPAS Data Set
# ## Import Data
# The experiment used the COMPAS data set as in "Optimized Pre-Processing for Discrimination Prevention" by Calmon et al. for comparison purposes: https://github.com/fair-preprocessing/nips2017/tree/master/compas/experiment_data2
# In[1]:
import numpy as np
import pandas as pd
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.interpolate import interp1d
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score, auc, classification_report, roc_curve
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
from scipy.linalg import sqrtm
from matplotlib import gridspec
from matplotlib.patches import Rectangle
# import data
path =r'/Users/shizhouxu/Documents/LIBRARY/Python/Fair_L2_Supervised_Learning/experiment_data_compass/' # use your path
train_0 = pd.read_csv(path + "train_0.csv",index_col=None, header=0, usecols=range(1,6))
train_1 = pd.read_csv(path + "train_1.csv",index_col=None, header=0, usecols=range(1,6))
train_2 = pd.read_csv(path + "train_2.csv",index_col=None, header=0, usecols=range(1,6))
train_3 = pd.read_csv(path + "train_3.csv",index_col=None, header=0, usecols=range(1,6))
train_4 = pd.read_csv(path + "train_4.csv",index_col=None, header=0, usecols=range(1,6))
test_0 = pd.read_csv(path + "test_0.csv",index_col=None, header=0, usecols=range(1,6))
test_1 = pd.read_csv(path + "test_1.csv",index_col=None, header=0, usecols=range(1,6))
test_2 = pd.read_csv(path + "test_2.csv",index_col=None, header=0, usecols=range(1,6))
test_3 = pd.read_csv(path + "test_3.csv",index_col=None, header=0, usecols=range(1,6))
test_4 = pd.read_csv(path + "test_4.csv",index_col=None, header=0, usecols=range(1,6))
train_new_0 = pd.read_csv(path + "train_new_0.csv",index_col=None, header=0, usecols=range(1,6))
train_new_1 = pd.read_csv(path + "train_new_1.csv",index_col=None, header=0, usecols=range(1,6))
train_new_2 = pd.read_csv(path + "train_new_2.csv",index_col=None, header=0, usecols=range(1,6))
train_new_3 = pd.read_csv(path + "train_new_3.csv",index_col=None, header=0, usecols=range(1,6))
train_new_4 = pd.read_csv(path + "train_new_4.csv",index_col=None, header=0, usecols=range(1,6))
test_new_0 = pd.read_csv(path + "test_new_0.csv",index_col=None, header=0, usecols=range(1,6))
test_new_1 = pd.read_csv(path + "test_new_1.csv",index_col=None, header=0, usecols=range(1,6))
test_new_2 = pd.read_csv(path + "test_new_2.csv",index_col=None, header=0, usecols=range(1,6))
test_new_3 = pd.read_csv(path + "test_new_3.csv",index_col=None, header=0, usecols=range(1,6))
test_new_4 = pd.read_csv(path + "test_new_4.csv",index_col=None, header=0, usecols=range(1,6))
# all available data variables: features = ['race','age_cat','c_charge_degree','priors_count','is_recid']
features = ['race','age_cat','c_charge_degree','priors_count','is_recid']
# sensitive variable: Z_features = ['race']
Z_features = ['race']
# dependent variable: Y_features = ['is_recid']
Y_features = ['is_recid']
# independent variable: X_features = ['age_cat', 'c_charge_degree','priors_count']
X_features = ['age_cat', 'c_charge_degree','priors_count']
# combine the data by train/test category
TrainList=[train_0,train_1,train_2,train_3,train_4]
TestList=[test_0,test_1,test_2,test_3,test_4]
TrainNewList=[train_new_0,train_new_1,train_new_2,train_new_3,train_new_4]
TestNewList=[test_new_0,test_new_1,test_new_2,test_new_3,test_new_4]
# data set combined: df
ord_enc = OrdinalEncoder()
df = pd.concat([train_0,train_1,train_2,train_3,train_4,test_0,test_1,test_2,test_3,test_4])
# data set further excluding the sensitive variable: df_delete
df_delete = df.drop('race',axis = 1)
# sensitive variable Z: race
race = df['race']
# ## Compute the Wasserstein Pseudo-barycenter for X
# In[2]:
# independent variable: X
X = np.delete(np.array(pd.get_dummies(df[X_features])),[4],axis = 1)
# dependent variable: Y
Y = np.array(
|
pd.get_dummies(df[Y_features])
|
pandas.get_dummies
|
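An illustrative sketch of pandas.get_dummies, the completion above, on toy data rather than the COMPAS files: each category level becomes a 0/1 indicator column, which is what the experiment then wraps in np.array.
import pandas as pd

toy = pd.DataFrame({"age_cat": ["<25", "25-45", ">45", "25-45"]})
print(pd.get_dummies(toy))                     # one indicator column per level
print(pd.get_dummies(toy, drop_first=True))    # drop one level to avoid collinearity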
import pandas as pd
def optimize_dtype(
series: pd.Series,
dtype: str,
) -> pd.Series:
"""Automatically converts series to handled data type.
:param series: Series to be converted.
:param dtype: Data type is used for conversion.
:return: Optimized pandas series.
"""
return series.astype(dtype)
def convert_date(raw_dates: pd.Series) -> pd.Series:
"""Automatically converts series containing raw dates
to specific format.
:param raw_dates: Series to be converted.
:return: Optimized pandas series.
"""
raw_dates =
|
pd.to_datetime(raw_dates, utc=True)
|
pandas.to_datetime
|
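A minimal sketch of pandas.to_datetime with utc=True, the completion above: strings with mixed offsets, or no offset at all, are parsed and normalized to a single tz-aware UTC dtype.
import pandas as pd

raw = pd.Series(["2021-01-01 12:00:00+01:00", "2021-06-01"])
parsed = pd.to_datetime(raw, utc=True)
print(parsed.dtype)                            # datetime64[ns, UTC]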
import sqlite3
import sqlalchemy as sa
import pandas as pd
from powergenome.params import DATA_PATHS
from powergenome.util import init_pudl_connection
GENS860_COLS = [
"report_date",
"plant_id_eia",
"generator_id",
# "associated_combined_heat_power",
# "balancing_authority_code_eia",
# "bypass_heat_recovery",
"capacity_mw",
# "county",
"current_planned_operating_date",
"energy_source_code_1",
# "ferc_cogen_status",
# "iso_rto_code",
# "latitude",
# "longitude",
"minimum_load_mw",
# "operating_date",
"operational_status_code",
# "original_planned_operating_date",
# "state",
"summer_capacity_mw",
"technology_description",
# "unit_id_pudl",
"winter_capacity_mw",
"fuel_type_code_pudl",
# "zip_code",
"planned_retirement_date",
"time_cold_shutdown_full_load_code",
"switch_oil_gas",
"planned_new_capacity_mw",
"energy_source_code_2",
"region",
]
GEN_FUEL_COLS = [
"report_date",
"plant_id_eia",
"energy_source_code",
"fuel_consumed_for_electricity_mmbtu",
"fuel_consumed_for_electricity_units",
"fuel_consumed_mmbtu",
"fuel_consumed_units",
"fuel_mmbtu_per_unit",
"net_generation_mwh",
"prime_mover_code",
"fuel_type_code_pudl",
]
ENTITY_COLS = ["plant_id_eia", "generator_id", "prime_mover_code", "operating_date"]
def create_testing_db():
pudl_engine, pudl_out, pg_engine = init_pudl_connection(
start_year=2018, end_year=2020
)
pudl_test_conn = sqlite3.connect(DATA_PATHS["test_data"] / "pudl_test_data.db")
plant_region = pd.read_sql_table("plant_region_map_epaipm", pg_engine)
# gens_860 = pudl_out.gens_eia860()
s = "SELECT * from generators_eia860 where strftime('%Y',report_date)='2020'"
gens_860 = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
# gens_860 = gens_860.loc[gens_860.report_date.dt.year == 2020, :]
gens_860 = pd.merge(gens_860, plant_region, on="plant_id_eia", how="inner")
gens_860 = gens_860.loc[:, GENS860_COLS]
gens_860 = gens_860.groupby(
["region", "technology_description"], as_index=False
).head(10)
gens_860 = gens_860.drop(columns="region")
eia_plant_ids = gens_860["plant_id_eia"].unique()
gen_entity = pd.read_sql_table("generators_entity_eia", pudl_engine)
gen_entity = gen_entity.loc[
gen_entity["plant_id_eia"].isin(eia_plant_ids), ENTITY_COLS
]
bga = pudl_out.bga_eia860()
bga = bga.loc[
(bga.report_date.dt.year == 2020) & (bga.plant_id_eia.isin(eia_plant_ids)), :
]
s = "SELECT * from generation_fuel_eia923 where strftime('%Y',report_date)='2020'"
gen_fuel = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
gen_fuel = gen_fuel.loc[
gen_fuel.plant_id_eia.isin(eia_plant_ids),
GEN_FUEL_COLS,
]
s = "SELECT * from generation_fuel_nuclear_eia923 where strftime('%Y',report_date)='2020'"
gen_fuel_nuc = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
gen_fuel_nuc = gen_fuel_nuc.loc[
gen_fuel_nuc.plant_id_eia.isin(eia_plant_ids),
GEN_FUEL_COLS,
]
# gen_fuel = pd.concat([gen_fuel, gen_fuel_nuc], ignore_index=True)
s = "SELECT * from generation_eia923 where strftime('%Y',report_date)='2020'"
gen_923 = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
gen_923 = gen_923.loc[
gen_923.plant_id_eia.isin(eia_plant_ids),
:,
]
s = "SELECT * from boiler_fuel_eia923 where strftime('%Y',report_date)='2020'"
boiler_fuel = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
boiler_fuel = boiler_fuel.loc[
boiler_fuel.plant_id_eia.isin(eia_plant_ids),
:,
]
plant_entity = pd.read_sql_table("plants_entity_eia", pudl_engine)
plant_entity = plant_entity.loc[plant_entity["plant_id_eia"].isin(eia_plant_ids), :]
plants_eia_860 = pd.read_sql_table("plants_eia860", pudl_engine)
plants_eia_860 = plants_eia_860.loc[
plants_eia_860["plant_id_eia"].isin(eia_plant_ids), :
]
plants_eia = pd.read_sql_table("plants_eia", pudl_engine)
plants_eia = plants_eia.loc[plants_eia["plant_id_eia"].isin(eia_plant_ids), :]
utilities_eia = pd.read_sql_table("utilities_eia", pudl_engine)
utilities_entity =
|
pd.read_sql_table("utilities_entity_eia", pudl_engine)
|
pandas.read_sql_table
|
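A hedged sketch of pandas.read_sql_table, the completion above; the SQLite file and tiny table below are hypothetical stand-ins for the PUDL database. read_sql_table pulls a whole table through a SQLAlchemy engine, while read_sql_query, used elsewhere in the snippet, runs an explicit SQL statement.
import pandas as pd
import sqlalchemy as sa

engine = sa.create_engine("sqlite:///demo_pudl.db")                      # hypothetical DB file
pd.DataFrame({"utility_id_eia": [1, 2], "utility_name_eia": ["A", "B"]}).to_sql(
    "utilities_entity_eia", engine, index=False, if_exists="replace")
full_table = pd.read_sql_table("utilities_entity_eia", engine)           # entire table
subset = pd.read_sql_query("SELECT * FROM utilities_entity_eia LIMIT 1", engine)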
import re
import json
from tqdm import tqdm
import pandas as pd
from collections import defaultdict
def produce():
return {
'name': None,
'time': None,
'begin': None,
'end': None,
'childs': []
}
class Trace:
def __init__(self, path):
self.path = path
def __iter__(self):
f = open(self.path)
while True:
line = f.readline()
if line:
yield line
else:
f.close()
return
class Parse:
def __init__(self, paths):
self.traces = [Trace(path) for path in paths]
self.patt_str = r'\{"a":.*?[(\])|(\})]\}'
self.patt = re.compile(self.patt_str)
self.res = []
self.call = []
self.nodes = defaultdict(produce)
self.trees = []
self.loads()
def loads(self):
for trace in tqdm(self.traces):
for line in trace:
self.res.extend(
[json.loads(tree) for tree in self.patt.findall(line)]
)
def parse_nodes(self):
for func in self.res:
cur = self.nodes[func['a']]
if not cur['name']:
cur['name'] = func['d']
cur['time'] = func['e'] - func['b']
cur['begin'] = func['b']
cur['end'] = func['e']
if not len(func['p']):
self.call.append(func['a'])
continue
for p_hash in func['p']:
parent = self.nodes[p_hash]
parent['childs'].append(func['a'])
def build_tree(self):
if not len(self.call):
self.parse_nodes()
def deepCall(root, callTree):
if not len(self.nodes[root]['childs']):
return
for child in self.nodes[root]['childs']:
callTree['childs'].append({
"hash": child,
"name": self.nodes[child]['name'],
"childs": []
})
deepCall(child, callTree['childs'][-1])
for root in tqdm(self.call):
self.trees.append({
"hash": root,
"name": self.nodes[root]['name'],
"childs": []
})
deepCall(root, self.trees[-1])
def compress(node, desc, deep, layer):
layer = layer + "&" + str(deep)
# vis = set()
for index, child in enumerate(node['childs']):
name = child['name']
if '[' in child['name']:
name = child['name'][:child['name'].find('[')]
if '(' in child['name']:
name = child['name'][:child['name'].find('(')]
# if name in vis:
# continue
# vis.add(name)
desc['d'] = desc['d'] + '->' + layer + "#" + str(index) + '(' + name + ')'
compress(child, desc, deep+1, layer + "#" + str(index))
def hash_tree(trees):
hashtree = []
for tree in trees:
name = tree['name']
if '[' in tree['name']:
name = tree['name'][:tree['name'].find('[')]
if '(' in tree['name']:
name = tree['name'][:tree['name'].find('(')]
desc = {'d': '0'+'('+name+')'}
compress(tree, desc, 1, '0')
hashtree.append(desc['d'])
return hashtree
def clean_nodes(data):
name, time, child = [],[],[]
for d in data:
name.append(d['name'])
time.append(d['time'])
child.append(len(d['childs']))
func_info = {}
func_info['name'] = name
func_info['time'] = time
func_info['child'] = child
return func_info
def read_trace(file, flag=False):
pfile = file.split('.')[0]+'.'+'pkl'
if flag:
n =
|
pd.read_pickle(pfile)
|
pandas.read_pickle
|
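A hedged sketch of the caching pattern built around pandas.read_pickle above (file names are made up): the parsed node table is persisted once with to_pickle, and later runs pass flag=True so read_trace can reload the cached pickle instead of re-parsing the raw trace.
import pandas as pd

func_info = {"name": ["read", "parse"], "time": [12, 48], "child": [0, 2]}
nodes = pd.DataFrame(func_info)
nodes.to_pickle("trace_demo.pkl")              # first run: cache the parsed table
cached = pd.read_pickle("trace_demo.pkl")      # later runs: fast reload
assert cached.equals(nodes)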
"""gps.py
Utilities for integrating GPS with InSAR maps
Links:
1. list of LLH for all gps stations: ftp://gneiss.nbmg.unr.edu/rapids/llh
Note: ^^ This file is stored in the `STATION_LLH_FILE`
2. Clickable names to explore: http://geodesy.unr.edu/NGLStationPages/GlobalStationList
3. Map of stations: http://geodesy.unr.edu/NGLStationPages/gpsnetmap/GPSNetMap.html
"""
from __future__ import division, print_function
from glob import glob
import re
import os
import difflib # For station name misspelling checks
import datetime
from dataclasses import dataclass
from functools import lru_cache
import requests
import h5py
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.dates as mdates
import apertools.latlon
import apertools.los
import apertools.utils
import apertools.sario
from apertools.sario import LOS_FILENAME
import apertools.plotting
from apertools.log import get_log
logger = get_log()
# URL for ascii file of 24-hour final GPS solutions in east-north-vertical (NA12)
# GPS_BASE_URL = "http://geodesy.unr.edu/gps_timeseries/tenv3/NA12/{station}.NA12.tenv3"
# UPDATE 4/20/20: noticed they changed it to
GPS_BASE_URL = (
"http://geodesy.unr.edu/gps_timeseries/tenv3/plates/{plate}/{station}.{plate}.tenv3"
)
# NOTE: for now i'm assuming all plate-fixed data is the only one i care about...
# if i also want IGS14, i'll need to divide directories and do more
GPS_FILE = GPS_BASE_URL.split("/")[-1].replace(".{plate}", "")
# The main web page per station
# We'll use this for now to scrape the plate information with a regex :(
GPS_STATION_URL = "http://geodesy.unr.edu/NGLStationPages/stations/{station}.sta"
DIRNAME = os.path.dirname(os.path.abspath(__file__))
def _get_gps_dir():
path = apertools.utils.get_cache_dir(force_posix=True)
if not os.path.exists(path):
os.makedirs(path)
return path
GPS_DIR = _get_gps_dir()
# These lists get updated occasionally... to keep them fresh, download one for the current day
# old ones will be removed upon new download
STATION_LLH_URL = "http://geodesy.unr.edu/NGLStationPages/llh.out"
STATION_LLH_FILE = os.path.join(GPS_DIR, "station_llh_all_{today}.csv")
STATION_XYZ_URL = "http://geodesy.unr.edu/NGLStationPages/DataHoldings.txt"
STATION_XYZ_FILE = os.path.join(GPS_DIR, "station_xyz_all_{today}.csv")
@dataclass
class InsarGPSCompare:
insar_filename: str = "deformation.h5"
dset: str = "linear_velocity"
los_map_file: str = LOS_FILENAME
# to measure GPS relative to some other station, set the reference station
reference_station: str = None
# Used to average InSAR values in a box around the stations
window_size: int = 3
# These will get used in the GPS df creation; they're set using the InSAR data
start_date: datetime.date = None
end_date: datetime.date = None
# To keep only stations with at least 60% coverage over the
# time period we care about, set a NaN threshold of 0.4
max_nan_pct: float = 0.4
# To smooth the GPS or insar timeseries, set the number of smoothing days
days_smooth_insar: int = None
days_smooth_gps: int = None
# Create an extra column in the output with the difference
# of (GPS - InSAR) for each station
create_diffs: bool = True
# Use the median trend to compare differences
median: bool = True
# convert velocity comparisons to millimeter/year
to_mm_year: bool = True
print_summary: bool = True
def run(self):
"""Create the GPS/InSAR DataFrame and compare average velocities
Returns:
df_full (DataFrame) with 1 GPS, 1 InSAR, and 1 _diff column per station
df_velo_diffs (DataFrame): 3-column dataframe comparing average velocities.
Columns: velo_diff v_gps v_insar
Each row is one station
"""
df_full = self.create_df()
df_velo_diffs = self.compare_velocities(
to_mm_year=self.to_mm_year, print_summary=self.print_summary
)
return df_full, df_velo_diffs
def create_df(self):
"""Set days_smooth to None or 0 to avoid any data smoothing
If a reference station is specified, all time series will subtract
that station's data
"""
df_gps_locations = get_stations_within_image(
filename=self.insar_filename, dset=self.dset
)
df_insar = self.create_insar_df(df_gps_locations)
# Cap the GPS we use by the InSAR start/end dates
self._set_start_end_date(df_insar)
df_gps = self.create_gps_los_df(df_gps_locations)
df = self.combine_insar_gps_dfs(df_insar, df_gps)
if self.reference_station is not None:
df = self._subtract_reference(df)
if self.create_diffs:
for stat in df_gps_locations["name"]:
df[f"{stat}_diff"] = df[f"{stat}_gps"] - df[f"{stat}_insar"]
self.df = self._remove_bad_cols(df)
return self.df
def compare_velocities(self, median=None, to_mm_year=True, print_summary=True):
"""Given the combine insar/gps dataframe, fit and compare slopes
Args:
median (bool): optional. If True, uses TSIA median estimator
to_mm_year (bool): convert the velocities to mm/year. Otherwise, cm/day
"""
df = getattr(self, "df", None)
if df is None:
raise ValueError("Must run create_df before compare_velocities")
if median is None:
median = self.median
station_names = self.get_stations()
df_velo_diffs = pd.DataFrame({"name": station_names})
diffs = []
v_gps_list = []
v_insar_list = []
for station in station_names:
ts_gps = df[station + "_gps"]
ts_insar = df[station + "_insar"]
v_gps = fit_line(ts_gps, median=self.median)[0]
v_insar = fit_line(ts_insar, median=self.median)[0]
v_gps_list.append(v_gps)
v_insar_list.append(v_insar)
diffs.append(v_gps - v_insar)
df_velo_diffs["velo_diff"] = diffs
df_velo_diffs["v_gps"] = v_gps_list
df_velo_diffs["v_insar"] = v_insar_list
if to_mm_year: # as opposed to cm/day
df_velo_diffs[["velo_diff", "v_gps", "v_insar"]] *= 365.25 * 10
df_velo_diffs.set_index("name", inplace=True)
if print_summary:
units = "mm/year" if to_mm_year else "cm/day"
print("RMS Difference:")
print(f"{self.rms(df_velo_diffs['velo_diff']):.3f} {units}")
self.df_velo_diffs = df_velo_diffs
return df_velo_diffs
def get_stations(self):
"""Takes df with columns ['NMHB_insar', 'TXAD_insar',...],
returns list of unique station names"""
# Check that we have run `create_df`
df = getattr(self, "df", None)
if df is None:
return []
return list(sorted(set(map(lambda s: s.split("_")[0], df.columns))))
def create_insar_df(self, df_gps_locations):
"""Set days_smooth to None or 0 to avoid any data smoothing"""
with xr.open_dataset(self.insar_filename) as ds:
da = ds[self.dset]
if "date" in da.coords:
# 3D option
is_2d = False
dates = da.indexes["date"]
else:
# 2D: just the average ground velocity
is_2d = True
dates = ds.indexes["date"]
df_insar = pd.DataFrame({"date": dates})
for row in df_gps_locations.itertuples():
ts = apertools.utils.window_stack_xr(
da, lon=row.lon, lat=row.lat, window_size=self.window_size
)
if is_2d:
# Make a cum_defo from the linear trend
x = (dates - dates[0]).days
v_cm_yr = ts.item()
coeffs = [v_cm_yr / 365.25, 0]
df_insar[row.name] = linear_trend(coeffs=coeffs, x=x)
# NOTE: To recover the linear velocity used here:
# gps.fit_line(df_insar[station_name])[0] * 365.25
else:
df_insar[row.name] = ts
df_insar.set_index("date", inplace=True)
self.df_insar = df_insar
return df_insar
def create_gps_los_df(self, df_gps_locations, start_date=None, end_date=None):
return self._create_gps_df(
df_gps_locations, kind="los", start_date=start_date, end_date=end_date
)
def create_gps_enu_df(self, df_gps_locations, start_date=None, end_date=None):
return self._create_gps_df(
df_gps_locations, kind="enu", start_date=start_date, end_date=end_date
)
def _create_gps_df(
self, df_gps_locations, start_date=None, end_date=None, kind="los"
):
if start_date is None:
start_date = self.start_date
if end_date is None:
end_date = self.end_date
df_list = []
# df_locations = get_stations_within_image(filename=los_map_file)
for row in df_gps_locations.itertuples():
if kind.lower() == "los":
df_los = load_gps_los(
los_map_file=self.los_map_file,
station_name=row.name,
start_date=start_date,
end_date=end_date,
zero_start=True,
# coordinates=self.coordinates,
)
elif kind.lower() == "enu":
df_los = load_station_enu(
station_name=row.name,
start_date=self.start_date,
end_date=self.end_date,
)
# np.array(pd.Series(arr).rolling(window_size).mean())
# df = pd.DataFrame({"date": _series_to_date(gps_dts)})
# df[stat + "_gps"] = moving_average(los_gps_data, days_smooth)
# df[stat + "_smooth_gps"] = moving_average(los_gps_data, days_smooth)
df_list.append(df_los)
# Now merge each one in turn, keeping all dates
df_gps = pd.concat(df_list, join="outer", axis="columns")
# These will all have the same name, so set equal to the station
df_gps.columns = df_gps_locations.name.values
self.df_gps = df_gps
return df_gps
def _set_start_end_date(self, df_insar):
# constrain the date range to just the InSAR min/max dates
self.start_date = df_insar.index.min()
self.end_date = df_insar.index.max()
def combine_insar_gps_dfs(self, df_insar, df_gps):
df = pd.merge(
df_gps,
df_insar,
how="left",
left_on="date",
right_on="date",
suffixes=("_gps", "_insar"),
)
if self.start_date:
df = df.loc[(df.index >= self.start_date)]
if self.end_date:
df = df.loc[(df.index <= self.end_date)]
return df
def rms(self, errors=None):
if errors is None:
errors = self.df_velo_diffs["velo_diff"]
return np.sqrt(np.mean(np.square(errors)))
def total_abs_error(self, errors=None):
if errors is None:
errors = self.df_velo_diffs["velo_diff"]
return np.sum(np.abs(errors))
def _find_bad_cols(
self, df, max_nan_pct=0.4, empty_start_len=None, empty_end_len=None
):
# If we care about the first `empty_start_len` entries being empty, pass empty_start_len as an int
empty_starts = df.columns[df.head(empty_start_len).isna().all()]
if empty_end_len:
empty_ends = df.columns[df.tail(empty_end_len).isna().all()]
else:
empty_ends = []
nan_pcts = df.isna().sum() / len(df)
# print("nan pcts:\n", nan_pcts)
high_pct_nan = df.columns[nan_pcts > max_nan_pct]
# Ignore all the insar nans
high_pct_nan = [c for c in high_pct_nan if "gps" in c]
all_cols = np.concatenate(
(
np.array(empty_starts),
np.array(empty_ends),
np.array(high_pct_nan),
)
)
return list(set(all_cols))
def _remove_bad_cols(self, df):
"""Drops columns that are all NaNs, or where GPS doesn't cover whole period"""
bad_cols = self._find_bad_cols(df, max_nan_pct=self.max_nan_pct)
logger.info("Removing the following bad columns:")
logger.info(bad_cols)
df_out = df.copy()
for col in bad_cols:
if col not in df_out.columns:
continue
df_out.drop(col, axis=1, inplace=True)
station = col.replace("_gps", "").replace("_insar", "")
c = "%s_gps" % station
if c in df_out.columns:
df_out.drop(c, axis=1, inplace=True)
c = "%s_insar" % station
if c in df_out.columns:
df_out.drop(c, axis=1, inplace=True)
c = "%s_diff" % station
if c in df_out.columns:
df_out.drop(c, axis=1, inplace=True)
return df_out
def _subtract_reference(self, df):
"""Center all columns of `df` based on the `reference_station` columns"""
gps_ref_col = "%s_%s" % (self.reference_station, "gps")
insar_ref_col = "%s_%s" % (self.reference_station, "insar")
df_out = df.copy()
for col in df.columns:
if "gps" in col:
df_out[col] = df[col] - df[gps_ref_col]
elif "insar" in col:
df_out[col] = df[col] - df[insar_ref_col]
return df_out
@dataclass
class TrendEstimator:
series: pd.Series
tol_days: int = 30
def tsia(self):
"""Calculate the Thiel-Sen Inter-annual slope of a data Series
https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator
Assumes the `series` has a DatetimeIndex.
Forms all possible difference of data which span 1 year +/- `tol_days`,
then takes the median slope of these differences
"""
# Get the non-nan values of the series
data = self.series.dropna().values
times = self.series.dropna().index
# Convert to numerical values for fitting:
# t = (times - times[0]).days
t = mdates.date2num(times)
time_diffs = self._get_all_differences(t)
slopes = self._get_all_differences(data) / time_diffs
# Now pick slopes within `tol_days` of annual
# > 180 to make sure we dont' use super short periods
accept_idxs = np.logical_and(
time_diffs > 180, (self._dist_from_year(time_diffs) < self.tol_days)
)
slopes_annual = slopes[accept_idxs]
slope = np.median(slopes_annual)
# Add Normal dist. factor to MAD
sig = 1.4826 * self.mad(slopes_annual)
# TODO: track down Ben for the origin of this formula... prob on Wiki for TSIA
uncertainty = 3 * np.sqrt(np.pi / 2) * sig / np.sqrt(len(slopes_annual) / 4)
b = np.median(data - slope * t)
return slope, b, uncertainty
@staticmethod
def mad(x):
"""Median absolut deviation"""
return np.median(np.abs(x - np.median(x)))
@staticmethod
def _dist_from_year(v):
"""Get the number of days away from 365, mod 1 year"""
return np.abs((v + 180) % 365 - 180)
@staticmethod
def _get_all_differences(a):
"""Calculate all possible differences between elements of `a`"""
n = len(a)
x = np.reshape(a, (1, n))
difference_matrix = x - x.transpose()
# Now get the upper half (bottom is redundant)
return difference_matrix[np.triu_indices(n)].ravel()
def get_final_east_values(east_df):
stations, vals = [], []
direc = None
for column, val in east_df.tail(14).mean().items():
station, d = column.split("_")
direc = d
stations.append(station)
vals.append(val)
return pd.DataFrame(index=stations, data={direc: vals})
def fit_line(series, median=False):
"""Fit a line to `series` with (possibly) uneven dates as index.
Can be used to detrend, or predict final value
Args:
series (pd.Series): data to fit, with a DatetimeIndex
median (bool): if true, use the TSIA median estimator to fit
Returns: [slope, intercept]
"""
# TODO: check that subtracting first item doesn't change it
series_clean = series.dropna()
idxs = mdates.date2num(series_clean.index)
coeffs = np.polyfit(idxs, series_clean, 1)
if median:
# Replace the Least squares fit with the median inter-annual slope
est = TrendEstimator(series)
# med_slope, intercept, uncertainty = est.tsia()
coeffs = est.tsia()[:2]
return coeffs
def linear_trend(series=None, coeffs=None, index=None, x=None, median=False):
"""Get a series of points representing a linear trend through `series`
First computes the linear regression, then evaluates it at each
date of `series.index`
Args:
series (pandas.Series): data with DatetimeIndex as the index.
coeffs (array or List): [slope, intercept], result from np.polyfit
index (DatetimeIndex, list[date]): Optional. If not passing series, can pass
the DatetimeIndex or list of dates to evaluate coeffs at.
Converts to numbers using `matplotlib.dates.date2num`
x (ndarray-like): directly pass the points to evaluate the poly1d
Returns:
Series: a line, equal length to arr, with same index as `series`
"""
if coeffs is None:
coeffs = fit_line(series, median=median)
if index is None and x is None:
index = series.dropna().index
if x is None:
x = mdates.date2num(index)
poly = np.poly1d(coeffs)
linear_points = poly(x)
return pd.Series(linear_points, index=index)
def _flat_std(series):
"""Find the std dev of an Series with a linear component removed"""
return np.std(series - linear_trend(series))
def load_station_enu(
station_name,
start_date=None,
end_date=None,
download_if_missing=True,
force_download=False,
zero_by="mean",
to_cm=True,
):
"""Loads one gps station's ENU data since start_date until end_date as a dataframe
Args:
station_name (str): 4 Letter name of GPS station
See http://geodesy.unr.edu/NGLStationPages/gpsnetmap/GPSNetMap.html for map
start_date (datetime or str): Optional. cutoff for beginning of GPS data
end_date (datetime or str): Optional. cut off for end of GPS data
download_if_missing (bool): default True
force_download (bool): default False
"""
# start_date, end_date = _parse_dates(start_date, end_date)
if zero_by not in ("start", "mean"):
raise ValueError("'zero_by' must be either 'start' or 'mean'")
station_name = station_name.upper()
gps_data_file = os.path.join(GPS_DIR, GPS_FILE.format(station=station_name))
if force_download:
try:
os.remove(gps_data_file)
logger.info(f"force removed {gps_data_file}")
except FileNotFoundError:
pass
if not os.path.exists(gps_data_file):
if download_if_missing:
logger.info(f"Downloading {station_name} to {gps_data_file}")
download_station_data(station_name)
else:
raise ValueError(
"{gps_data_file} does not exist, download_if_missing = False"
)
df = pd.read_csv(gps_data_file, header=0, sep=r"\s+", engine="c")
clean_df = _clean_gps_df(df, start_date, end_date)
if to_cm:
# logger.info("Converting %s GPS to cm" % station_name)
clean_df[["east", "north", "up"]] = 100 * clean_df[["east", "north", "up"]]
if zero_by.lower() == "mean":
mean_val = clean_df[["east", "north", "up"]].mean()
# enu_zeroed = clean_df[["east", "north", "up"]] - mean_val
clean_df[["east", "north", "up"]] -= mean_val
elif zero_by.lower() == "start":
start_val = clean_df[["east", "north", "up"]].iloc[:10].mean()
# enu_zeroed = clean_df[["east", "north", "up"]] - start_val
clean_df[["east", "north", "up"]] -= start_val
# Finally, make the 'date' column a DateIndex
return clean_df.set_index("date")
def _clean_gps_df(df, start_date=None, end_date=None):
"""Reorganize the Nevada GPS data format"""
df = df.copy()
df["date"] = pd.to_datetime(df["YYMMMDD"], format="%y%b%d")
if start_date:
df = df[df["date"] >= pd.to_datetime(start_date)]
if end_date:
df = df[df["date"] <= pd.to_datetime(end_date)]
df_enu = df[["date", "__east(m)", "_north(m)", "____up(m)"]]
df_enu = df_enu.rename(
mapper=lambda s: s.replace("_", "").replace("(m)", ""), axis="columns"
)
df_enu.reset_index(inplace=True, drop=True)
return df_enu
def get_stations_within_image(
filename=None,
dset=None,
da=None,
bad_vals=[0],
mask_invalid=True,
):
"""Given an image, find gps stations contained in area
Should be GDAL- or xarray-readable with lat/lon coordinates
Args:
filename (str): filename to load
mask_invalid (bool): Default true. if true, don't return stations
where the image value is NaN or exactly 0
bad_vals (list[float]): values (beside nan) indicating no data
(default: [0])
Returns:
ndarray: Nx3, with columns ['name', 'lon', 'lat']
"""
from shapely import geometry
if da is None:
try:
da = xr.open_dataset(filename)[dset]
except Exception:
import rioxarray
da = rioxarray.open_rasterio(filename).rename({"x": "lon", "y": "lat"})
# Do i care to filter out those not really in the image for radar coords?
is_2d_latlon = da.lat.ndim == 2
gdf = read_station_llas(to_geodataframe=True)
image_bbox = geometry.box(*apertools.latlon.bbox_xr(da))
gdf_within = gdf[gdf.geometry.within(image_bbox)]
# good_stations = []
# Will need to select differently for radar coords
if mask_invalid:
good_idxs = []
for row in gdf_within.itertuples():
if is_2d_latlon:
r, c = apertools.latlon.latlon_to_rowcol_rdr(
row.lat,
row.lon,
lat_arr=da.lat.data,
lon_arr=da.lon.data,
warn_oob=False,
)
if r is None or c is None:
# out of bounds (could be on a diagonal corner of the bbox)
continue
val = da[..., r, c]
else:
val = da.sel(lat=row.lat, lon=row.lon, method="nearest")
if np.any(np.isnan(val)) or np.any([np.all(val == v) for v in bad_vals]):
continue
else:
# good_stations.append([row.name, row.lon, row.lat])
good_idxs.append(row.Index)
# to keep 'name' as column, but reset the former index to start at 0
gdf_within = gdf_within.loc[good_idxs].reset_index(drop=True)
return gdf_within
@lru_cache()
def read_station_llas(filename=None, to_geodataframe=False, force_download=True):
"""Read in the name, lat, lon, alt list of gps stations
Assumes the file is space-separated with "name,lat,lon,alt" as columns
"""
today = datetime.date.today().strftime("%Y%m%d")
filename = filename or STATION_LLH_FILE.format(today=today)
lla_path = os.path.join(GPS_DIR, filename)
_remove_old_lists(lla_path)
logger.debug("Searching %s for gps data" % filename)
try:
df = pd.read_csv(lla_path, sep=r"\s+", engine="c", header=None)
except FileNotFoundError:
logger.info("Downloading from %s to %s", STATION_LLH_URL, lla_path)
download_station_locations(lla_path, STATION_LLH_URL)
df = pd.read_csv(lla_path, sep=r"\s+", engine="c", header=None)
df.columns = ["name", "lat", "lon", "alt"]
if to_geodataframe:
import geopandas as gpd
return gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon, df.lat))
else:
return df
def _remove_old_lists(lla_path):
today = datetime.date.today().strftime("%Y%m%d")
gps_dir = os.path.split(lla_path)[0]
station_list_files = glob(os.path.join(gps_dir, "station_*"))
files_to_delete = [f for f in station_list_files if today not in f]
for f in files_to_delete:
logger.info("Removing old station list file: %s", f)
os.remove(f)
@lru_cache()
def read_station_xyzs(filename=None):
"""Read in the name, X, Y, Z position of gps stations"""
today = datetime.date.today().strftime("%Y%m%d")
filename = filename or STATION_XYZ_FILE.format(today=today)
_remove_old_lists(filename)
logger.debug("Searching %s for gps data" % filename)
try:
df = pd.read_csv(
filename,
sep=r"\s+",
engine="c",
warn_bad_lines=True,
error_bad_lines=False,
)
except FileNotFoundError:
logger.warning("%s not found; downloading from %s", filename, STATION_XYZ_URL)
download_station_locations(filename, STATION_XYZ_URL)
df = pd.read_csv(
filename,
sep=r"\s+",
engine="c",
warn_bad_lines=True,
error_bad_lines=False,
)
orig_cols = "Sta Lat(deg) Long(deg) Hgt(m) X(m) Y(m) Z(m) Dtbeg Dtend Dtmod NumSol StaOrigName"
new_cols = "name lat lon alt X Y Z dtbeg dtend dtmod numsol origname"
mapping = dict(zip(orig_cols.split(), new_cols.split()))
return df.rename(columns=mapping)
def download_station_locations(filename, url):
"""Download either station LLH file or XYZ file from Nevada website
url = [STATION_XYZ_URL or STATION_LLH_URL]
"""
resp = requests.get(url)
resp.raise_for_status()
with open(filename, "w") as f:
f.write(resp.text)
def download_station_data(station_name):
station_name = station_name.upper()
plate = _get_station_plate(station_name)
# plate = "PA"
url = GPS_BASE_URL.format(station=station_name, plate=plate)
response = requests.get(url)
response.raise_for_status()
filename = os.path.join(GPS_DIR, GPS_FILE.format(station=station_name, plate=plate))
logger.info(f"Saving {url} to {filename}")
with open(filename, "w") as f:
f.write(response.text)
def _get_station_plate(station_name):
url = GPS_STATION_URL.format(station=station_name)
response = requests.get(url)
response.raise_for_status()
# NOTE: This is not necessarily the only one!
# CA GPS stations have PA and NA plate fixed... do i ever care about both?
match = re.search(r"(?P<plate>[A-Z]{2}) Plate Fixed", response.text)
if not match:
raise ValueError("Could not find plate name on %s" % url)
return match.groupdict()["plate"]
def station_lonlat(station_name):
"""Return the (lon, lat) in degrees of `station_name`"""
df = read_station_llas()
station_name = station_name.upper()
if station_name not in df["name"].values:
closest_names = difflib.get_close_matches(station_name, df["name"], n=5)
raise ValueError(
"No station named %s found. Closest: %s" % (station_name, closest_names)
)
name, lat, lon, alt = df[df["name"] == station_name].iloc[0]
return lon, lat
def station_xyz(station_name):
"""Return the (X, Y, Z) in meters of `station_name`"""
df = read_station_xyzs()
station_name = station_name.upper()
if station_name not in df["name"].values:
closest_names = difflib.get_close_matches(station_name, df["name"], n=5)
raise ValueError(
"No station named %s found. Closest: %s" % (station_name, closest_names)
)
X, Y, Z = df.loc[df["name"] == station_name, ["X", "Y", "Z"]].iloc[0]
return X, Y, Z
def station_rowcol(station_name, rsc_data=None, filename=None):
"""Find the row/columns of a station name within an image
Image coordinates can be defined with `rsc_data` from .rsc file,
or by a gdal-readable `filename`
"""
if rsc_data is None and filename is None:
raise ValueError("Need either rsc_data or filename to locate station")
lon, lat = station_lonlat(station_name)
return apertools.latlon.latlon_to_rowcol(
lat, lon, rsc_data=rsc_data, filename=filename
)
def station_distance(station_name1, station_name2):
"""Find distance (in meters) between two gps stations
Args:
station_name1 (str): name of first GPS station
station_name2 (str): name of second GPS station
Returns:
float: distance (in meters)
"""
lonlat1 = station_lonlat(station_name1)
lonlat2 = station_lonlat(station_name2)
return apertools.latlon.latlon_to_dist(lonlat1[::-1], lonlat2[::-1])
def station_std(station, to_cm=True, start_date=None, end_date=None):
"""Calculates the sum of east, north, and vertical stds of gps"""
enu_df = load_station_enu(
station, start_date=start_date, end_date=end_date, to_cm=to_cm
)
if enu_df.empty:
logger.warning(f"{station} gps data returned an empty dataframe")
return np.nan
return np.sum(enu_df.std())
def load_gps_los(
station_name=None,
los_map_file=LOS_FILENAME,
to_cm=True,
zero_mean=True,
zero_start=False,
start_date=None,
end_date=None,
reference_station=None,
enu_coeffs=None,
force_download=False,
coordinates="geo",
geom_dir="geom_reference",
days_smooth=0,
):
"""Load the GPS timeseries of a station name projected onto InSAR LOS
Returns a DataFrame with index of date, one column for LOS measurement.
This assumes that the los points AWAY from the satellite, towards the ground
(subsidence is a positive LOS measurement, as it increases the LOS distance,
and uplift is negative LOS)
"""
if enu_coeffs is None:
lon, lat = station_lonlat(station_name)
enu_coeffs = apertools.los.find_enu_coeffs(
lon,
lat,
los_map_file=los_map_file,
coordinates=coordinates,
geom_dir=geom_dir,
)
df_enu = load_station_enu(
station_name,
to_cm=to_cm,
start_date=start_date,
end_date=end_date,
force_download=force_download,
)
enu_data = df_enu[["east", "north", "up"]].values.T
los_gps_data = apertools.los.project_enu_to_los(enu_data, enu_coeffs=enu_coeffs)
los_gps_data = los_gps_data.reshape(-1)
if zero_start:
logger.debug("Resetting GPS data start to 0")
los_gps_data = los_gps_data - np.mean(los_gps_data[:100])
elif zero_mean:
logger.debug("Making GPS data 0 mean")
los_gps_data = los_gps_data - np.mean(los_gps_data)
if days_smooth:
los_gps_data = moving_average(los_gps_data, days_smooth)
df_los =
|
pd.DataFrame(data=los_gps_data, index=df_enu.index, columns=["los"])
|
pandas.DataFrame
|
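A minimal, hypothetical sketch of the pandas.DataFrame completion above: a 1-D array of LOS values is wrapped in a single-column frame that reuses the ENU DataFrame's DatetimeIndex, so the GPS series can later be merged with InSAR on date.
import numpy as np
import pandas as pd

dates = pd.date_range("2020-01-01", periods=4, freq="D")       # stand-in for df_enu.index
los_vals = np.array([0.0, -0.1, -0.2, -0.15])                  # made-up values in cm
df_los = pd.DataFrame(data=los_vals, index=dates, columns=["los"])
print(df_los)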
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
# ((Close - Low) - (High - Close)) / (High - Low) * Vol, summed over 6 periods below
data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close']))\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
incomplete
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
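"""
uncompleted -- no alpha value is computed or returned yet
"""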
close = self.close
close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
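# Counts, over a rolling 12-day window, the days on which (High + Low) fell below its value on the previous day.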
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
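# Share of the last 12 sessions on which the close exceeded the prior close, in percent
# (assuming the Count(0, x, y, n) helper counts days with x > y over the last n periods).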
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['open','close']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis = 1, join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
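# RSI-style measure: the 6-period smoothed average gain divided by the smoothed average
# absolute price change, expressed in percent.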
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = np.matlib.repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = r['r1'] * r['r2'] * -1
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
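# Ratio of the 20-day sum of upside excursions (High minus prior Close, floored at 0)
# to the 20-day sum of downside excursions (prior Close minus Low, floored at 0).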
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low']) / data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import functools
import itertools
from typing import Callable, Union
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
try:
import cudf
import cupy as cp
import dask_cudf
from cudf.core.column import as_column, build_column
from cudf.utils.dtypes import is_list_dtype, is_string_dtype
HAS_GPU = True
except ImportError:
HAS_GPU = False
cp = None
cudf = None
try:
# Dask >= 2021.5.1
from dask.dataframe.core import hash_object_dispatch
except ImportError:
# Dask < 2021.5.1
from dask.dataframe.utils import hash_object_dispatch
try:
import nvtx
annotate = nvtx.annotate
except ImportError:
# don't have nvtx installed - don't annotate our functions
def annotate(*args, **kwargs):
def inner1(func):
@functools.wraps(func)
def inner2(*args, **kwargs):
return func(*args, **kwargs)
return inner2
return inner1
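# Illustrative note (not part of the original module): in either branch `annotate` can be
# applied as a decorator factory, e.g. `traced_fn = annotate("example-range", color="blue")(some_fn)`.
# With nvtx installed the wrapped call is emitted as an NVTX range; otherwise it is a no-op
# wrapper. The "example-range"/color arguments are hypothetical values for illustration only.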
if HAS_GPU:
DataFrameType = Union[pd.DataFrame, cudf.DataFrame]
SeriesType = Union[pd.Series, cudf.Series]
else:
DataFrameType = Union[pd.DataFrame]
SeriesType = Union[pd.Series]
class ExtData(enum.Enum):
"""Simple Enum to track external-data types"""
DATASET = 0
ARROW = 1
CUDF = 2
PANDAS = 3
DASK_CUDF = 4
DASK_PANDAS = 5
PARQUET = 6
CSV = 7
def get_lib():
return cudf if HAS_GPU else pd
def _is_dataframe_object(x):
# Simple check if object is a cudf or pandas
# DataFrame object
if not HAS_GPU:
return isinstance(x, pd.DataFrame)
return isinstance(x, (cudf.DataFrame, pd.DataFrame))
def _is_series_object(x):
# Simple check if object is a cudf or pandas
# Series object
if not HAS_GPU:
return isinstance(x, pd.Series)
return isinstance(x, (cudf.Series, pd.Series))
def _is_cpu_object(x):
# Simple check if object is a pandas (CPU-backed)
# DataFrame or Series object
return isinstance(x, (pd.DataFrame, pd.Series))
def is_series_or_dataframe_object(maybe_series_or_df):
return _is_series_object(maybe_series_or_df) or _is_dataframe_object(maybe_series_or_df)
def _hex_to_int(s, dtype=None):
def _pd_convert_hex(x):
if pd.isnull(x):
return pd.NA
return int(x, 16)
if isinstance(s, pd.Series):
# Pandas Version
if s.dtype == "object":
s = s.apply(_pd_convert_hex)
return s.astype("Int64").astype(dtype or "Int32")
else:
# CuDF Version
if s.dtype == "object":
s = s.str.htoi()
return s.astype(dtype or np.int32)
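# Example (illustrative only): on the pandas path hex strings are parsed with int(x, 16), so
#   _hex_to_int(pd.Series(["ff", "0a", None]))
# yields a nullable Int32 series with values [255, 10, <NA>].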
def _random_state(seed, like_df=None):
"""Dispatch for numpy.random.RandomState"""
if not HAS_GPU or isinstance(like_df, (pd.DataFrame, pd.Series)):
return np.random.RandomState(seed)
else:
return cp.random.RandomState(seed)
def _arange(size, like_df=None, dtype=None):
"""Dispatch for numpy.arange"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.arange(size, dtype=dtype)
else:
return cp.arange(size, dtype=dtype)
def _array(x, like_df=None, dtype=None):
"""Dispatch for numpy.array"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.array(x, dtype=dtype)
else:
return cp.array(x, dtype=dtype)
def _zeros(size, like_df=None, dtype=None):
"""Dispatch for numpy.array"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.zeros(size, dtype=dtype)
else:
return cp.zeros(size, dtype=dtype)
def _hash_series(s):
"""Row-wise Series hash"""
if not HAS_GPU or isinstance(s, pd.Series):
# Using pandas hashing, which does not produce the
# same result as cudf.Series.hash_values(). Do not
# expect hash-based data transformations to be the
# same on CPU and GPU. TODO: Fix this (maybe use
# murmurhash3 manually on CPU).
return hash_object_dispatch(s).values
else:
if _is_list_dtype(s):
return s.list.leaves.hash_values()
else:
return s.hash_values()
def _natural_log(df):
"""Natural logarithm of all columns in a DataFrame"""
if isinstance(df, pd.DataFrame):
return pd.DataFrame(np.log(df.values), columns=df.columns, index=df.index)
else:
return df.log()
def _series_has_nulls(s):
"""Check if Series contains any null values"""
if isinstance(s, pd.Series):
return s.isnull().values.any()
else:
return s._column.has_nulls
def _is_list_dtype(ser):
"""Check if Series contains list elements"""
if not HAS_GPU or isinstance(ser, pd.Series):
if not len(ser): # pylint: disable=len-as-condition
return False
return pd.api.types.is_list_like(ser.values[0])
return is_list_dtype(ser)
def _is_string_dtype(obj):
if not HAS_GPU:
return pd.api.types.is_string_dtype(obj)
else:
return is_string_dtype(obj)
def _flatten_list_column(s):
"""Flatten elements of a list-based column"""
if isinstance(s, pd.Series):
return pd.DataFrame({s.name: itertools.chain(*s)})
else:
return cudf.DataFrame({s.name: s.list.leaves})
def _concat_columns(args: list):
"""Dispatch function to concatenate DataFrames with axis=1"""
if len(args) == 1:
return args[0]
else:
_lib = cudf if HAS_GPU and isinstance(args[0], cudf.DataFrame) else pd
return _lib.concat(
[a.reset_index(drop=True) for a in args],
axis=1,
)
return None
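# Example (illustrative only):
#   left = pd.DataFrame({"a": [1, 2]})
#   right = pd.DataFrame({"b": [3, 4]})
#   _concat_columns([left, right])  # -> one frame with columns ["a", "b"] on a fresh RangeIndex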
def _read_parquet_dispatch(df: DataFrameType) -> Callable:
return _read_dispatch(df=df, fmt="parquet")
def _read_dispatch(df: DataFrameType = None, cpu=None, collection=False, fmt="parquet") -> Callable:
"""Return the necessary read_parquet function to generate
data of a specified type.
"""
if cpu or isinstance(df, pd.DataFrame) or not HAS_GPU:
_mod = dd if collection else pd
else:
_mod = dask_cudf if collection else cudf.io
_attr = "read_csv" if fmt == "csv" else "read_parquet"
return getattr(_mod, _attr)
def _parquet_writer_dispatch(df: DataFrameType, path=None, **kwargs):
"""Return the necessary ParquetWriter class to write
data of a specified type.
If `path` is specified, an initialized `ParquetWriter`
object will be returned. To do this, the pyarrow schema
will be inferred from df, and kwargs will be used for the
ParquetWriter-initialization call.
"""
_args = []
if isinstance(df, pd.DataFrame):
_cls = pq.ParquetWriter
if path:
_args.append(pa.Table.from_pandas(df, preserve_index=False).schema)
else:
_cls = cudf.io.parquet.ParquetWriter
if not path:
return _cls
ret = _cls(path, *_args, **kwargs)
if isinstance(df, pd.DataFrame):
ret.write_table = lambda df: _cls.write_table(
ret, pa.Table.from_pandas(df, preserve_index=False)
)
return ret
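# Illustrative usage (hypothetical path and kwargs, not from the original module): for a pandas
# DataFrame, passing a path returns an initialized pyarrow ParquetWriter whose write_table
# accepts the frame directly:
#   writer = _parquet_writer_dispatch(df, path="out.parquet", compression="snappy")
#   writer.write_table(df)
#   writer.close()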
def _encode_list_column(original, encoded, dtype=None):
"""Convert `encoded` to be a list column with the
same offsets as `original`
"""
if isinstance(original, pd.Series):
# Pandas version (not very efficient)
offset = 0
new_data = []
for val in original.values:
size = len(val)
new_data.append(np.array(encoded[offset : offset + size], dtype=dtype))
offset += size
return pd.Series(new_data)
else:
# CuDF version
encoded = as_column(encoded)
if dtype:
encoded = encoded.astype(dtype, copy=False)
list_dtype = cudf.core.dtypes.ListDtype(encoded.dtype if dtype is None else dtype)
return build_column(
None,
dtype=list_dtype,
size=original.size,
children=(original._column.offsets, encoded),
)
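# Example (pandas branch, illustrative only): with original = pd.Series([[1, 2], [3]]) and
# encoded = [10, 20, 30], the result is pd.Series([array([10, 20]), array([30])]), i.e. the
# flat codes are re-split using the row lengths of the original list column.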
def _pull_apart_list(original):
values = _flatten_list_column(original)
if isinstance(original, pd.Series):
offsets = pd.Series([0]).append(original.map(len).cumsum())
else:
offsets = original._column.offsets
elements = original._column.elements
if isinstance(elements, cudf.core.column.lists.ListColumn):
offsets = elements.list(parent=original.list._parent)._column.offsets[offsets]
return values, offsets
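# Example (pandas branch, illustrative only): for pd.Series([[1, 2], [3]], name="x") this
# returns the flattened leaves [1, 2, 3] as a one-column frame plus offsets [0, 2, 3]
# marking where each row's values begin and end.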
def _to_arrow(x):
"""Move data to arrow format"""
if isinstance(x, pd.DataFrame):
return pa.Table.from_pandas(x, preserve_index=False)
else:
return x.to_arrow()
def _concat(objs, **kwargs):
if isinstance(objs[0], (pd.DataFrame, pd.Series)):
return pd.concat(objs, **kwargs)
else:
return cudf.core.reshape.concat(objs, **kwargs)
def _make_df(_like_df=None, device=None):
if not cudf or isinstance(_like_df, (pd.DataFrame, pd.Series)):
return pd.DataFrame(_like_df)
elif isinstance(_like_df, (cudf.DataFrame, cudf.Series)):
return cudf.DataFrame(_like_df)
elif isinstance(_like_df, dict) and len(_like_df) > 0:
is_pandas = all(isinstance(v, pd.Series) for v in _like_df.values())
return pd.DataFrame(_like_df) if is_pandas else cudf.DataFrame(_like_df)
if device == "cpu":
return
|
pd.DataFrame(_like_df)
from typing import Optional
import numpy as np
import pandas as pd
from fipie import tree
from fipie.cluster import ClusterAlgo, NoCluster
from fipie.weighting import Weighting
class Portfolio:
""" A portfolio of instrument returns """
def __init__(self, ret: pd.DataFrame):
""" Create a ``Portfolio`` instance
:param ret: time-series of instrument returns
:type ret: pd.DataFrame
.. note::
``ret`` is frequency agnostic -- i.e., it can be daily, weekly or any other frequency as long as
``fipie.date.infer_ts_frequency`` can infer its frequency.
"""
ret = self._preprocess_returns(ret)
self.ret = ret
def __repr__(self):
n_asset = self.ret.shape[1]
if n_asset == 1:
return f'Portfolio({n_asset} asset)'
else:
return f'Portfolio({n_asset} assets)'
def _preprocess_returns(self, ret) -> pd.DataFrame:
if isinstance(ret, pd.DataFrame):
            # No need to preprocess
return ret
elif isinstance(ret, pd.Series):
return ret.to_frame()
else:
raise ValueError(f'Unsupported data type for returns. Got {ret}')
def create_tree(self,
cluster: ClusterAlgo,
ret: Optional[pd.DataFrame] = None) -> tree.Tree:
""" Create a tree out of the return data frame
:param cluster: clustering algorithm instance
:type cluster: ClusterAlgo
:param ret: portfolio returns to use to create a tree. If not provided, use the returns provided upon
instantiation. If provided, this parameter will be used to create a tree instead.
:type ret: pd.DataFrame, optional
:return: ``Tree`` instance which groups instruments into clusters
"""
if ret is None:
ret = self.ret
return tree.create_tree(ret, cluster)
def _calculate_weight(self,
ret: pd.DataFrame,
weighting: Weighting,
cluster: ClusterAlgo,
instrument_only: bool = True,
final_weight: bool = True) -> pd.Series:
""" An inner function to compute the latest portfolio weights given the return, weighting scheme and clustering
algorithm.
:param ret: portfolio returns
:param weighting: weighting scheme instance
:param cluster: clustering algorithm instance
        :param instrument_only: If True, only weights for instruments are shown and intermediate nodes are omitted
        :param final_weight: If True, the final weight for each instrument is returned; otherwise the local weight is returned.
:return: weights for each node
"""
tree = self.create_tree(cluster, ret)
tree.set_local_weights(weighting)
result = [(i.node_id, i.local_weight, i.weight) for i in tree.nodes]
result = pd.DataFrame(result, columns=['node_id', 'local_weight', 'weight'])
result = result.set_index('node_id')
if instrument_only:
# only select rows that are in the original return time-series
instruments = ret.columns.tolist()
result = result.reindex(index=instruments)
if final_weight:
result = result['weight']
else:
result = result['local_weight']
return result
def weight_latest(self,
weighting: Weighting,
cluster: ClusterAlgo = NoCluster(),
instrument_only: bool = True,
final_weight: bool = True) -> pd.Series:
r""" Compute the latest portfolio weights using the full return time-series.
:param weighting: weighting scheme instance
:type weighting: Weighting
:param cluster: clustering algorithm instance
:type cluster: ClusterAlgo
        :param instrument_only: If True, only weights for instruments are shown and intermediate nodes are omitted
        :type instrument_only: bool, default True
        :param final_weight: If True, the final weight for each instrument is returned. The portfolio return
:math:`r` can then be calculated as follows:
.. math::
r = \sum_i w_i \cdot r_i
where :math:`i` is the index for each instrument, :math:`w_i` is the final weight for instrument :math:`i`,
and :math:`r_i` is the return for instrument :math:`i`.
:type final_weight: bool, default True
:return: weights for each node
:rtype: pd.Series
"""
result = self._calculate_weight(self.ret, weighting, cluster,
instrument_only=instrument_only,
final_weight=final_weight)
return result
def weight_historical(self,
weighting: Weighting,
cluster: ClusterAlgo = NoCluster(),
instrument_only: bool = True,
final_weight: bool = True,
freq: str = 'm',
lookback: int = 52 * 2) -> pd.DataFrame:
""" Compute the historical portfolio weights by applying the calculation on a rolling basis
:param weighting: weighting scheme instance
:type weighting: Weighting
:param cluster: clustering algorithm instance
:type cluster: ClusterAlgo
        :param instrument_only: If True, only weights for instruments are shown and intermediate nodes are omitted
        :type instrument_only: bool, default True
        :param final_weight: If True, the final weight for each instrument is returned.
:type final_weight: bool, default True
:param freq: frequency to update the portfolio weights.
:type freq: str, default 'm'
:param lookback: the number of return samples (lookback horizon) to compute the portfolio weights
:type lookback: int, default 52 * 2 (2 years with weekly observations)
:return: historical weights for each node
:rtype: pd.DataFrame
"""
# rebalance dates
dates = self.ret.asfreq(freq, method='pad').index
result = []
for i in dates:
ret = self.ret.loc[:i].tail(lookback)
if len(ret) == lookback:
weight = self._calculate_weight(ret, weighting, cluster,
instrument_only=instrument_only,
final_weight=final_weight)
weight = weight.to_frame(i).T
else:
weight = pd.Series(np.nan, index=ret.columns).to_frame(i).T
result.append(weight)
        result = pd.concat(result)
        return result
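# Usage sketch (hedged: `EqualWeight` stands in for whichever concrete Weighting
# subclass is available; the return data is random and purely illustrative):
#
#     >>> import numpy as np
#     >>> idx = pd.date_range('2020-01-03', periods=150, freq='W')
#     >>> ret = pd.DataFrame(np.random.normal(0, 0.01, (150, 3)),
#     ...                    index=idx, columns=['A', 'B', 'C'])
#     >>> p = Portfolio(ret)
#     >>> p.weight_latest(EqualWeight(), cluster=NoCluster())  # one weight per instrument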
"""
"""
import os
from pathlib import Path
import pandas as pd
from pandas._testing import assert_frame_equal
import matplotlib.pyplot as plt
def generate_measurements_plots(in_a_measurements_df, in_b_measurements_df,
band_mutations, plot_measurements,
oa_in_a_df, oa_in_b_df,
oa_band_mutations, oa_plot_measurements,
esa_oa_in_a_df, esa_oa_in_b_df,
esa_oa_band_mutations, esa_oa_plot_measurements,
sr_diff_all_sites,
m_title, oa_title, esa_oa_title, msr_diff_header,
prepare_and_filter, generate_df, plan):
"""Plot measurements and write the DataFrames used to name matched data files."""
plt.close('all')
plt.style.use(plan.get('plot_style'))
temp_a_df = None
temp_b_df = None
oa_temp_a_df = None
oa_temp_b_df = None
ratio_dfs = {}
if in_a_measurements_df is not None and in_b_measurements_df is not None:
for idx_band_ab, band_ab in enumerate(band_mutations):
m_fig, m_axs = plt.subplots(
(len(esa_oa_band_mutations) > 0 and 3) or 2,
1, figsize=(12, 10), squeeze=False)
temp_a_df, temp_b_df, temp_c_df = prepare_and_filter.prepare_ab_data(
in_a_measurements_df, in_b_measurements_df, None,
plan.get('band_col'),
band_ab[0], band_ab[1], None,
plan.get('in_a_measurements_min_valid_pixel_percentage'),
plan.get('in_b_measurements_min_valid_pixel_percentage'),
None,
plot_measurements[idx_band_ab][0],
plan.get('sr_measurements_date_filtering'), plan)
msr_diff_min, msr_diff_max, msr_diff_mean = prepare_and_filter.get_min_max_mean(temp_a_df, temp_b_df, plot_measurements[idx_band_ab][0], plan)
            msr_diff_df = pd.DataFrame(columns=msr_diff_header)
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 06 09:49:33 2015
@author: JMS
"""
import random
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from scipy.linalg import orth
from occupancy_map import Map,ZMap
from ptp import LocalArea,PointToPoint,matrixrank, anglebetween
from math import degrees
import json
import threading
from multiprocessing.pool import ThreadPool
from contextlib import closing
import scipy.spatial as spt
class PointType:
calibrated = "CALIBRATED" # Points that have both map coordinates
non_calibrated = "NON_CALIBRATED" # Points with map1 coordinates but not with map2.
target = "TARGET" # Points with map1 but that only can be predicted to map2.
acquired = "ACQUIRED" # Points with only map2 but with no information about map1
unknown = "NA"
class State:
"""
    The State class is a special attribute that is independent of the PointType.
    The PointType is a static property that gives identity to the point,
    whereas the state is temporary and can be altered.
"""
protected = "PROTECTED" # Point has been manually overwritten and cannot be modified
blocked = "BLOCKED"
zeroed = "" # No especial states
class virtualGridMap(object):
"""
    A virtual map is a class that collects all the information of the grid and tries
    to predict unknown positions.
    It considers two homologous maps and establishes correspondences between them.
E.g.:
- Given a LM coordinate, returns the corresponding estimation of the SEM (not possible in LM map)
- Given a letter returns the corresponding coordinates of the estimated center
- Given a coordinate, estimate the letter where we are going to land
Representation of the points
    We have selected 4 different kinds of points:
- Non Calibrated NC: points coming from LM without assigned correspondence, used for calibration
- Calibrated C: points coming from LM, with the correspondent SEM coordinates, used for calibration
- Targets T: points coming from LM used for targeting
- Acquisition Acq: points acquired on the fly
    Instead of saving the points in 4 different lists, we save all of them in one array and then
    keep the indices for each category (Ind).
    That allows a point to belong to more than one category, and makes it easy to introduce
    more categories of points.
    The map can be 2D or 3D.
"""
__metaclass__ = ABCMeta
warning_transformation =""
map_lock = threading.Lock()
def __init__(self,logger, force2D =False, parent = None):
self.logger = logger
self.current_pos = "" # Landmark reference
self.last_point_added = ""
# LANDMARK
        # We use a DataFrame instead of a class because exporting to a file is
        # immediate, and so is managing lists of arrays.
        # In design terms, having a Landmark class would be much better, but in practice it
        # slows things down. The following is a mixture between class and database, linked by the landmark ID.
self.columns = [ 'LANDMARK','TYPE', 'STATE',
'UPDATE_ORIGIN','UPDATE_DESTINY','UPDATE_TAG',
'COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y', 'COORDS_ORIGIN_Z',
'COORDS_DESTINY_X', 'COORDS_DESTINY_Y', 'COORDS_DESTINY_Z']
#
self.rms_avg = []
self.rms_sd = []
self.columns_corigin = ['LANDMARK','BELIEF','COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y', 'COORDS_ORIGIN_Z']
self.columns_cdestiny =['LANDMARK','BELIEF','COORDS_DESTINY_X', 'COORDS_DESTINY_Y', 'COORDS_DESTINY_Z']
if(force2D):
self.col_dim_coords_origin = ['COORDS_ORIGIN_X','COORDS_ORIGIN_Y']
self.col_dim_coords_destiny = ['COORDS_DESTINY_X','COORDS_DESTINY_Y']
else:
self.col_dim_coords_origin = ['COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y','COORDS_ORIGIN_Z']
self.col_dim_coords_destiny = ['COORDS_DESTINY_X', 'COORDS_DESTINY_Y','COORDS_DESTINY_Z']
self.col_reset = ['RMS_AVG','RMS_SD']
self.map_df = pd.DataFrame(columns=self.columns)
self.cor_df = pd.DataFrame(columns=self.columns_corigin)
self.cde_df = pd.DataFrame(columns=self.columns_cdestiny)
self.list_local_area = {} # every point can have a radius of action
# List of error associated to each point
self.list_errorOrigin = {}
self.list_errorDestiny = {}
self.map_exists = False
self.map_id = "map1_map2"
self.CalibratedPtp = PointToPoint()
self.GlobalPtp = PointToPoint()
# Occupancy map
self.grid_map = Map(1)
self.orientation = 0
@staticmethod
def dist_microns(x, y):
return np.sqrt(np.sum((x - y) ** 2)) * 1000.0 ## Error in um
@staticmethod
def dist(x, y):
if (x[0] == np.inf or x[1] == np.inf or y[0] == np.inf or y[1] == np.inf):
return np.inf
else:
return np.sqrt(np.sum((x - y) ** 2))
def checkValidSystem(self, calculateOrientation = False):
# Get all calibration points
coordsOrigin, coordsDestiny, pids = self.getLandmarksByType(PointType.calibrated)
coordsDestiny = coordsDestiny[:,0:2]
if(matrixrank(coordsDestiny,1)>=2):
# TODO : calculate orientation based on data
# A = orth(coordsDestiny)
# angle = anglebetween(A[0],[1,0])
#if(calculateOrientation):
# self.orientation = np.rad2deg(angle) # this angle has to b
            return True
        return False
    @staticmethod
    def unit_vector(vector):
""" Returns the unit vector of the vector. """
eps = np.finfo(np.float32).eps
if (np.sum(np.linalg.norm(vector)) < eps):
return vector
return vector / np.linalg.norm(vector)
    @staticmethod
    def collinear(p0, p1, p2):
x1, y1 = p1[0] - p0[0], p1[1] - p0[1]
x2, y2 = p2[0] - p0[0], p2[1] - p0[1]
val = x1 * y2 - x2 * y1
return abs(val) < 1e-2
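    # Worked example for collinear (illustrative): with p0=(0, 0), p1=(1, 1), p2=(2, 2)
    # the 2D cross product x1*y2 - x2*y1 = 1*2 - 2*1 = 0, so the points are reported as
    # collinear; the 1e-2 tolerance absorbs small numerical noise.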
def loadMap(self,dict_map):
# Split in 3 dictionaries
stmap = dict_map['MAP']
stcor = dict_map['COR']
stcde = dict_map['CDE']
self.map_df = pd.read_json(stmap)
self.cor_df = pd.read_json(stcor)
        self.cde_df = pd.read_json(stcde)
import pytest
from pandas import (
Index,
Series,
date_range,
)
import pandas._testing as tm
class TestSeriesDelItem:
def test_delitem(self):
# GH#5542
# should delete the item inplace
s = Series(range(5))
del s[0]
expected = Series(range(1, 5), index=range(1, 5))
tm.assert_series_equal(s, expected)
del s[1]
expected = Series(range(2, 5), index=range(2, 5))
tm.assert_series_equal(s, expected)
# only 1 left, del, add, del
s = Series(1)
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
s[0] = 1
tm.assert_series_equal(s, Series(1))
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
def test_delitem_object_index(self):
# Index(dtype=object)
s = Series(1, index=["a"])
del s["a"]
tm.assert_series_equal(
s, Series(dtype="int64", index=Index([], dtype="object"))
)
s["a"] = 1
tm.assert_series_equal(s, Series(1, index=["a"]))
del s["a"]
tm.assert_series_equal(
s, Series(dtype="int64", index=
|
Index([], dtype="object")
|
pandas.Index
|
# %% 0.0 Imports
import pandas as pd
import inflection
import math
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from IPython.core.display import HTML
from IPython.display import Image
# %% 0.1 Helper Functions
# %% 0.1 Loading data
df_sales_raw = pd.read_csv('data/train.csv', low_memory=False)
from gensim.models import KeyedVectors
from gensim.models import word2vec
import numpy as np
import pandas as pd
import re
import datetime
from operator import itemgetter
from random import randint
import seaborn as sns
import matplotlib.pyplot as plt
import wget
import os
import time
import string
import dill
import pickle
from nltk import *
from nltk import wordpunct_tokenize, WordNetLemmatizer, sent_tokenize, pos_tag
from nltk.corpus import stopwords as sw, wordnet as wn
from nltk.stem.snowball import SnowballStemmer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import LabelEncoder, FunctionTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import precision_score, accuracy_score, confusion_matrix, classification_report as clsr
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer, CountVectorizer
from sklearn.model_selection import GridSearchCV, train_test_split as tts
from sklearn.manifold import TSNE
from sklearn.multiclass import OneVsRestClassifier
import tensorflow as tf
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model, model_from_json
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers import Dense, LSTM, SpatialDropout1D, Activation, Conv1D, MaxPooling1D, Input, concatenate
from keras.utils.np_utils import to_categorical
class train:
def __init__(self, corpus):
self.max_sentence_len = 300
self.max_features = 300
self.embed_dim = 300
self.lstm_out = 180
self.dropout_lstm = 0.3
self.recurrent_dropout_lstm = 0.3
self.dropout = 0.3
self.conv_nfilters = 128
self.conv_kernel_size = 8
self.max_pool_size = 2
self.NLTKPreprocessor = self.NLTKPreprocessor(corpus)
#self.MyRNNTransformer = self.MyRNNTransformer()
class NLTKPreprocessor(BaseEstimator, TransformerMixin):
"""
Transforms input data by using NLTK tokenization, POS tagging, lemmatization and vectorization.
"""
def __init__(self, corpus, max_sentence_len = 300, stopwords=None, punct=None, lower=True, strip=True):
"""
Instantiates the preprocessor.
"""
self.lower = lower
self.strip = strip
self.stopwords = set(stopwords) if stopwords else set(sw.words('english'))
self.punct = set(punct) if punct else set(string.punctuation)
self.lemmatizer = WordNetLemmatizer()
self.corpus = corpus
self.max_sentence_len = max_sentence_len
def fit(self, X, y=None):
"""
Fit simply returns self.
"""
return self
def inverse_transform(self, X):
"""
No inverse transformation.
"""
return X
def transform(self, X):
"""
Actually runs the preprocessing on each document.
"""
output = np.array([(self.tokenize(doc)) for doc in X])
return output
def tokenize(self, document):
"""
Returns a normalized, lemmatized list of tokens from a document by
applying segmentation, tokenization, and part of speech tagging.
Uses the part of speech tags to look up the lemma in WordNet, and returns the lowercase
version of all the words, removing stopwords and punctuation.
"""
lemmatized_tokens = []
# Clean the text
document = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", document)
document = re.sub(r"what's", "what is ", document)
document = re.sub(r"\'s", " ", document)
document = re.sub(r"\'ve", " have ", document)
document = re.sub(r"can't", "cannot ", document)
document = re.sub(r"n't", " not ", document)
document = re.sub(r"i'm", "i am ", document)
document = re.sub(r"\'re", " are ", document)
document = re.sub(r"\'d", " would ", document)
document = re.sub(r"\'ll", " will ", document)
document = re.sub(r"(\d+)(k)", r"\g<1>000", document)
# Break the document into sentences
for sent in sent_tokenize(document):
# Break the sentence into part of speech tagged tokens
for token, tag in pos_tag(wordpunct_tokenize(sent)):
# Apply preprocessing to the token
token = token.lower() if self.lower else token
token = token.strip() if self.strip else token
token = token.strip('_') if self.strip else token
token = token.strip('*') if self.strip else token
# If punctuation or stopword, ignore token and continue
if token in self.stopwords or all(char in self.punct for char in token):
continue
# Lemmatize the token
lemma = self.lemmatize(token, tag)
lemmatized_tokens.append(lemma)
doc = ' '.join(lemmatized_tokens)
tokenized_document = self.vectorize(np.array(doc)[np.newaxis])
return tokenized_document
def vectorize(self, doc):
"""
Returns a vectorized padded version of sequences.
"""
save_path = "Data/padding.pickle"
with open(save_path, 'rb') as f:
tokenizer = pickle.load(f)
doc_pad = tokenizer.texts_to_sequences(doc)
doc_pad = pad_sequences(doc_pad, padding='pre', truncating='pre', maxlen=self.max_sentence_len)
return np.squeeze(doc_pad)
def lemmatize(self, token, tag):
"""
Converts the Penn Treebank tag to a WordNet POS tag, then uses that
tag to perform WordNet lemmatization.
"""
tag = {
'N': wn.NOUN,
'V': wn.VERB,
'R': wn.ADV,
'J': wn.ADJ
}.get(tag[0], wn.NOUN)
return self.lemmatizer.lemmatize(token, tag)
class MyRNNTransformer(BaseEstimator, TransformerMixin):
"""
Transformer allowing our Keras model to be included in our pipeline
"""
def __init__(self, classifier):
self.classifier = classifier
def fit(self, X, y):
            batch_size = 32
            epochs = 135
self.classifier.fit(X, y, epochs=epochs, batch_size=batch_size, verbose=2)
return self
def transform(self, X):
self.pred = self.classifier.predict(X)
self.classes = [[0 if el < 0.2 else 1 for el in item] for item in self.pred]
return self.classes
def multiclass_accuracy(self,predictions, target):
"Returns the multiclass accuracy of the classifier's predictions"
score = []
for j in range(0, 5):
count = 0
for i in range(len(predictions)):
if predictions[i][j] == target[i][j]:
count += 1
score.append(count / len(predictions))
return score
def load_google_vec(self):
url = 'https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz'
#wget.download(url, 'Data/GoogleNews-vectors.bin.gz')
return KeyedVectors.load_word2vec_format(
'Data/GoogleNews-vectors.bin.gz',
binary=True)
def lemmatize_token(self, token, tag):
tag = {
'N': wn.NOUN,
'V': wn.VERB,
'R': wn.ADV,
'J': wn.ADJ
}.get(tag[0], wn.NOUN)
return WordNetLemmatizer().lemmatize(token, tag)
def get_preprocessed_corpus(self, X_corpus):
"""
        Returns a preprocessed version of a full corpus (i.e., tokenization and lemmatization using POS tags)
"""
X = ' '.join(X_corpus)
lemmatized_tokens = []
# Break the document into sentences
for sent in sent_tokenize(X):
# Break the sentence into part of speech tagged tokens
for token, tag in pos_tag(wordpunct_tokenize(sent)):
# Apply preprocessing to the token
token = token.lower()
token = token.strip()
token = token.strip('_')
token = token.strip('*')
# If punctuation or stopword, ignore token and continue
if token in set(sw.words('english')) or all(char in set(string.punctuation) for char in token):
continue
                # Lemmatize the token and append it
lemma = self.lemmatize_token(token, tag)
lemmatized_tokens.append(lemma)
doc = ' '.join(lemmatized_tokens)
return doc
def prepare_embedding(self, X):
"""
        Returns the embedding weights matrix, the word index, and the word-vector dictionary corresponding
to the training corpus set of words.
"""
# Load Word2Vec vectors
word2vec = self.load_google_vec()
# Fit and apply an NLTK tokenizer on the preprocessed training corpus to obtain sequences.
tokenizer = Tokenizer(num_words=self.max_features)
X_pad = self.get_preprocessed_corpus(X)
        tokenizer.fit_on_texts(pd.Series(X_pad))
#Based on https://www.kaggle.com/tezdhar/wordbatch-with-memory-test
import gc
import time
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
import psutil
import os
import wordbatch
from wordbatch.extractors import WordBag
from wordbatch.models import FTRL, FM_FTRL
from nltk.corpus import stopwords
import re
def rmsle(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))
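# Sanity check of the metric (illustrative numbers): identical predictions give 0, and
# the log1p makes the penalty relative rather than absolute, e.g.
#
#     >>> rmsle(np.array([1.0, 10.0, 100.0]), np.array([1.0, 10.0, 100.0]))
#     0.0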
def handle_missing_inplace(dataset):
dataset['description'].fillna(value='na', inplace=True)
dataset["image"].fillna("noinformation", inplace=True)
dataset["param_1"].fillna("nicapotato", inplace=True)
dataset["param_2"].fillna("nicapotato", inplace=True)
dataset["param_3"].fillna("nicapotato", inplace=True)
dataset['image_top_1'].fillna(value=-1, inplace=True)
dataset['price'].fillna(value=0, inplace=True)
def to_categorical(dataset):
dataset['param_1'] = dataset['param_1'].astype('category')
dataset['param_2'] = dataset['param_2'].astype('category')
dataset['param_3'] = dataset['param_3'].astype('category')
dataset['image_top_1'] = dataset['image_top_1'].astype('category')
dataset['image'] = dataset['image'].astype('category')
dataset['price'] = dataset['price'].astype('category')
#counting
dataset['num_desc_punct'] = dataset['num_desc_punct'].astype('category')
dataset['num_desc_capE'] = dataset['num_desc_capE'].astype('category')
dataset['num_desc_capP'] = dataset['num_desc_capP'].astype('category')
dataset['num_title_punct'] = dataset['num_title_punct'].astype('category')
dataset['num_title_capE'] = dataset['num_title_capE'].astype('category')
dataset['num_title_capP'] = dataset['num_title_capP'].astype('category')
dataset['is_in_desc_хорошо'] = dataset['is_in_desc_хорошо'].astype('category')
dataset['is_in_desc_Плохо'] = dataset['is_in_desc_Плохо'].astype('category')
dataset['is_in_desc_новый'] = dataset['is_in_desc_новый'].astype('category')
dataset['is_in_desc_старый'] = dataset['is_in_desc_старый'].astype('category')
dataset['is_in_desc_используемый'] = dataset['is_in_desc_используемый'].astype('category')
dataset['is_in_desc_есплатная_доставка'] = dataset['is_in_desc_есплатная_доставка'].astype('category')
dataset['is_in_desc_есплатный_возврат'] = dataset['is_in_desc_есплатный_возврат'].astype('category')
dataset['is_in_desc_идеально'] = dataset['is_in_desc_идеально'].astype('category')
dataset['is_in_desc_подержанный'] = dataset['is_in_desc_подержанный'].astype('category')
dataset['is_in_desc_пСниженные_цены'] = dataset['is_in_desc_пСниженные_цены'].astype('category')
#region
dataset['region'] = dataset['region'].astype('category')
dataset['city'] = dataset['city'].astype('category')
dataset['user_type'] = dataset['user_type'].astype('category')
dataset['category_name'] = dataset['category_name'].astype('category')
dataset['parent_category_name'] = dataset['parent_category_name'].astype('category')
# dataset['price+'] = dataset['price+'].astype('category')
# dataset['desc_len'] = dataset['desc_len'].astype('category')
# dataset['title_len'] = dataset['title_len'].astype('category')
# dataset['title_desc_len_ratio'] = dataset['title_desc_len_ratio'].astype('category')
# dataset['desc_word_count'] = dataset['desc_word_count'].astype('category')
# dataset['mean_des'] = dataset['mean_des'].astype('category')
# Define helpers for text normalization
stopwords = {x: 1 for x in stopwords.words('russian')}
non_alphanums = re.compile(u'[^A-Za-z0-9]+')
def normalize_text(text):
# if np.isnan(text): text='na'
return u" ".join(
        [x for x in non_alphanums.sub(' ', text).lower().strip().split(" ")
         if len(x) > 1 and x not in stopwords])
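# Example (illustrative input): the regex only keeps Latin letters and digits, so
# Cyrillic words are dropped entirely, and only tokens longer than one character that
# are not in the Russian stopword set survive, e.g.
#
#     >>> normalize_text("Продам iPhone 7, почти новый!")
#     'iphone'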
develop = True
# develop= False
if __name__ == '__main__':
start_time = time.time()
from time import gmtime, strftime
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import re
from scipy.sparse import hstack
from nltk.corpus import stopwords
from contextlib import contextmanager
@contextmanager
def timer(name):
t0 = time.time()
yield
print('[{}] done in {:.0f} s'.format(name, (time.time() - t0)))
import string
print(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
print("\nData Load Stage")
# , nrows = nrows
nrows=10000*1
training = pd.read_csv('../input/train.csv', index_col="item_id", parse_dates=["activation_date"])
len_train = len(training)
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col="item_id", parse_dates=["activation_date"])
testdex = testing.index
# labels = training['deal_probability'].values
y = training.deal_probability.copy()
training.drop("deal_probability", axis=1, inplace=True)
# suppl
# used_cols = ["item_id", "user_id"]
# train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
# test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
# train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
# test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])
training = training.merge(tmp, on="region", how="left")
testing = testing.merge(tmp, on="region", how="left")
del tmp;
gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/city_population_wiki_v3.csv",)
training = training.merge(tmp, on="city", how="left")
testing = testing.merge(tmp, on="city", how="left")
del tmp;
gc.collect()
import pickle
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns=['blurinesses'])
    incep_test_image_df = pd.DataFrame(test_blurinesses, columns=['blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns=['whitenesses'])
    incep_test_image_df = pd.DataFrame(test_whitenesses, columns=['whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
training = training.join(incep_train_image_df.set_index('image'), on='image')
testing = testing.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x;
gc.collect()
    incep_train_image_df = pd.DataFrame(train_dullnesses, columns=['dullnesses'])
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.base import TransformerMixin
import pandas as pd
import csv
import re
class AsciiTransformer(TransformerMixin):
def transform(self,X,**transform_params):
        if not isinstance(X, pd.Series):
            X = pd.Series(X)
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
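# For example (hedged illustration): tz_to_dtype(None) gives np.dtype("M8[ns]"), while a
# concrete tzinfo such as pytz.UTC gives DatetimeTZDtype(tz=pytz.UTC).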
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
# tz info from start and day to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.astype("M8[ns]", copy=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(index) and index[0] == start:
index = index[1:]
if not right_inclusive and len(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if isinstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif is_datetime64_ns_dtype(dtype):
return astype_dt64_to_dt64tz(self, dtype, copy, via_utc=False)
elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
# unit conversion e.g. datetime64[s]
return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
# vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
def _add_offset(self, offset) -> DatetimeArray:
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> np.ndarray:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray[object]
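Examples
--------
A small illustration (the output repr shown is indicative):
>>> idx = pd.date_range('2018-02-27', periods=2)
>>> idx.to_pydatetime()
array([datetime.datetime(2018, 2, 27, 0, 0),
       datetime.datetime(2018, 2, 28, 0, 0)], dtype=object)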
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
@dtl.ravel_compat
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
# Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DatetimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DatetimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "day_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
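Examples
--------
Illustrative (output formatting may vary slightly):
>>> idx = pd.DatetimeIndex(['2020-01-01 10:00', '2020-01-01 11:00'])
>>> idx.time
array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object)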
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time")
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
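Examples
--------
Illustrative (output formatting may vary slightly):
>>> idx = pd.DatetimeIndex(['2020-01-01 10:00', '2020-02-01 11:00'])
>>> idx.date
array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object)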
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="date")
def isocalendar(self) -> DataFrame:
"""
Returns a DataFrame with the year, week, and day calculated according to
the ISO 8601 standard.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
with columns year, week and day
See Also
--------
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
2019-12-30 2020 1 1
2019-12-31 2020 1 2
2020-01-01 2020 1 3
>>> idx.isocalendar().week
2019-12-29 52
2019-12-30 1
2019-12-31 1
2020-01-01 1
Freq: D, Name: week, dtype: UInt32
"""
from pandas import DataFrame
values = self._local_timestamps()
sarray = fields.build_isocalendar_sarray(values)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
if self._hasnans:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
@property
def weekofyear(self):
"""
The week ordinal of the year.
.. deprecated:: 1.1.0
weekofyear and week have been deprecated.
Please use DatetimeIndex.isocalendar().week instead.
"""
warnings.warn(
"weekofyear and week have been deprecated, please use "
"DatetimeIndex.isocalendar().week instead, which returns "
"a Series. To exactly reproduce the behavior of week and "
"weekofyear and return an Index, you may call "
"pd.Int64Index(idx.isocalendar().week)",
FutureWarning,
stacklevel=3,
)
week_series = self.isocalendar().week
if week_series.hasnans:
return week_series.to_numpy(dtype="float64", na_value=np.nan)
return week_series.to_numpy(dtype="int64")
week = weekofyear
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="Y")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int64
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="M")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int64
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int64
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int64
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="T")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int64
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int64
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int64
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int64
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int64
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
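Examples
--------
Illustrative:
>>> s = pd.Series(pd.to_datetime(["2020-01-01", "2020-02-01"]))
>>> s.dt.dayofyear
0     1
1    32
dtype: int64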
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
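Examples
--------
Illustrative:
>>> s = pd.Series(pd.to_datetime(["2020-02-01", "2020-05-01"]))
>>> s.dt.quarter
0    1
1    2
dtype: int64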
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
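Examples
--------
Illustrative (2020 is a leap year):
>>> s = pd.Series(pd.to_datetime(["2020-02-01", "2020-03-01"]))
>>> s.dt.days_in_month
0    29
1    31
dtype: int64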
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year which has 366 days (instead of 365), including
the 29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self) -> np.ndarray:
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (
day
+ np.fix((153 * month - 457) / 5)
+ 365 * year
+ np.floor(year / 4)
- np.floor(year / 100)
+ np.floor(year / 400)
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60
+ self.second / 3600
+ self.microsecond / 3600 / 10 ** 6
+ self.nanosecond / 3600 / 10 ** 9
)
/ 24
)
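# Illustrative check (not part of the original source): noon on 2000-01-01
# corresponds to Julian Date 2451545.0, so
#   pd.DatetimeIndex(["2000-01-01 12:00"]).to_julian_date()
# yields 2451545.0 (the exact container type/repr may vary by version).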
# -----------------------------------------------------------------
# Reductions
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
tda = TimedeltaArray(self._ndarray.view("i8"))
return tda.std(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna
)
# -------------------------------------------------------------------
# Constructor Helpers
@overload
def sequence_to_datetimes(
data, allow_object: Literal[False] = ..., require_iso8601: bool = ...
) -> DatetimeArray:
...
@overload
def sequence_to_datetimes(
data, allow_object: Literal[True] = ..., require_iso8601: bool = ...
) -> np.ndarray | DatetimeArray:
...
def sequence_to_datetimes(
data, allow_object: bool = False, require_iso8601: bool = False
) -> np.ndarray | DatetimeArray:
"""
Parse/convert the passed data to either DatetimeArray or np.ndarray[object].
"""
result, tz, freq = sequence_to_dt64ns(
data,
allow_object=allow_object,
allow_mixed=True,
require_iso8601=require_iso8601,
)
if result.dtype == object:
return result
dtype = tz_to_dtype(tz)
dta = DatetimeArray._simple_new(result, freq=freq, dtype=dtype)
return dta
def sequence_to_dt64ns(
data,
dtype=None,
copy=False,
tz=None,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
*,
allow_object: bool = False,
allow_mixed: bool = False,
require_iso8601: bool = False,
):
"""
Parameters
----------
data : list-like
dtype : dtype, str, or None, default None
copy : bool, default False
tz : tzinfo, str, or None, default None
dayfirst : bool, default False
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
allow_object : bool, default False
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
require_iso8601 : bool, default False
Only consider ISO-8601 formats when parsing strings.
Returns
-------
result : numpy.ndarray
The sequence converted to a numpy array with dtype ``datetime64[ns]``.
tz : tzinfo or None
Either the user-provided tzinfo or one inferred from the data.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
TypeError : PeriodDtype data is passed
"""
inferred_freq = None
dtype = _validate_dt64_dtype(dtype)
tz = timezones.maybe_get_tz(tz)
# if dtype has an embedded tz, capture it
tz = validate_tz_from_dtype(dtype, tz)
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.asarray(data)
copy = False
elif isinstance(data, ABCMultiIndex):
raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
else:
data = extract_array(data, extract_numpy=True)
if isinstance(data, IntegerArray):
data = data.to_numpy("int64", na_value=iNaT)
elif not isinstance(data, (np.ndarray, ExtensionArray)):
# GH#24539 e.g. xarray, dask object
data = np.asarray(data)
if isinstance(data, DatetimeArray):
inferred_freq = data.freq
# By this point we are assured to have either a numpy array or Index
data, copy = maybe_convert_dtype(data, copy)
data_dtype = getattr(data, "dtype", None)
if (
is_object_dtype(data_dtype)
or is_string_dtype(data_dtype)
or is_sparse(data_dtype)
):
# TODO: We do not have tests specific to string-dtypes,
# also complex or categorical or other extension
copy = False
if lib.infer_dtype(data, skipna=False) == "integer":
data = data.astype(np.int64)
else:
# data comes back here as either i8 to denote UTC timestamps
# or M8[ns] to denote wall times
data, inferred_tz = objects_to_datetime64ns(
data,
dayfirst=dayfirst,
yearfirst=yearfirst,
allow_object=allow_object,
allow_mixed=allow_mixed,
require_iso8601=require_iso8601,
)
if tz and inferred_tz:
# two timezones: convert to intended from base UTC repr
data = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
data = data.view(DT64NS_DTYPE)
elif inferred_tz:
tz = inferred_tz
elif allow_object and data.dtype == object:
# We encountered mixed-timezones.
return data, None, None
data_dtype = data.dtype
# `data` may have originally been a Categorical[datetime64[ns, tz]],
# so we need to handle these types.
if is_datetime64tz_dtype(data_dtype):
# DatetimeArray -> ndarray
tz = _maybe_infer_tz(tz, data.tz)
result = data._ndarray
elif is_datetime64_dtype(data_dtype):
# tz-naive DatetimeArray or ndarray[datetime64]
data = getattr(data, "_ndarray", data)
if data.dtype != DT64NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
copy = False
if tz is not None:
# Convert tz-naive to UTC
tz = timezones.maybe_get_tz(tz)
data = tzconversion.tz_localize_to_utc(
data.view("i8"), tz, ambiguous=ambiguous
)
data = data.view(DT64NS_DTYPE)
assert data.dtype == DT64NS_DTYPE, data.dtype
result = data
else:
# must be integer dtype otherwise
# assume these are epoch timestamps
if tz:
tz = timezones.maybe_get_tz(tz)
if data.dtype != INT64_DTYPE:
data = data.astype(np.int64, copy=False)
result = data.view(DT64NS_DTYPE)
if copy:
result = result.copy()
assert isinstance(result, np.ndarray), type(result)
assert result.dtype == "M8[ns]", result.dtype
# We have to call this again after possibly inferring a tz above
validate_tz_from_dtype(dtype, tz)
return result, tz, inferred_freq
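# Rough usage sketch for the helper above (illustrative; this is internal API):
#   result, tz, freq = sequence_to_dt64ns(["2020-01-01", "2020-01-02"], tz="UTC")
#   # result: ndarray with dtype datetime64[ns] holding the UTC values
#   # tz: the resolved UTC tzinfo object
#   # freq: None, since no frequency can be carried over from a plain list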
def objects_to_datetime64ns(
data: np.ndarray,
dayfirst,
yearfirst,
utc=False,
errors="raise",
require_iso8601: bool = False,
allow_object: bool = False,
allow_mixed: bool = False,
):
"""
Convert data to array of timestamps.
Parameters
----------
data : np.ndarray[object]
dayfirst : bool
yearfirst : bool
utc : bool, default False
Whether to convert timezone-aware timestamps to UTC.
errors : {'raise', 'ignore', 'coerce'}
require_iso8601 : bool, default False
allow_object : bool
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
Returns
-------
result : ndarray
np.int64 dtype if returned values represent UTC timestamps
np.datetime64[ns] if returned values represent wall times
object if mixed timezones
inferred_tz : tzinfo or None
Raises
------
ValueError : if data cannot be converted to datetimes
"""
assert errors in ["raise", "ignore", "coerce"]
# if str-dtype, convert
data = np.array(data, copy=False, dtype=np.object_)
flags = data.flags
order: Literal["F", "C"] = "F" if flags.f_contiguous else "C"
try:
result, tz_parsed = tslib.array_to_datetime(
data.ravel("K"),
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
require_iso8601=require_iso8601,
allow_mixed=allow_mixed,
)
result = result.reshape(data.shape, order=order)
except ValueError as err:
try:
values, tz_parsed = conversion.datetime_to_datetime64(data.ravel("K"))
# If tzaware, these values represent unix timestamps, so we
# return them as i8 to distinguish from wall times
values = values.reshape(data.shape, order=order)
return values.view("i8"), tz_parsed
except (ValueError, TypeError):
raise err
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
# Return i8 values to denote unix timestamps
return result.view("i8"), tz_parsed
elif is_datetime64_dtype(result):
# returning M8[ns] denotes wall-times; since tz is None
# the distinction is a thin one
return result, tz_parsed
elif is_object_dtype(result):
# GH#23675 when called via `pd.to_datetime`, returning an object-dtype
# array is allowed. When called via `pd.DatetimeIndex`, we can
# only accept datetime64 dtype, so raise TypeError if object-dtype
# is returned, as that indicates the values can be recognized as
# datetimes but they have conflicting timezones/awareness
if allow_object:
return result, tz_parsed
raise TypeError(result)
else: # pragma: no cover
# GH#23675 this TypeError should never be hit, whereas the TypeError
# in the object-dtype branch above is reachable.
raise TypeError(result)
def maybe_convert_dtype(data, copy: bool):
"""
Convert data based on dtype conventions, issuing deprecation warnings
or errors where appropriate.
Parameters
----------
data : np.ndarray or pd.Index
copy : bool
Returns
-------
data : np.ndarray or pd.Index
copy : bool
Raises
------
TypeError : PeriodDtype data is passed
"""
if not hasattr(data, "dtype"):
# e.g. collections.deque
return data, copy
if
|
is_float_dtype(data.dtype)
|
pandas.core.dtypes.common.is_float_dtype
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3.8.8 64-bit ('cam')
# language: python
# name: python388jvsc74a57bd0acafb728b15233fa3654ff8b422c21865df0ca42ea3b74670e1f2f098ebd61ca
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/python-logo-notext.svg"
# style="display:block;margin:auto;width:10%"/>
# <h1 style="text-align:center;">Python: Pandas Series</h1>
# <h2 style="text-align:center;">Coding Akademie München GmbH</h2>
# <br/>
# <div style="text-align:center;">Dr. <NAME></div>
# <div style="text-align:center;"><NAME></div>
# %% [markdown] slideshow={"slide_type": "slide"}
#
# # The `Series` Type
#
# The pandas type `Series` represents a sequence of values that can be indexed numerically like a Python list, but it can also carry a semantically meaningful index, e.g. dates for time series.
#
# Internally, a `Series` object is backed by a NumPy array, so most operations on NumPy arrays also apply to pandas `Series` objects.
# %%
import numpy as np
import pandas as pd
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Creating Series
#
# ### From Lists
# %%
pd.Series(data=[10, 20, 30, 40])
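# %% [markdown]
# A small illustrative addition (not part of the original notebook): a `Series`
# with a date index, as used for time series data.
# %%
pd.Series([1.0, 2.0, 3.0], index=pd.date_range("2021-01-01", periods=3))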
# %%
|
pd.Series(['a', 'b', 'c'])
|
pandas.Series
|
# -*- coding: utf-8 -*-
import os
import pickle
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test property decorator methods
################################################
def test_learning_policy_property(self):
for lp in BaseTest.lps:
mab = MAB([1, 2], lp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(lp))
for para_lp in BaseTest.para_lps:
mab = MAB([1, 2], para_lp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(para_lp))
for cp in BaseTest.cps:
for lp in BaseTest.lps:
mab = MAB([1, 2], lp, cp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(lp))
for cp in BaseTest.cps:
for para_lp in BaseTest.para_lps:
mab = MAB([1, 2], para_lp, cp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(para_lp))
def test_learning_policy_values(self):
lp = LearningPolicy.EpsilonGreedy(epsilon=0.6)
mab = MAB([0, 1], lp)
self.assertEqual(lp.epsilon, mab.learning_policy.epsilon)
data = np.array([[1, 2, 3], [3, 2, 1]])
sc = StandardScaler()
sc.fit(data)
arm_to_scaler = {0: sc, 1: sc}
lp = LearningPolicy.LinUCB(alpha=2.0, l2_lambda=0.3, arm_to_scaler=arm_to_scaler)
mab = MAB([0, 1], lp)
self.assertEqual(lp.alpha, mab.learning_policy.alpha)
self.assertEqual(lp.l2_lambda, mab.learning_policy.l2_lambda)
self.assertIs(sc, mab.learning_policy.arm_to_scaler[0])
self.assertIs(sc, mab.learning_policy.arm_to_scaler[1])
lp = LearningPolicy.Softmax(tau=0.5)
mab = MAB([0, 1], lp)
self.assertEqual(lp.tau, mab.learning_policy.tau)
def binary(arm, reward):
return reward == 1
lp = LearningPolicy.ThompsonSampling(binarizer=binary)
mab = MAB([0, 1], lp)
self.assertIs(lp.binarizer, mab.learning_policy.binarizer)
lp = LearningPolicy.UCB1(alpha=0.7)
mab = MAB([0, 1], lp)
self.assertEqual(lp.alpha, mab.learning_policy.alpha)
def test_neighborhood_policy_property(self):
for cp in BaseTest.cps:
for lp in BaseTest.lps:
mab = MAB([1, 2], lp, cp)
test_np = mab.neighborhood_policy
self.assertTrue(type(test_np) is type(cp))
for cp in BaseTest.cps:
for para_lp in BaseTest.para_lps:
mab = MAB([1, 2], para_lp, cp)
test_np = mab.neighborhood_policy
self.assertTrue(type(test_np) is type(cp))
def test_neighborhood_policy_values(self):
lp = LearningPolicy.EpsilonGreedy()
np = NeighborhoodPolicy.Clusters(n_clusters=3)
mab = MAB([0, 1], lp, np)
self.assertEqual(np.n_clusters, mab.neighborhood_policy.n_clusters)
self.assertFalse(mab.neighborhood_policy.is_minibatch)
np = NeighborhoodPolicy.Clusters(n_clusters=5, is_minibatch=True)
mab = MAB([0, 1], lp, np)
self.assertEqual(np.n_clusters, mab.neighborhood_policy.n_clusters)
self.assertTrue(mab.neighborhood_policy.is_minibatch)
np = NeighborhoodPolicy.KNearest(k=10, metric='cityblock')
mab = MAB([0, 1], lp, np)
self.assertEqual(np.k, mab.neighborhood_policy.k)
self.assertEqual(np.metric, mab.neighborhood_policy.metric)
np = NeighborhoodPolicy.Radius(radius=1.5, metric='canberra', no_nhood_prob_of_arm=[0.2, 0.8])
mab = MAB([0, 1], lp, np)
self.assertEqual(np.radius, mab.neighborhood_policy.radius)
self.assertEqual(np.metric, mab.neighborhood_policy.metric)
self.assertEqual(np.no_nhood_prob_of_arm, mab.neighborhood_policy.no_nhood_prob_of_arm)
np = NeighborhoodPolicy.LSHNearest(n_dimensions=2, n_tables=2, no_nhood_prob_of_arm=[0.2, 0.8])
mab = MAB([0, 1], lp, np)
self.assertEqual(np.n_dimensions, mab.neighborhood_policy.n_dimensions)
self.assertEqual(np.n_tables, mab.neighborhood_policy.n_tables)
self.assertEqual(np.no_nhood_prob_of_arm, mab.neighborhood_policy.no_nhood_prob_of_arm)
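# Minimal usage sketch (illustrative, not part of the original test suite):
# the property tests above only check that constructor arguments round-trip;
# a typical context-free fit/predict flow looks like this:
#   mab = MAB(arms=[1, 2], learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.1))
#   mab.fit(decisions=[1, 1, 2, 2], rewards=[0, 1, 1, 0])
#   mab.predict()  # returns one of the arms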
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
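# Sketch (illustrative): unlike predict(), predict_expectation() returns the
# expected reward per arm rather than a single recommended arm, e.g.
#   mab = MAB([1, 2], LearningPolicy.EpsilonGreedy())
#   mab.fit(decisions=[1, 2], rewards=[1, 0])
#   mab.predict_expectation()  # e.g. {1: 1.0, 2: 0.0}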
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=
|
pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3])
|
pandas.Series
|
import pickle as pk
import pandas as pd
import statsmodels.api as sm
# %% paths to the pkl files storing the reliability scores
path_ls = ['../data/results/reliability/reddit/askhistorians/glove.pkl',
'../data/results/reliability/reddit/askhistorians/sgns.pkl',
'../data/results/reliability/reddit/askscience/glove.pkl',
'../data/results/reliability/reddit/askscience/sgns.pkl',
'../data/results/reliability/wikitext-103/glove.pkl',
'../data/results/reliability/wikitext-103/sgns.pkl']
# %% make a data frame to store all the test-retest reliability scores
test_retest_df = pd.DataFrame()
for path in path_ls:
with open(path, 'rb') as f:
data = pk.load(f)
data_wide =
|
pd.DataFrame(data['test-retest'][0])
|
pandas.DataFrame
|
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
target_names.append('Healthy')
target_names.append('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
target_names.append('Non-biodegr.')
target_names.append('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
target_names.append('Negative')
target_names.append('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
target_names.append('Cylinder')
target_names.append('Disk')
target_names.append('Flatellipsold')
target_names.append('Longellipsold')
target_names.append('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
target_names.append('No-use')
target_names.append('Long-term')
target_names.append('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
target_names.append('Van')
target_names.append('Car')
target_names.append('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
target_names.append('Fine')
target_names.append('Superior')
target_names.append('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.append(item)
DataRawLengthExternal = len(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global XDataExternal, yDataExternal
XDataExternal = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
AllTargetsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsExternal):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesExternal.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesExternal.append(Class)
previous = value
ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
# OneTimeTemp = copy.deepcopy(dfRaw)
# OneTimeTemp.drop(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.copy()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replace("-", "_")
storeNewColumns.append(newCol.replace("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
else:
keepOriginalFeatures = XData.copy()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
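# joblib's Memory caches estimator() results on disk keyed by its arguments,
# so repeated Bayesian-optimization probes with identical hyperparameters
# skip retraining.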
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
max_depth = int(max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is mean of test_score
return np.mean(result['test_score'])
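# Note: the joblib Memory cache above keys the cross-validation result on the hyperparameter
# values, so repeated probes of the same point by the Bayesian optimizer do not rerun the CV.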
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformations
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (len(exeCall) == 0):
if (flagEx == 3):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
else:
if (flagEx == 4):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
#XDataStoredOriginal = XDataStored.copy()
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
#XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
bestParams = bayesopt.max['params']
estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (len(exeCall) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for uniqueValue in exeCall:
currentColumnsDeleted.append(tracker[uniqueValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.drop(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.tolist()
for indx, col in enumerate(columns):
if indx in exeCall:
columnsKeepNew.append(col)
columnsNewGen.append(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCall[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacement = currentColumn.replace(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
if (len(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
elif (splittedCol[1] == 'l2'):
dfTemp = []
dfTemp = np.log2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l1p'):
dfTemp = []
dfTemp = np.log1p(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l10'):
dfTemp = []
dfTemp = np.log10(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'e2'):
dfTemp = []
dfTemp = np.exp2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'em1'):
dfTemp = []
dfTemp = np.expm1(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
columnsNamesLoc = XData.columns.values.tolist()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (len(splittedCol) == 1):
for tran in listofTransformations:
columnsNames.append(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformations:
if (splittedCol[1] == tran):
columnsNames.append(splittedCol[0])
else:
columnsNames.append(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (len(value) > 1):
tracker.append(value[1])
else:
tracker.append(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
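# scoresAct is flattened in the order of inputsSc, each metric contributing [mean, std]:
# [accuracy_mean, accuracy_std, precision_mean, precision_std, recall_mean, recall_std]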
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMany = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMany = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.copy()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMany = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMany = howMany + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMany = howMany + 1
#else:
#pass
scores = scoresAct + previousState
if (howMany == 3):
scores.append(1)
else:
scores.append(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.append(scores.mean())
return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
maxList = max(importances)
minList = min(importances)
for f in range(Data.shape[1]):
ImpurityFS.append((importances[f] - minList) / (maxList - minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
for f in range(Data.shape[1]):
if (RFEImp[f] == 1):
RankingFS.append(0.95)
elif (RFEImp[f] == 2):
RankingFS.append(0.85)
elif (RFEImp[f] == 3):
RankingFS.append(0.75)
elif (RFEImp[f] == 4):
RankingFS.append(0.65)
elif (RFEImp[f] == 5):
RankingFS.append(0.55)
elif (RFEImp[f] == 6):
RankingFS.append(0.45)
elif (RFEImp[f] == 7):
RankingFS.append(0.35)
elif (RFEImp[f] == 8):
RankingFS.append(0.25)
elif (RFEImp[f] == 9):
RankingFS.append(0.15)
else:
RankingFS.append(0.05)
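# The if/elif ladder above is equivalent to mapping each RFE rank r to max(0.05, 1.05 - 0.1 * r),
# e.g. rank 1 -> 0.95, rank 5 -> 0.55, rank 10 or worse -> 0.05.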
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.append(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Parallelization Initilization")
flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.append(scoresHere.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = pd.DataFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = pd.DataFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
if (perm_imp_eli5PD.empty):
for col in Data.columns:
perm_imp_eli5PD = perm_imp_eli5PD.append({0: 0}, ignore_index=True)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(Data,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(Data.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.append(featureScores)
resultsFS.append(ImpurityFSDF)
resultsFS.append(perm_imp_eli5PD)
resultsFS.append(PerFeatureAccuracyPandas)
resultsFS.append(RankingFSDF)
return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
# XDataNumericColumn = XData.select_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = XDataNumericCopy[i].round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log1p(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log10(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.exp2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.expm1(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.append(dicTransf)
return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = pd.Series()
if ((len(targetRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
if (flagInf == False):
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
VIF2 = VIF2.loc[[feature]]
else:
VIF2 = pd.Series()
if ((len(targetRows2Arr) > 2) and (flagInf == False)):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
MI2List = MI2List[count]
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
if (flagInf == False):
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
VIF3 = VIF3.loc[[feature]]
else:
VIF3 = pd.Series()
if ((len(targetRows3Arr) > 2) and (flagInf == False)):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
MI3List = MI3List[count]
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
if (flagInf == False):
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
VIF4 = VIF4.loc[[feature]]
else:
VIF4 = pd.Series()
if ((len(targetRows4Arr) > 2) and (flagInf == False)):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
MI4List = MI4List[count]
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
if (flagInf == False):
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
VIF5 = VIF5.loc[[feature]]
else:
VIF5 = pd.Series()
if ((len(targetRows5Arr) > 2) and (flagInf == False)):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
MI5List = MI5List[count]
else:
MI5List = []
else:
corrMatrixComb5 = pd.DataFrame()
VIF5 = pd.Series()
MI5List = []
if(corrMatrixComb1.empty):
corrMatrixComb1 = pd.DataFrame()
else:
corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
if(corrMatrixComb2.empty):
corrMatrixComb2 = pd.DataFrame()
else:
corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
if(corrMatrixComb3.empty):
corrMatrixComb3 = pd.DataFrame()
else:
corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
if(corrMatrixComb4.empty):
corrMatrixComb4 = pd.DataFrame()
else:
corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
if(corrMatrixComb5.empty):
corrMatrixComb5 = pd.DataFrame()
else:
corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
targetRows4ArrDF = pd.DataFrame(targetRows4Arr)
targetRows5ArrDF = pd.DataFrame(targetRows5Arr)
concatAllDF1 = pd.concat([DataRows1, targetRows1ArrDF], axis=1)
concatAllDF2 = pd.concat([DataRows2, targetRows2ArrDF], axis=1)
concatAllDF3 = pd.concat([DataRows3, targetRows3ArrDF], axis=1)
concatAllDF4 = pd.concat([DataRows4, targetRows4ArrDF], axis=1)
concatAllDF5 = pd.concat([DataRows5, targetRows5ArrDF], axis=1)
corrMatrixCombTotal1 = concatAllDF1.corr()
corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
corrMatrixCombTotal2 = concatAllDF2.corr()
corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
corrMatrixCombTotal3 = concatAllDF3.corr()
corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
corrMatrixCombTotal4 = concatAllDF4.corr()
corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
corrMatrixCombTotal5 = concatAllDF5.corr()
corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
corrMatrixCombTotal1 = pd.concat([corrMatrixCombTotal1.tail(1)])
corrMatrixCombTotal2 = pd.concat([corrMatrixCombTotal2.tail(1)])
corrMatrixCombTotal3 = pd.concat([corrMatrixCombTotal3.tail(1)])
corrMatrixCombTotal4 = pd.concat([corrMatrixCombTotal4.tail(1)])
corrMatrixCombTotal5 = pd.concat([corrMatrixCombTotal5.tail(1)])
packCorrLoc = []
packCorrLoc.append(corrMatrix1.to_json())
packCorrLoc.append(corrMatrix2.to_json())
packCorrLoc.append(corrMatrix3.to_json())
packCorrLoc.append(corrMatrix4.to_json())
packCorrLoc.append(corrMatrix5.to_json())
packCorrLoc.append(corrMatrixComb1.to_json())
packCorrLoc.append(corrMatrixComb2.to_json())
packCorrLoc.append(corrMatrixComb3.to_json())
packCorrLoc.append(corrMatrixComb4.to_json())
packCorrLoc.append(corrMatrixComb5.to_json())
packCorrLoc.append(corrMatrixCombTotal1.to_json())
packCorrLoc.append(corrMatrixCombTotal2.to_json())
packCorrLoc.append(corrMatrixCombTotal3.to_json())
packCorrLoc.append(corrMatrixCombTotal4.to_json())
packCorrLoc.append(corrMatrixCombTotal5.to_json())
packCorrLoc.append(VIF1.to_json())
packCorrLoc.append(VIF2.to_json())
packCorrLoc.append(VIF3.to_json())
packCorrLoc.append(VIF4.to_json())
packCorrLoc.append(VIF5.to_json())
packCorrLoc.append(json.dumps(MI1List))
packCorrLoc.append(json.dumps(MI2List))
packCorrLoc.append(json.dumps(MI3List))
packCorrLoc.append(json.dumps(MI4List))
packCorrLoc.append(json.dumps(MI5List))
return packCorrLoc
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
thresholds = request.get_data().decode('utf8').replace("'", '"')
thresholds = json.loads(thresholds)
thresholdsPos = thresholds['PositiveValue']
thresholdsNeg = thresholds['NegativeValue']
getCorrectPrediction = []
for index, value in enumerate(yPredictProb):
getCorrectPrediction.append(value[yData[index]]*100)
quadrant1 = []
quadrant2 = []
quadrant3 = []
quadrant4 = []
quadrant5 = []
probabilityPredictions = []
for index, value in enumerate(getCorrectPrediction):
if (value > 50 and value > thresholdsPos):
quadrant1.append(index)
elif (value > 50 and value <= thresholdsPos):
quadrant2.append(index)
elif (value <= 50 and value > thresholdsNeg):
quadrant3.append(index)
else:
quadrant4.append(index)
quadrant5.append(index)
probabilityPredictions.append(value)
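# Summary of the split above (value is the predicted probability of the true class, in percent):
# quadrant1: value > 50 and above the positive threshold; quadrant2: value > 50 but at or below it;
# quadrant3: value <= 50 and above the negative threshold; quadrant4: the remaining instances;
# quadrant5: every instance.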
# Main Features
DataRows1 = XData.iloc[quadrant1, :]
DataRows2 = XData.iloc[quadrant2, :]
DataRows3 = XData.iloc[quadrant3, :]
DataRows4 = XData.iloc[quadrant4, :]
DataRows5 = XData.iloc[quadrant5, :]
Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
if (len(targetRows1Arr) > 2):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
if (len(targetRows2Arr) > 2):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
if (len(targetRows3Arr) > 2):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
if (len(targetRows4Arr) > 2):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
if (len(targetRows5Arr) > 2):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
else:
MI5List = []
else:
corrMatrixComb5 =
| pd.DataFrame() | pandas.DataFrame |
"""
Module for calculating a list of vegetation indices from a datacube containing bands without a user having to implement callback functions
"""
from openeo.rest.datacube import DataCube
from openeo.processes import ProcessBuilder, array_modify, power, sqrt, if_, multiply, divide, arccos, add, subtract, linear_scale_range
from shapely.geometry import Point
import numpy as np
import netCDF4 as nc
import glob
import seaborn as sns
from matplotlib.dates import DateFormatter
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
import earthpy.plot as ep
import pandas as pd
import rasterio
WL_B04 = 0.6646
WL_B08 = 0.8328
WL_B11 = 1.610
one_over_pi = 1. / np.pi
# source: https://git.vito.be/projects/LCLU/repos/satio/browse/satio/rsindices.py
ndvi = lambda B04, B08: (B08 - B04) / (B08 + B04)
ndmi = lambda B08, B11: (B08 - B11) / (B08 + B11)
ndgi = lambda B03, B04: (B03 - B04) / (B03 + B04)
def anir(B04, B08, B11):
a = sqrt(np.square(WL_B08 - WL_B04) + power(B08 - B04, 2))
b = sqrt(np.square(WL_B11 - WL_B08) + power(B11 - B08, 2))
c = sqrt(np.square(WL_B11 - WL_B04) + power(B11 - B04, 2))
# calculate angle with NIR as reference (ANIR)
site_length = (power(a, 2) + power(b, 2) - power(c, 2)) / (2 * a * b)
site_length = if_(site_length.lt(-1), -1, site_length)
site_length = if_(site_length.gt(1), 1, site_length)
return multiply(one_over_pi, arccos(site_length))
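# Illustrative plain-NumPy version of the same law-of-cosines angle (a sketch for scalar
# reflectances only; it is not used by the openEO process graph built above):
def _anir_scalar_example(b04, b08, b11):
    a = np.hypot(WL_B08 - WL_B04, b08 - b04)  # side between the B04 and B08 points
    b = np.hypot(WL_B11 - WL_B08, b11 - b08)  # side between the B08 and B11 points
    c = np.hypot(WL_B11 - WL_B04, b11 - b04)  # side opposite the NIR vertex
    cos_nir = np.clip((a ** 2 + b ** 2 - c ** 2) / (2 * a * b), -1.0, 1.0)
    return one_over_pi * np.arccos(cos_nir)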
ndre1 = lambda B05, B08: (B08 - B05) / (B08 + B05)
ndre2 = lambda B06, B08: (B08 - B06) / (B08 + B06)
ndre5 = lambda B05, B07: (B07 - B05) / (B07 + B05)
indices = {
"NDVI": [ndvi, (0,1)],
"NDMI": [ndmi, (-1,1)],
"NDGI": [ndgi, (-1,1)],
"ANIR": [anir, (0,1)],
"NDRE1": [ndre1, (-1,1)],
"NDRE2": [ndre2, (-1,1)],
"NDRE5": [ndre5, (-1,1)]
}
def _callback(x: ProcessBuilder, index_list: list, datacube: DataCube, scaling_factor: int) -> ProcessBuilder:
index_values = []
x_res = x
for index_name in index_list:
if index_name not in indices:
raise NotImplementedError("Index " + index_name + " has not been implemented.")
index_fun, index_range = indices[index_name]
band_indices = [
datacube.metadata.get_band_index(band)
for band in index_fun.__code__.co_varnames[:index_fun.__code__.co_argcount]
]
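# note: the positional parameter names of each index function (e.g. B04, B08) double as the
# band names that are resolved against the datacube metadata above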
index_result = index_fun(*[x.array_element(i) for i in band_indices])
if scaling_factor is not None:
index_result = index_result.linear_scale_range(*index_range, 0, scaling_factor)
index_values.append(index_result)
if scaling_factor is not None:
x_res = x_res.linear_scale_range(0,8000,0,scaling_factor)
return array_modify(data=x_res, values=index_values, index=len(datacube.metadata._band_dimension.bands))
def compute_indices(datacube: DataCube, index_list: list, scaling_factor: int = None) -> DataCube:
"""
Computes a list of indices from a datacube
:param datacube: an instance of openeo.rest.DataCube
:param index_list: a list of indices; the following are currently implemented: NDVI, NDMI, NDGI, ANIR, NDRE1, NDRE2 and NDRE5
:param scaling_factor: optional; when given, the computed indices (and the input bands, assumed to lie in [0, 8000]) are linearly rescaled to [0, scaling_factor]
:return: the datacube with the indices attached as additional bands
"""
return datacube.apply_dimension(dimension="bands",
process=lambda x: _callback(x, index_list, datacube, scaling_factor)).rename_labels('bands',
target=datacube.metadata.band_names + index_list)
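# Hypothetical usage sketch; the connection object, collection id and band names below are
# illustrative assumptions, not taken from this module:
# cube = connection.load_collection("SENTINEL2_L2A", bands=["B03", "B04", "B05", "B06", "B07", "B08", "B11"])
# cube_with_indices = compute_indices(cube, ["NDVI", "NDMI", "ANIR"], scaling_factor=250)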
def lin_scale_range(x,inputMin,inputMax,outputMin,outputMax):
return add(multiply(divide(subtract(x,inputMin), subtract(inputMax, inputMin)), subtract(outputMax, outputMin)), outputMin)
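# Closed form of the helper above:
# out = (x - inputMin) / (inputMax - inputMin) * (outputMax - outputMin) + outputMin,
# so e.g. an input of 0.5 on [0, 1] maps to 125 on [0, 250].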
def _random_point_in_shp(shp):
within = False
while not within:
x = np.random.uniform(shp.bounds[0], shp.bounds[2])
y = np.random.uniform(shp.bounds[1], shp.bounds[3])
within = shp.contains(Point(x, y))
return Point(x,y)
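# The helper above is plain rejection sampling: draw uniform points from the geometry's bounding
# box until one falls inside the shape itself, which is fine for compact field polygons.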
def point_sample_fields(crop_samples, nr_iterations):
points = {"name":[], "geometry":[]}
for name,crop_df in crop_samples.items():
for num in range(nr_iterations):
points["name"] += [name]*len(crop_df)
points["geometry"] += np.asarray(crop_df['geometry'].apply(_random_point_in_shp)).tolist()
gpd_points = gpd.GeoDataFrame(points, crs="EPSG:4326")
gpd_points_utm = gpd_points.to_crs("EPSG:32631")
points_per_type = {}
for i in set(gpd_points_utm["name"]):
crop = gpd_points_utm[gpd_points_utm["name"]==i].buffer(1).to_crs("EPSG:4326").to_json()
points_per_type[i] = crop
return points_per_type
def prep_boxplot(year, bands):
df = pd.DataFrame(columns=["Crop type","Date","Band","Iteration nr","Band value"])
for file in glob.glob('.\\data\\300_*\\*.nc'):
ds_orig = nc.Dataset(file)
dt_rng = pd.date_range("01-01-"+str(year), "31-12-"+str(year),freq="MS")
spl = file.split("\\")
f_name = spl[-1].split(".")[0]
crop_type = spl[-2].split("_")[-1]
for band in bands:
try:
ds = ds_orig[band][:]
except Exception:
print("File "+file+" is corrupt. Please remove it from your folder.")
continue
vals = None
if ds.shape[1:3] == (1,2):
vals = np.mean(ds,axis=2).flatten().tolist()
elif ds.shape[1:3] == (2,1):
vals = np.mean(ds,axis=1).flatten().tolist()
elif ds.shape[1:3] == (1,1):
vals = ds.flatten().tolist()
elif ds.shape[1:3] == (2,2):
vals = np.mean(np.mean(ds,axis=1),axis=1).tolist()
else:
print(file)
df = df.append(pd.DataFrame({
"Crop type": crop_type,
"Date": dt_rng,
"Band": band,
"Iteration nr": [f_name]*12,
"Band value": vals
}), ignore_index=True)
df["Band value"] /= 250
return df
def create_boxplots(crop_df=None, year=2019):
bands = ["B08", "B11", "NDVI", "ratio"]
if crop_df is None:
crop_df = prep_boxplot(year, bands)
x_dates = crop_df["Date"].dt.strftime("%m-%d-%y").unique()
for crop in set(crop_df["Crop type"]):
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2,figsize=(18,18))
fig.suptitle(crop,y=0.91)
axes = [ax1, ax2, ax3, ax4]
df_m = crop_df[crop_df["Crop type"]==crop]
for i in range(4):
df_m_n = df_m[df_m["Band"]==bands[i]]
sns.boxplot(ax=axes[i],data=df_m_n, x="Date",y="Band value")
axes[i].set_xticklabels(labels=x_dates, rotation=45, ha='right')
axes[i].title.set_text(str(bands[i])+" per month")
axes[i].set_ylim(0,1)
comb = {
0: "none",
1: "corn",
2: "barley",
3: "corn barley",
4: "sugarbeet",
5: "sugarbeet corn",
6: "sugarbeet barley",
7: "sugarbeet barley corn",
8: "potato",
9: "potato corn",
10: "potato barley",
11: "potato barley corn",
12: "potato sugarbeet",
13: "potato sugarbeet corn",
14: "potato sugarbeet barley",
15: "potato sugarbeet barley corn",
16: "soy",
17: "soy corn",
18: "soy barley",
19: "soy barley corn",
20: "soy sugarbeet",
21: "soy sugarbeet corn",
22: "soy sugarbeet barley",
23: "soy sugarbeet barley corn",
24: "soy potato",
25: "soy potato corn",
26: "soy potato barley",
27: "soy potato barley corn",
28: "soy potato sugarbeet",
29: "soy potato sugarbeet corn",
30: "soy potato sugarbeet barley",
31: "soy potato sugarbeet barley corn"
}
col_palette = ['linen',
'chartreuse',
'tomato',
'olivedrab',
'maroon',
'whitesmoke',
'wheat',
'palevioletred',
'darkturquoise',
'tomato',
'thistle',
'teal',
'darkgoldenrod',
'darkmagenta',
'darkorange',
'sienna',
'black',
'silver',
'tan',
'seagreen',
'mediumspringgreen',
'lightseagreen',
'royalblue',
'mediumpurple',
'plum',
'darkcyan',
'moccasin',
'rosybrown',
'gray',
'sandybrown',
'm',
'navy']
def plot_croptypes(fn='./data/total.tif',only_unique_classes=True):
with rasterio.open(fn,mode="r+",crs=rasterio.crs.CRS({"init": "epsg:4326"})) as dataset:
ds = dataset.read(1)
if only_unique_classes:
ds = np.where(np.isin(ds, [1,2,4,8,16]), ds, 0)
keys = np.unique(ds).astype(int)
height_class_labels = [comb[key] for key in comb.keys() if key in keys]
colors = col_palette[0:len(height_class_labels)]
cmap = ListedColormap(colors)
class_bins = [-0.5]+[i+0.5 for i in keys]
norm = BoundaryNorm(class_bins, len(colors))
f, ax = plt.subplots(figsize=(10, 8))
im = ax.imshow(ds, cmap=cmap, norm=norm)
ep.draw_legend(im, titles=height_class_labels)
ax.set(title="Rule-based crop classification")
ax.set_axis_off()
plt.show()
def get_classification_colors():
cmap = ListedColormap(col_palette)
classification_colors = {x:cmap(x) for x in range(0, len(col_palette))}
return classification_colors
def get_trained_model():
year = 2019
bands = ["B08", "B11", "NDVI", "ratio"]
df = pd.DataFrame(columns=["Crop type","Date","Iteration nr"]+bands)
for file in glob.glob('.\\data\\300_*\\*.nc'):
ds_orig = nc.Dataset(file)
dt_rng = pd.date_range("01-01-"+str(year), "01-01-"+str(year+1),freq="MS")
spl = file.split("\\")
f_name = spl[-1].split(".")[0]
crop_type = spl[-2].split("_")[-1]
df_row = {
"Crop type": crop_type[0:12],
"Date": dt_rng[0:12],
"Iteration nr": [f_name]*12,
}
for band in bands:
try:
ds = ds_orig[band][:]
except Exception:
print("File "+file+" is corrupt. Please remove it from your folder.")
continue
vals = None
if ds.shape[1:3] == (1,2):
vals = np.mean(ds,axis=2).flatten().tolist()
elif ds.shape[1:3] == (2,1):
vals = np.mean(ds,axis=1).flatten().tolist()
elif ds.shape[1:3] == (1,1):
vals = ds.flatten().tolist()
elif ds.shape[1:3] == (2,2):
vals = np.mean(np.mean(ds,axis=1),axis=1).tolist()
else:
print(file)
df_row[band] = vals[0:12] #[x/250 if x is not None else x for x in vals]
df = df.append(pd.DataFrame(df_row), ignore_index=True)
df = df[pd.notnull(df["B08"])]
X = df[["NDVI","B08","B11","ratio"]]
y = df["Crop type"]
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
clf=RandomForestClassifier(n_estimators=100)
clf.fit(X_train,y_train)
return clf
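# Hypothetical usage sketch (the feature values are illustrative placeholders, not real observations):
# clf = get_trained_model()
# clf.predict(pd.DataFrame({"NDVI": [0.7], "B08": [90.0], "B11": [40.0], "ratio": [0.4]}))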
def prep_df(year, bands):
df =
| pd.DataFrame(columns=["Crop type","Iteration nr"]+bands) | pandas.DataFrame |
# tested with python2.7 and 3.4
from spyre import server
import pandas as pd
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from bokeh.resources import INLINE
from bokeh.resources import CDN
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.palettes import Set2
import numpy as np
class HoltWinter:
forecast = []
def __init__(self, series, slen, alpha, beta, gamma, n_preds):
self.forecast = self.triple_exponential_smoothing(series, slen, alpha, beta, gamma, n_preds)
def initial_trend(self, series, slen):
sum = 0.0
for i in range(slen):
sum += float(series[i + slen] - series[i]) / slen
return sum / slen
def initial_seasonal_components(self, series, slen):
seasonals = {}
season_averages = []
n_seasons = int(len(series) / slen)
# compute season averages
for j in range(n_seasons):
season_averages.append(sum(series[slen * j:slen * j + slen]) / float(slen))
# compute initial values
for i in range(slen):
sum_of_vals_over_avg = 0.0
for j in range(n_seasons):
sum_of_vals_over_avg += series[slen * j + i] - season_averages[j]
seasonals[i] = sum_of_vals_over_avg / n_seasons
return seasonals
def triple_exponential_smoothing(self, series, slen, alpha, beta, gamma, n_preds):
result = []
smooth = 0.0
trend = 0.0
seasonals = self.initial_seasonal_components(series, slen)
for i in range(len(series) + n_preds):
if i == 0: # initial values
smooth = series[0]
trend = self.initial_trend(series, slen)
result.append(series[0])
continue
if i >= len(series): # we are forecasting
m = i - len(series) + 1
result.append((smooth + m * trend) + seasonals[i % slen])
else:
val = series[i]
last_smooth, smooth = smooth, alpha * (val - seasonals[i % slen]) + (1 - alpha) * (smooth + trend)
trend = beta * (smooth - last_smooth) + (1 - beta) * trend
seasonals[i % slen] = gamma * (val - smooth) + (1 - gamma) * seasonals[i % slen]
result.append(smooth + trend + seasonals[i % slen])
return result
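# Minimal usage sketch for HoltWinter (illustrative numbers, not from the app):
# the series must cover at least two full seasons of length slen so that
# initial_trend and initial_seasonal_components can be estimated; n_preds extra
# points are appended after the smoothed history.
# hw = HoltWinter([30, 21, 29, 31, 40, 28, 36, 41],
# slen=4, alpha=0.5, beta=0.3, gamma=0.6, n_preds=4)
# hw.forecast # smoothed series followed by 4 forecast values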
class TrendAnalysis(server.App):
title = "Trend Analysis"
adgrp_options = [{"label": "-- Ad Group --", "value":"empty"}]
for n in range(1, 41):
adgrp = "ad_group_{}".format(n)
adgrp_options.append({"label": adgrp, "value": adgrp})
inputs = [{"type":'dropdown',
"label": 'Select Ad Group',
"options" : adgrp_options,
"key": 'ad',
"action_id": "update_data",
# "linked_key": 'attribute',
# "linked_type": 'dropdown',
},
{"type": 'dropdown',
"label": 'Select Attribute for Trend Analysis',
"options": [{"label": "-- Attribute --", "value":"empty"},
{"label": "Shown", "value": "shown"},
{"label": "Clicked", "value": "clicked"},
{"label": "Conversions", "value": "converted"},
{"label": "Average Cost per Click", "value": "avg_cost_per_click"},
{"label": "Total Revenue", "value": "total_revenue"},
],
"key": 'attribute',
"action_id": "update_data",
}
# { "type":'text',
# "label": 'or enter a ticker symbol',
# "key": 'custom_ticker',
# "action_id": "update_data",
# "linked_key":'ticker',
# "linked_type":'dropdown',
# "linked_value":'empty' }
]
controls = [{"type" : "hidden",
"label" : "trend analysis",
"id" : "update_data"
}]
outputs = [{"type" : "html",
"id" : "html_id",
"control_id" : "update_data",
"tab" : "Trend"},
{"type": "table",
"id": "table_id",
"control_id": "update_data",
"tab": "Table",
"sortable": True,
"on_page_load": False}]
tabs = ["Trend", "Table"]
def getData(self, params):
ad = params['ad']
attribute = params['attribute']
df =
|
pd.read_csv('ad_table.csv')
|
pandas.read_csv
|
import collections
import fnmatch
import os
from typing import Union
import tarfile
import pandas as pd
import numpy as np
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import download_one_zip, unzip_nested_zip
CAMELS_NO_DATASET_ERROR_LOG = (
"We cannot read this dataset now. Please check if you choose the correct dataset:\n"
' ["AUS", "BR", "CA", "CL", "GB", "US", "YR"]'
)
def time_intersect_dynamic_data(obs: np.array, date: np.array, t_range: list):
"""
choose data from obs within the given t_range
Parameters
----------
obs
a np array
date
all periods for obs
t_range
the time range we need, such as ["1990-01-01","2000-01-01"]
Returns
-------
np.array
the chosen data
"""
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
[c, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
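# Illustrative note (not part of the original module): when obs covers only a
# subset of the requested days, the np.intersect1d pattern above scatters the
# observed values into a NaN-filled array of length nt. For example, values
# observed on [2000-01-02, 2000-01-04] within a five-day range come back as
# [nan, v1, nan, v2, nan].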
class Camels(DataSourceBase):
def __init__(self, data_path, download=False, region: str = "US"):
"""
Initialization for CAMELS series dataset
Parameters
----------
data_path
where we put the dataset
download
if true, download
region
the default is CAMELS(-US), since it's the first CAMELS dataset.
Others now include: AUS, BR, CA, CE, CL, GB, YR
"""
super().__init__(data_path)
region_lst = ["AUS", "BR", "CA", "CE", "CL", "GB", "US", "YR"]
assert region in region_lst
self.region = region
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.camels_sites = self.read_site_info()
def get_name(self):
return "CAMELS_" + self.region
def set_data_source_describe(self) -> collections.OrderedDict:
"""
Introduce the files in the dataset and list their location in the file system
Returns
-------
collections.OrderedDict
the description for a CAMELS dataset
"""
camels_db = self.data_source_dir
if self.region == "US":
# shp file of basins
camels_shp_file = os.path.join(
camels_db, "basin_set_full_res", "HCDN_nhru_final_671.shp"
)
# config of flow data
flow_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"usgs_streamflow",
)
# forcing
forcing_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"basin_mean_forcing",
)
forcing_types = ["daymet", "maurer", "nldas"]
# attr
attr_dir = os.path.join(
camels_db, "camels_attributes_v2.0", "camels_attributes_v2.0"
)
gauge_id_file = os.path.join(attr_dir, "camels_name.txt")
attr_key_lst = ["topo", "clim", "hydro", "vege", "soil", "geol"]
download_url_lst = [
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_set_full_res.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_FORCING_TYPE=forcing_types,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
CAMELS_DOWNLOAD_URL_LST=download_url_lst,
)
elif self.region == "AUS":
# id and name
gauge_id_file = os.path.join(
camels_db,
"01_id_name_metadata",
"01_id_name_metadata",
"id_name_metadata.csv",
)
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"02_location_boundary_area",
"02_location_boundary_area",
"shp",
"CAMELS_AUS_BasinOutlets_adopted.shp",
)
# config of flow data
flow_dir = os.path.join(camels_db, "03_streamflow", "03_streamflow")
# attr
attr_dir = os.path.join(camels_db, "04_attributes", "04_attributes")
# forcing
forcing_dir = os.path.join(
camels_db, "05_hydrometeorology", "05_hydrometeorology"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "BR":
# attr
attr_dir = os.path.join(
camels_db, "01_CAMELS_BR_attributes", "01_CAMELS_BR_attributes"
)
# we don't need the location attr file
attr_key_lst = [
"climate",
"geology",
"human_intervention",
"hydrology",
"land_cover",
"quality_check",
"soil",
"topography",
]
# id and name; there are two types of stations in CAMELS_BR, and we only choose the 897-station version
gauge_id_file = os.path.join(attr_dir, "camels_br_topography.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"14_CAMELS_BR_catchment_boundaries",
"14_CAMELS_BR_catchment_boundaries",
"camels_br_catchments.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(
camels_db, "02_CAMELS_BR_streamflow_m3s", "02_CAMELS_BR_streamflow_m3s"
)
flow_dir_mm_selected_catchments = os.path.join(
camels_db,
"03_CAMELS_BR_streamflow_mm_selected_catchments",
"03_CAMELS_BR_streamflow_mm_selected_catchments",
)
flow_dir_simulated = os.path.join(
camels_db,
"04_CAMELS_BR_streamflow_simulated",
"04_CAMELS_BR_streamflow_simulated",
)
# forcing
forcing_dir_precipitation_chirps = os.path.join(
camels_db,
"05_CAMELS_BR_precipitation_chirps",
"05_CAMELS_BR_precipitation_chirps",
)
forcing_dir_precipitation_mswep = os.path.join(
camels_db,
"06_CAMELS_BR_precipitation_mswep",
"06_CAMELS_BR_precipitation_mswep",
)
forcing_dir_precipitation_cpc = os.path.join(
camels_db,
"07_CAMELS_BR_precipitation_cpc",
"07_CAMELS_BR_precipitation_cpc",
)
forcing_dir_evapotransp_gleam = os.path.join(
camels_db,
"08_CAMELS_BR_evapotransp_gleam",
"08_CAMELS_BR_evapotransp_gleam",
)
forcing_dir_evapotransp_mgb = os.path.join(
camels_db,
"09_CAMELS_BR_evapotransp_mgb",
"09_CAMELS_BR_evapotransp_mgb",
)
forcing_dir_potential_evapotransp_gleam = os.path.join(
camels_db,
"10_CAMELS_BR_potential_evapotransp_gleam",
"10_CAMELS_BR_potential_evapotransp_gleam",
)
forcing_dir_temperature_min_cpc = os.path.join(
camels_db,
"11_CAMELS_BR_temperature_min_cpc",
"11_CAMELS_BR_temperature_min_cpc",
)
forcing_dir_temperature_mean_cpc = os.path.join(
camels_db,
"12_CAMELS_BR_temperature_mean_cpc",
"12_CAMELS_BR_temperature_mean_cpc",
)
forcing_dir_temperature_max_cpc = os.path.join(
camels_db,
"13_CAMELS_BR_temperature_max_cpc",
"13_CAMELS_BR_temperature_max_cpc",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[
flow_dir_m3s,
flow_dir_mm_selected_catchments,
flow_dir_simulated,
],
CAMELS_FORCING_DIR=[
forcing_dir_precipitation_chirps,
forcing_dir_precipitation_mswep,
forcing_dir_precipitation_cpc,
forcing_dir_evapotransp_gleam,
forcing_dir_evapotransp_mgb,
forcing_dir_potential_evapotransp_gleam,
forcing_dir_temperature_min_cpc,
forcing_dir_temperature_mean_cpc,
forcing_dir_temperature_max_cpc,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "CL":
# attr
attr_dir = os.path.join(camels_db, "1_CAMELScl_attributes")
attr_file = os.path.join(attr_dir, "1_CAMELScl_attributes.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"CAMELScl_catchment_boundaries",
"catchments_camels_cl_v1.3.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(camels_db, "2_CAMELScl_streamflow_m3s")
flow_dir_mm = os.path.join(camels_db, "3_CAMELScl_streamflow_mm")
# forcing
forcing_dir_precip_cr2met = os.path.join(
camels_db, "4_CAMELScl_precip_cr2met"
)
forcing_dir_precip_chirps = os.path.join(
camels_db, "5_CAMELScl_precip_chirps"
)
forcing_dir_precip_mswep = os.path.join(
camels_db, "6_CAMELScl_precip_mswep"
)
forcing_dir_precip_tmpa = os.path.join(camels_db, "7_CAMELScl_precip_tmpa")
forcing_dir_tmin_cr2met = os.path.join(camels_db, "8_CAMELScl_tmin_cr2met")
forcing_dir_tmax_cr2met = os.path.join(camels_db, "9_CAMELScl_tmax_cr2met")
forcing_dir_tmean_cr2met = os.path.join(
camels_db, "10_CAMELScl_tmean_cr2met"
)
forcing_dir_pet_8d_modis = os.path.join(
camels_db, "11_CAMELScl_pet_8d_modis"
)
forcing_dir_pet_hargreaves = os.path.join(
camels_db,
"12_CAMELScl_pet_hargreaves",
)
forcing_dir_swe = os.path.join(camels_db, "13_CAMELScl_swe")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[flow_dir_m3s, flow_dir_mm],
CAMELS_FORCING_DIR=[
forcing_dir_precip_cr2met,
forcing_dir_precip_chirps,
forcing_dir_precip_mswep,
forcing_dir_precip_tmpa,
forcing_dir_tmin_cr2met,
forcing_dir_tmax_cr2met,
forcing_dir_tmean_cr2met,
forcing_dir_pet_8d_modis,
forcing_dir_pet_hargreaves,
forcing_dir_swe,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=attr_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "GB":
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"CAMELS_GB_catchment_boundaries",
"CAMELS_GB_catchment_boundaries.shp",
)
# flow and forcing data are in a same file
flow_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"timeseries",
)
forcing_dir = flow_dir
# attr
attr_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
)
gauge_id_file = os.path.join(
attr_dir, "CAMELS_GB_hydrometry_attributes.csv"
)
attr_key_lst = [
"climatic",
"humaninfluence",
"hydrogeology",
"hydrologic",
"hydrometry",
"landcover",
"soil",
"topographic",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "YR":
# shp files of basins
camels_shp_files_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "Normal_Camels_YR_basin_boundary"
)
# attr, flow and forcing data are all in the same dir. each basin has one dir.
flow_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "1_Normal_Camels_YR_basin_data"
)
forcing_dir = flow_dir
attr_dir = flow_dir
# no gauge id file for CAMELS_YR; natural_watersheds.txt showed unregulated basins in CAMELS_YR
gauge_id_file = os.path.join(
camels_db, "9_Normal_Camels_YR", "natural_watersheds.txt"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CA":
# shp file of basins
camels_shp_files_dir = os.path.join(camels_db, "CANOPEX_BOUNDARIES")
# config of flow data
flow_dir = os.path.join(
camels_db, "CANOPEX_NRCAN_ASCII", "CANOPEX_NRCAN_ASCII"
)
forcing_dir = flow_dir
# There is no attr data in CANOPEX, hence we use attrs from HYSETS -- https://osf.io/7fn4c/
attr_dir = camels_db
gauge_id_file = os.path.join(camels_db, "STATION_METADATA.xlsx")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CE":
# We use A_basins_total_upstrm
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"3_shapefiles",
"Basins_A.shp",
)
# config of flow data
flow_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "D_gauges", "2_timeseries", "daily"
)
forcing_dir = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"2_timeseries",
"daily",
)
attr_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "A_basins_total_upstrm", "1_attributes"
)
gauge_id_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"D_gauges",
"1_attributes",
"Gauge_attributes.csv",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def download_data_source(self) -> None:
"""
Download CAMELS dataset.
Now we only support downloading CAMELS-US.
For other regions, please download the data manually and put all files of a CAMELS dataset in one directory.
For example, all files of CAMELS_AUS should be put in the "camels_aus" directory.
Returns
-------
None
"""
camels_config = self.data_source_description
if self.region == "US":
if not os.path.isdir(camels_config["CAMELS_DIR"]):
os.makedirs(camels_config["CAMELS_DIR"])
[
download_one_zip(attr_url, camels_config["CAMELS_DIR"])
for attr_url in camels_config["CAMELS_DOWNLOAD_URL_LST"]
if not os.path.isfile(
os.path.join(camels_config["CAMELS_DIR"], attr_url.split("/")[-1])
)
]
print("The CAMELS_US data have been downloaded!")
print(
"Please download it manually and put all files of a CAMELS dataset in the CAMELS_DIR directory."
)
print("We unzip all files now.")
if self.region == "CE":
# We only use CE's daily files for now; they are distributed as a tar.gz archive
file = tarfile.open(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily.tar.gz")
)
# extracting file
file.extractall(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily")
)
file.close()
for f_name in os.listdir(camels_config["CAMELS_DIR"]):
if fnmatch.fnmatch(f_name, "*.zip"):
unzip_dir = os.path.join(camels_config["CAMELS_DIR"], f_name[0:-4])
file_name = os.path.join(camels_config["CAMELS_DIR"], f_name)
unzip_nested_zip(file_name, unzip_dir)
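# Hedged usage sketch (the path below is a placeholder):
# camels_us = Camels("path/to/camels_us", download=True, region="US")
# basin_ids = camels_us.read_object_ids()
# flows = camels_us.read_target_cols(basin_ids, ["1990-01-01", "2000-01-01"], ["usgsFlow"])
# For non-US regions the archives must be downloaded manually, as noted in the
# docstring above, before constructing the class.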
def read_site_info(self) -> pd.DataFrame:
"""
Read the basic information of gages in a CAMELS dataset
Returns
-------
pd.DataFrame
basic info of gages
"""
camels_file = self.data_source_description["CAMELS_GAUGE_FILE"]
if self.region == "US":
data = pd.read_csv(
camels_file, sep=";", dtype={"gauge_id": str, "huc_02": str}
)
elif self.region == "AUS":
data = pd.read_csv(camels_file, sep=",", dtype={"station_id": str})
elif self.region == "BR":
data = pd.read_csv(camels_file, sep="\s+", dtype={"gauge_id": str})
elif self.region == "CL":
data = pd.read_csv(camels_file, sep="\t", index_col=0)
elif self.region == "GB":
data = pd.read_csv(camels_file, sep=",", dtype={"gauge_id": str})
elif self.region == "YR":
dirs_ = os.listdir(self.data_source_description["CAMELS_ATTR_DIR"])
data = pd.DataFrame({"gauge_id": dirs_})
elif self.region == "CA":
data = pd.read_excel(camels_file)
elif self.region == "CE":
data = pd.read_csv(camels_file, sep=";")
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
return data
def get_constant_cols(self) -> np.array:
"""
all readable attrs in CAMELS
Returns
-------
np.array
attribute types
"""
data_folder = self.data_source_description["CAMELS_ATTR_DIR"]
if self.region == "US":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(data_folder, "camels_" + key + ".txt")
data_temp = pd.read_csv(data_file, sep=";")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "AUS":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"CAMELS_AUS_Attributes-Indices_MasterTable.csv",
)
camels_aus_attr_indices_data = pd.read_csv(attr_all_file, sep=",")
# exclude station id
return camels_aus_attr_indices_data.columns.values[1:]
elif self.region == "BR":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(data_folder, "camels_br_" + key + ".txt")
data_temp = pd.read_csv(data_file, sep="\s+")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "CL":
camels_cl_attr_data = self.camels_sites
# exclude station id
return camels_cl_attr_data.index.values
elif self.region == "GB":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(
data_folder, "CAMELS_GB_" + key + "_attributes.csv"
)
data_temp = pd.read_csv(data_file, sep=",")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "YR":
attr_json_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"0000",
"attributes.json",
)
attr_json = hydro_utils.unserialize_json_ordered(attr_json_file)
return np.array(list(attr_json.keys()))
elif self.region == "CA":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"HYSETS_watershed_properties.txt",
)
canopex_attr_indices_data = pd.read_csv(attr_all_file, sep=";")
# exclude HYSETS watershed id
return canopex_attr_indices_data.columns.values[1:]
elif self.region == "CE":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
lamah_ce_attr_indices_data = pd.read_csv(attr_all_file, sep=";")
return lamah_ce_attr_indices_data.columns.values[1:]
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_relevant_cols(self) -> np.array:
"""
all readable forcing types
Returns
-------
np.array
forcing types
"""
if self.region == "US":
return np.array(["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"])
elif self.region == "AUS":
forcing_types = []
for root, dirs, files in os.walk(
self.data_source_description["CAMELS_FORCING_DIR"]
):
if root == self.data_source_description["CAMELS_FORCING_DIR"]:
continue
for file in files:
forcing_types.append(file[:-4])
return np.array(forcing_types)
elif self.region == "BR":
return np.array(
[
forcing_dir.split(os.sep)[-1][13:]
for forcing_dir in self.data_source_description[
"CAMELS_FORCING_DIR"
]
]
)
elif self.region == "CL":
return np.array(
[
"_".join(forcing_dir.split(os.sep)[-1].split("_")[2:])
for forcing_dir in self.data_source_description[
"CAMELS_FORCING_DIR"
]
]
)
elif self.region == "GB":
return np.array(
[
"precipitation",
"pet",
"temperature",
"peti",
"humidity",
"shortwave_rad",
"longwave_rad",
"windspeed",
]
)
elif self.region == "YR":
return np.array(
[
"pre",
"evp",
"gst_mean",
"prs_mean",
"tem_mean",
"rhu",
"win_mean",
"gst_min",
"prs_min",
"tem_min",
"gst_max",
"prs_max",
"tem_max",
"ssd",
"win_max",
]
)
elif self.region == "CA":
# Although there is a climatic potential evaporation item, CANOPEX does not have any PET data
return np.array(["prcp", "tmax", "tmin"])
elif self.region == "CE":
# Forcing variables available in the LamaH-CE daily timeseries
return np.array(
[
"2m_temp_max",
"2m_temp_mean",
"2m_temp_min",
"2m_dp_temp_max",
"2m_dp_temp_mean",
"2m_dp_temp_min",
"10m_wind_u",
"10m_wind_v",
"fcst_alb",
"lai_high_veg",
"lai_low_veg",
"swe",
"surf_net_solar_rad_max",
"surf_net_solar_rad_mean",
"surf_net_therm_rad_max",
"surf_net_therm_rad_mean",
"surf_press",
"total_et",
"prec",
"volsw_123",
"volsw_4",
]
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_target_cols(self) -> np.array:
"""
For CAMELS, the target vars are streamflows
Returns
-------
np.array
streamflow types
"""
if self.region == "US":
return np.array(["usgsFlow"])
elif self.region == "AUS":
# QualityCodes are not streamflow data.
# MLd means "1 Megaliters Per Day"; 1 MLd = 0.011574074074074 cubic-meters-per-second
# mmd means "mm/day"
return np.array(
[
"streamflow_MLd",
"streamflow_MLd_inclInfilled",
"streamflow_mmd",
"streamflow_QualityCodes",
]
)
elif self.region == "BR":
return np.array(
[
flow_dir.split(os.sep)[-1][13:]
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
]
)
elif self.region == "CL":
return np.array(
[
flow_dir.split(os.sep)[-1][11:]
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
]
)
elif self.region == "GB":
return np.array(["discharge_spec", "discharge_vol"])
elif self.region == "YR":
return np.array(["normalized_q"])
elif self.region == "CA":
return np.array(["discharge"])
elif self.region == "CE":
return np.array(["qobs"])
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_other_cols(self) -> dict:
return {
"FDC": {"time_range": ["1980-01-01", "2000-01-01"], "quantile_num": 100}
}
def read_object_ids(self, **kwargs) -> np.array:
"""
read station ids
Parameters
----------
**kwargs
optional params if needed
Returns
-------
np.array
gage/station ids
"""
if self.region in ["BR", "GB", "US", "YR"]:
return self.camels_sites["gauge_id"].values
elif self.region == "AUS":
return self.camels_sites["station_id"].values
elif self.region == "CL":
station_ids = self.camels_sites.columns.values
# for 7-digit ids, pad with a leading 0 to get an 8-digit id
cl_station_ids = [
station_id.split(" ")[-1].zfill(8) for station_id in station_ids
]
return np.array(cl_station_ids)
elif self.region == "CA":
ids = self.camels_sites["STATION_ID"].values
id_strs = [id_.split("'")[1] for id_ in ids]
# although there are 698 sites, there are only 611 sites with attributes data.
# Hence we only use 611 sites now
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"HYSETS_watershed_properties.txt",
)
if not os.path.isfile(attr_all_file):
raise FileNotFoundError(
"Please download HYSETS_watershed_properties.txt from https://osf.io/7fn4c/ and put it in the "
"root directory of CANOPEX"
)
canopex_attr_data = pd.read_csv(attr_all_file, sep=";")
return np.intersect1d(id_strs, canopex_attr_data["Official_ID"].values)
elif self.region == "CE":
# Not all basins have attributes, so we just choose those with attrs
ids = self.camels_sites["ID"].values
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
attr_data = pd.read_csv(attr_all_file, sep=";")
return np.intersect1d(ids, attr_data["ID"].values)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def read_usgs_gage(self, usgs_id, t_range):
"""
read streamflow data of a station from CAMELS-US
Parameters
----------
usgs_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
Returns
-------
np.array
streamflow data of one station for a given time range
"""
print("reading %s streamflow data", usgs_id)
gage_id_df = self.camels_sites
huc = gage_id_df[gage_id_df["gauge_id"] == usgs_id]["huc_02"].values[0]
usgs_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
huc,
usgs_id + "_streamflow_qc.txt",
)
data_temp = pd.read_csv(usgs_file, sep=r"\s+", header=None)
obs = data_temp[4].values
obs[obs < 0] = np.nan
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
df_date = data_temp[[1, 2, 3]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[C, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
def read_br_gage_flow(self, gage_id, t_range, flow_type):
"""
Read gage's streamflow from CAMELS-BR
Parameters
----------
gage_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
flow_type
"streamflow_m3s" or "streamflow_mm_selected_catchments" or "streamflow_simulated"
Returns
-------
np.array
streamflow data of one station for a given time range
"""
dir_ = [
flow_dir
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
if flow_type in flow_dir
][0]
if flow_type == "streamflow_mm_selected_catchments":
flow_type = "streamflow_mm"
elif flow_type == "streamflow_simulated":
flow_type = "simulated_streamflow"
gage_file = os.path.join(dir_, gage_id + "_" + flow_type + ".txt")
data_temp = pd.read_csv(gage_file, sep=r"\s+")
obs = data_temp.iloc[:, 3].values
obs[obs < 0] = np.nan
df_date = data_temp[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_gb_gage_flow_forcing(self, gage_id, t_range, var_type):
"""
Read gage's streamflow or forcing from CAMELS-GB
Parameters
----------
gage_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
var_type
flow type: "discharge_spec" or "discharge_vol"
forcing type: "precipitation", "pet", "temperature", "peti", "humidity", "shortwave_rad", "longwave_rad",
"windspeed"
Returns
-------
np.array
streamflow or forcing data of one station for a given time range
"""
gage_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
"CAMELS_GB_hydromet_timeseries_" + gage_id + "_19701001-20150930.csv",
)
data_temp = pd.read_csv(gage_file, sep=",")
obs = data_temp[var_type].values
if var_type in ["discharge_spec", "discharge_vol"]:
obs[obs < 0] = np.nan
date = pd.to_datetime(data_temp["date"]).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_target_cols(
self,
gage_id_lst: Union[list, np.array] = None,
t_range: list = None,
target_cols: Union[list, np.array] = None,
**kwargs
) -> np.array:
"""
read target values; for CAMELS, they are streamflows
default target_cols is a one-value list
Parameters
----------
gage_id_lst
station ids
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
target_cols
the default is None, but we need at least one default target.
For CAMELS-US, it is ["usgsFlow"];
for CAMELS-AUS, it's ["streamflow_mmd"];
for CAMELS-BR, it's ["streamflow_m3s"]
kwargs
some other params if needed
Returns
-------
np.array
streamflow data, 3-dim [station, time, streamflow]
"""
if target_cols is None:
return np.array([])
else:
nf = len(target_cols)
t_range_list = hydro_utils.t_range_days(t_range)
nt = t_range_list.shape[0]
y = np.empty([len(gage_id_lst), nt, nf])
if self.region == "US":
for k in range(len(gage_id_lst)):
data_obs = self.read_usgs_gage(gage_id_lst[k], t_range)
# For CAMELS-US, only ["usgsFlow"]
y[k, :, 0] = data_obs
elif self.region == "AUS":
for k in range(len(target_cols)):
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
target_cols[k] + ".csv",
)
)
df_date = flow_data[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
chosen_data = flow_data[gage_id_lst].values[ind1, :]
chosen_data[chosen_data < 0] = np.nan
y[:, ind2, k] = chosen_data.T
elif self.region == "BR":
for j in range(len(target_cols)):
for k in range(len(gage_id_lst)):
data_obs = self.read_br_gage_flow(
gage_id_lst[k], t_range, target_cols[j]
)
y[k, :, j] = data_obs
elif self.region == "CL":
for k in range(len(target_cols)):
if target_cols[k] == "streamflow_m3s":
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"][0],
"2_CAMELScl_streamflow_m3s.txt",
),
sep="\t",
index_col=0,
)
elif target_cols[k] == "streamflow_mm":
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"][1],
"3_CAMELScl_streamflow_mm.txt",
),
sep="\t",
index_col=0,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
date = pd.to_datetime(flow_data.index.values).values.astype(
"datetime64[D]"
)
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
station_ids = self.read_object_ids()
assert all(x < y for x, y in zip(station_ids, station_ids[1:]))
[s, ind3, ind4] = np.intersect1d(
station_ids, gage_id_lst, return_indices=True
)
chosen_data = flow_data.iloc[ind1, ind3].replace(
"\s+", np.nan, regex=True
)
chosen_data = chosen_data.astype(float)
chosen_data[chosen_data < 0] = np.nan
y[:, ind2, k] = chosen_data.values.T
elif self.region == "GB":
for j in range(len(target_cols)):
for k in range(len(gage_id_lst)):
data_obs = self.read_gb_gage_flow_forcing(
gage_id_lst[k], t_range, target_cols[j]
)
y[k, :, j] = data_obs
elif self.region == "YR":
for k in range(len(gage_id_lst)):
# only one streamflow type: normalized_q
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
gage_id_lst[k],
target_cols[0] + ".csv",
)
flow_data = pd.read_csv(flow_file, sep=",")
date = pd.to_datetime(flow_data["date"]).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
# flow data has been normalized, so we don't set negative values to NaN
y[k, ind2, 0] = flow_data["q"].values[ind1]
elif self.region == "CA":
for k in range(len(gage_id_lst)):
# only one streamflow type: discharge
canopex_id = self.camels_sites[
self.camels_sites["STATION_ID"] == "'" + gage_id_lst[k] + "'"
]["CANOPEX_ID"].values[0]
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
str(canopex_id) + ".dly",
)
read_flow_file = pd.read_csv(flow_file, header=None).values.tolist()
flow_data = []
flow_date = []
for one_site in read_flow_file:
flow_date.append(
hydro_utils.t2dt(int(one_site[0][:8].replace(" ", "0")))
)
all_data = one_site[0].split(" ")
real_data = [one_data for one_data in all_data if one_data != ""]
flow_data.append(float(real_data[-3]))
date = pd.to_datetime(flow_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
obs = np.array(flow_data)
obs[obs < 0] = np.nan
y[k, ind2, 0] = obs[ind1]
elif self.region == "CE":
for k in range(len(gage_id_lst)):
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
"ID_" + str(gage_id_lst[k]) + ".csv",
)
flow_data =
|
pd.read_csv(flow_file, sep=";")
|
pandas.read_csv
|
import pandas as pd
import tensorflow as tf
# from IPython.display import clear_output
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve
tf.random.set_seed(123)
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
dftrain.age.hist(bins=20)
plt.show()
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
|
pd.concat([dftrain, y_train], axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
# partition_for_experiment.py
# The goal here is to create two metadata files,
# containing the same genres but two different
# random selections of them
# A) Partition A has random 1800-1930,
# a random half of fantasy-supernatural 1800-1930,
# and detective 1800-1930.
# B) Partition B has the same genres,
# but a different random half of them
import random, math
import pandas as pd
import numpy as np
def tags2tagset(x):
''' function that will be applied to transform
fantasy|science-fiction into {'fantasy', 'science-fiction'} '''
if type(x) == float:
return set()
else:
return set(x.split('|'))
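# e.g. tags2tagset('fantasy|science-fiction') -> {'fantasy', 'science-fiction'}
# and tags2tagset(float('nan')) -> set() for rows with missing tags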
master = pd.read_csv('../metadata/mastermetadata.csv', index_col = 'docid')
column_of_sets = master['tags'].apply(tags2tagset)
df = master.assign(tagset = column_of_sets)
mainstream = df[df['tagset'].map(lambda tagset: ('random' in tagset) or ('randomB' in tagset))]
fantasy = df[df['tagset'].map(lambda tagset: ('fantasy_loc' in tagset) or ('fantasy_oclc' in tagset) or ('supernat' in tagset))]
detective = df[df['tagset'].map(lambda tagset: ('detective' in tagset))]
mainstream = mainstream[mainstream.firstpub < 1930]
detective = detective[detective.firstpub < 1930]
fantasy = fantasy[fantasy.firstpub < 1930]
for idx, row in mainstream.iterrows():
if type(row['author']) != str:
mainstream.loc[idx, 'author'] = random.choice(['A anonymous', 'B anonymous', 'C anonymous', 'X anonymous', 'Y anonymous', 'Z anonymous'])
mainstream.sort_values(by = 'author', inplace = True)
detective.sort_values(by = 'author', inplace = True)
fantasy.sort_values(by = 'author', inplace = True)
for idx in mainstream.index:
mainstream.loc[idx, 'tags'] = 'random'
for idx in fantasy.index:
fantasy.loc[idx, 'tags'] = 'fantasy'
mainstream = mainstream.drop('tagset', 1)
detective = detective.drop('tagset', 1)
fantasy = fantasy.drop('tagset', 1)
print(mainstream.shape)
print(detective.shape)
print(fantasy.shape)
main1, main2 = np.array_split(mainstream, 2)
fant1, fant2 = np.array_split(fantasy, 2)
dete1, dete2 = np.array_split(detective, 2)
partition1 = pd.concat([main1, dete1, fant1])
partition2 =
|
pd.concat([main2, dete2, fant2])
|
pandas.concat
|
# !/usr/bin/env python
from argparse import ArgumentDefaultsHelpFormatter
def func(args, parser):
# delay import of the rest of the module to improve `mdentropy -h` performance
import pickle
import mdtraj as md
import pandas as pd
from ..utils import parse_files
from ..metrics import DihedralMutualInformation
files = parse_files(args.traj)
traj = md.load(files, top=args.top, stride=args.stride)
mi = DihedralMutualInformation(n_bins=args.nbins, types=args.types,
method=args.method, threads=args.n_threads,
normed=True)
M = mi.partial_transform(traj, shuffle=iter, verbose=True)
df =
|
pd.DataFrame(M, columns=mi.labels)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""LDA.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1NqaR37beAJisogi9k0ro2GCnu1_Ihjf8
# Apply LDA to OSHA
"""
#Install packages
#!pip install pyldavis
data_path = "your_own_path"
data_file_lst = os.listdir(data_path)
print("data_file_lst: {}".format(data_file_lst))
import pandas as pd
df = pd.read_excel(data_path+'tagged1000.xlsx')
print(len(df))
df.head()
df = df.dropna(subset=['summary.new', 'Tagged2'])
print(len(df))
df.head(5)
summary = df['summary.new']
summary.head()
print("Number of tags: {}".format(len(df.Tagged2.unique())))
frequency = df.Tagged2.value_counts()
frequency
y_train_df = df['Tagged2']
"""## Preprocessing: Remove punctuation, lower alphabet"""
import re
summary = summary.apply(lambda x: x.strip())
# Remove punctuation
summary = summary.apply(lambda x: re.sub(r'[.,!]', '', str(x)))
# Lower the letter
summary = summary.apply(lambda x: x.lower())
summary.head()
"""## Word Cloud - Visualization"""
summary_combined_string = ','.join(list(summary.values))
summary_combined_string
# Combine 'summary' sentence into one string
from wordcloud import WordCloud
summary_combined_string = ','.join(list(summary.values))
# Create a word cloud object
summary_wordcloud = WordCloud(width=500,height=300,max_words=5000, contour_width=2, background_color="white", collocations=False)
# Word cloud generation
summary_wordcloud.generate(summary_combined_string)
# Visualize the word cloud
summary_wordcloud.to_image()
"""## LDA"""
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import gensim
from gensim.utils import simple_preprocess
stop_words = stopwords.words('english')
extend_lst = ['from', '']
stop_words.extend(extend_lst)
print(stop_words)
import gensim.corpora as corpora
from pprint import pprint
summary_tolist = summary.tolist()
#summary_tolist
summary_to_wordlist = []
## Remove Stopwords & Blank
for i in range(len(summary_tolist)):
sent = summary_tolist[i]
empty_lst = []
for word in sent.split(" "):
if (word not in stop_words) and (word != ""):
empty_lst.append(word)
summary_to_wordlist.append(empty_lst)
#summary_to_wordlist
print(summary_to_wordlist[:1])
# Create Dictionary
summary_id2word = corpora.Dictionary(summary_to_wordlist)
# Create Corpus
summary_texts = summary_to_wordlist
# Term Document Frequency
summary_corpus = [summary_id2word.doc2bow(text) for text in summary_texts]
# View
print(summary_corpus[:1][0])
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees','trees','trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
corpus
print(summary_corpus[:5])
from pprint import pprint
# number of topics
topic_num = 11
# Build LDA model
summary_lda_model = gensim.models.LdaMulticore(corpus=summary_corpus,
id2word=summary_id2word,
num_topics=topic_num)
# Print the keywords for each topic
pprint(summary_lda_model.print_topics())
summary_doc_lda = summary_lda_model[summary_corpus]
"""## Visualize LDA results - with pyLDAvis"""
#!pip install pyLDAvis
#!pip install --upgrade pandas==1.2
"""## Model Perplexity and Coherence Score"""
#Condition to find optimized topics
start = 2
end = 20
interval = 1
# Commented out IPython magic to ensure Python compatibility.
import matplotlib.pyplot as plt
# %matplotlib inline
"""##### Coherence Graph"""
from gensim.models import CoherenceModel
summary_conherence_lst = []
summary_perplexity_lst = []
summary_model_lst = []
for topic_num in range(start,end,interval):
temp_summary_lda_model = gensim.models.LdaMulticore(corpus=summary_corpus,
id2word=summary_id2word,
num_topics=topic_num)
summary_model_lst.append(temp_summary_lda_model)
# coherence
coherence_model_lda = CoherenceModel(model=temp_summary_lda_model, texts=summary_to_wordlist, dictionary=summary_id2word, coherence='c_v')
### coherence score
coherence_lda_value = coherence_model_lda.get_coherence()
summary_conherence_lst.append(coherence_lda_value)
# perplexity
### perplexity score
perplexity_value = temp_summary_lda_model.log_perplexity(summary_corpus)
summary_perplexity_lst.append(perplexity_value)
"""##### Coherence Graph"""
## coherence graph
x_value = range(start,end,interval)
plt.plot(x_value,summary_conherence_lst)
plt.xlabel("Number of Topics")
plt.ylabel("Coherence score")
plt.xlim([2,20])
plt.show()
for m, cv in zip(x_value,summary_conherence_lst):
print("Number of Topics = ",m, " has coherence value of", round(cv, 4))
"""##Analyze with optimal model"""
summary_optimal_model = summary_model_lst[9]
summary_topic_in_model = summary_optimal_model.show_topics(formatted=False)
#pprint(summary_optimal_model.print_topics(num_words=10))
"""##### Representative title sentence for each topic group"""
value = 20
tag2idx = {t : i+value for i,t in enumerate(list(set(y_train_df.values)))}
tag2idx
import numpy as np
y_train = y_train_df.to_numpy()
y_train_number = np.zeros(y_train.shape)
for i in range(y_train_number.shape[0]):
y_train_number[i,] = tag2idx[y_train[i,]]
y_train_number = y_train_number.astype(int)
#y_train_number
# Init output
df_represent_summary_sent =
|
pd.DataFrame()
|
pandas.DataFrame
|
from collections import Counter
import matplotlib.pyplot as plt
import pandas as pd
import re
def extracao_booleana(PATH_EXCEL_D, PATH_EXCEL_R):
dic_regex = {}
df_regex = pd.read_excel(PATH_EXCEL_R)
df_dados = pd.read_excel(PATH_EXCEL_D)
for _, row in df_regex.iterrows():
dic_regex[row['var']] = r'{}'.format(row['regex'])
rows_result = []
for _, row in df_dados.iterrows():
dic_aux = {k:0 for k in dic_regex}
for v,reg in dic_regex.items():
if re.search(reg,row['texto_publicacao'],re.I):
dic_aux[v] = 1
rows_result.append(dic_aux)
df_res = pd.DataFrame(rows_result)
df_res.to_excel('resultado_extração.xlsx',index=False)
def graficos_pizza(PATH_EXCEL_RES):
df = pd.read_excel(PATH_EXCEL_RES)
colunas_desconsiderar = ['numero_processo']
for c in df.columns:
if c not in colunas_desconsiderar:
dic_valores = Counter(df[c].tolist())
labels = list(dic_valores.keys())
values = list(dic_valores.values())
plt.pie(values,labels=labels,explode=[0.1 for i in range(len(values))])
plt.savefig('variavel_{}.png'.format(c))
plt.clf()
def relatorio_geral(PATH_EXCEL_RES):
df =
|
pd.read_excel(PATH_EXCEL_RES)
|
pandas.read_excel
|
import os
from collections import OrderedDict
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from .building import Building
from .datastore.datastore import join_key
from .utils import get_datastore
from .timeframe import TimeFrame
class DataSet(object):
"""
Attributes
----------
buildings : OrderedDict
Each key is an integer, starting from 1.
Each value is a nilmtk.Building object.
store : nilmtk.DataStore
metadata : dict
Metadata describing the dataset name, authors etc.
(Metadata about specific buildings, meters, appliances etc.
is stored elsewhere.)
See nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#dataset
"""
def __init__(self, filename=None, format='HDF'):
"""
Parameters
----------
filename : str
path to data set
format : str
format of output. 'HDF', 'CSV' or None. Defaults to 'HDF'.
Use None for automatic inference from file name extension.
"""
self.store = None
self.buildings = OrderedDict()
self.metadata = {}
if filename is not None:
self.import_metadata(get_datastore(filename, format))
def import_metadata(self, store):
"""
Parameters
----------
store : nilmtk.DataStore
"""
self.store = store
self.metadata = store.load_metadata()
self._init_buildings(store)
return self
def save(self, destination):
for b_id, building in self.buildings.items():
building.save(destination, '/building' + str(b_id))
def _init_buildings(self, store):
buildings = store.elements_below_key('/')
buildings.sort()
for b_key in buildings:
building = Building()
building.import_metadata(
store, '/'+b_key, self.metadata.get('name'))
self.buildings[building.identifier.instance] = building
def set_window(self, start=None, end=None):
"""Set the timeframe window on self.store. Used for setting the
'region of interest' non-destructively for all processing.
Parameters
----------
start, end : str or pd.Timestamp or datetime or None
"""
if self.store is None:
raise RuntimeError("You need to set self.store first!")
tz = self.metadata.get('timezone')
if tz is None:
raise RuntimeError("'timezone' is not set in dataset metadata.")
self.store.window = TimeFrame(start, end, tz)
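# Hedged usage sketch ('data.h5' is a placeholder filename):
# ds = DataSet('data.h5')
# ds.set_window(start='2014-01-01', end='2014-02-01')
# This restricts all subsequent loads to that timeframe without modifying the
# store; the dataset metadata must define 'timezone' or a RuntimeError is raised.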
def describe(self, **kwargs):
"""Returns a DataFrame describing this dataset.
Each column is a building. Each row is a feature."""
keys = list(self.buildings.keys())
keys.sort()
results = pd.DataFrame(columns=keys)
for i, building in self.buildings.items():
results[i] = building.describe(**kwargs)
return results
def plot_good_sections(self, axes=None, label_func=None, gap=0, **kwargs):
"""Plots all good sections for all buildings.
Parameters
----------
axes : list of axes or None.
If None then they will be generated.
Returns
-------
axes : list of axes
"""
n = len(self.buildings)
if axes is None:
n_meters_per_building = [len(elec.all_meters())
for elec in self.elecs()]
gridspec_kw = dict(height_ratios=n_meters_per_building)
fig, axes = plt.subplots(
n, 1, sharex=True, gridspec_kw=gridspec_kw)
assert n == len(axes)
for i, (ax, elec) in enumerate(zip(axes, self.elecs())):
elec.plot_good_sections(ax=ax, label_func=label_func, gap=gap,
**kwargs)
ax.set_title('House {}'.format(elec.building()), y=0.4, va='top')
ax.grid(False)
for spine in ax.spines.values():
spine.set_linewidth(0.5)
if i == n // 2:
ax.set_ylabel('Meter', rotation=0,
ha='center', va='center', y=.4)
ax.set_xlabel('Date')
plt.tight_layout()
plt.subplots_adjust(hspace=0.05)
plt.draw()
return axes
def elecs(self):
return [building.elec for building in self.buildings.values()]
def clear_cache(self):
for elec in self.elecs():
elec.clear_cache()
def plot_mains_power_histograms(self, axes=None, **kwargs):
n = len(self.buildings)
if axes is None:
fig, axes = plt.subplots(n, 1, sharex=True)
assert n == len(axes)
for ax, elec in zip(axes, self.elecs()):
ax = elec.mains().plot_power_histogram(ax=ax, **kwargs)
ax.set_title('House {}'.format(elec.building()))
return axes
def get_activity_script(self, filename):
"""Extracts an activity script from this dataset.
Saves the activity script to an HDF5 file.
Keys in the HDF5 file take the form:
'/building<building_i>/<appliance type>__<appliance instance>'
e.g. '/building1/electric_oven__1'
Spaces in the appliance type are replaced by underscores.
Each table is of fixed format and stores a pd.Series.
The index is the datetime of the start time or end time of
each appliance activation. The values are booleans. True means
the start time of an appliance activation; false means the
end time of an appliance activation.
Parameters
----------
filename : str
The full filename, including path and suffix, for the HDF5 file
for storing the activity script.
"""
store = pd.HDFStore(
filename, mode='w', complevel=9, complib='blosc')
for building in self.buildings.values():
submeters = building.elec.submeters().meters
for meter in submeters:
appliance = meter.dominant_appliance()
key = '/building{:d}/{:s}__{:d}'.format(
building.identifier.instance,
appliance.identifier.type.replace(' ', '_'),
appliance.identifier.instance)
print("Computing activations for", key)
activations = meter.get_activations()
starts = []
ends = []
for activation in activations:
starts.append(activation.index[0])
ends.append(activation.index[-1])
del activations
starts = pd.Series(True, index=starts)
ends =
|
pd.Series(False, index=ends)
|
pandas.Series
|
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import wikipedia
import musicbrainzngs
import urllib.request
import urllib.request as urllib2
import urllib.parse
import json
import requests
from bs4 import BeautifulSoup
import re
import h5py
import time
import datetime
import pandas as pd
import pickle
import pycountry
import random
from shutil import copyfile
import logging
import argparse
import os
import glob
import API_KEYS
SPOTIFY_CLIENT_ID = API_KEYS.SPOTIFY_CLIENT_ID
SPOTIFY_CLIENT_SECRET = API_KEYS.SPOTIFY_CLIENT_SECRET
LAST_FM_API_KEY = API_KEYS.LAST_FM_API_KEY
LAST_FM_SHARED_SECRET = API_KEYS.LAST_FM_SHARED_SECRET
LAST_FM_REGISTERED_TO = API_KEYS.LAST_FM_REGISTERED_TO
LAST_FM_API_KEY2 = API_KEYS.LAST_FM_API_KEY2
LAST_FM_SHARED_SECRET2 = API_KEYS.LAST_FM_SHARED_SECRET2
LAST_FM_REGISTERED_TO2 = API_KEYS.LAST_FM_REGISTERED_TO2
GENIUS_CLIENT_ID = API_KEYS.GENIUS_CLIENT_ID
GENIUS_CLIENT_SECRET = API_KEYS.GENIUS_CLIENT_SECRET
GENIUS_CLIENT_ACCESS_TOKEN = API_KEYS.GENIUS_CLIENT_ACCESS_TOKEN
MM_API_KEY = API_KEYS.MM_API_KEY
MB_CLIENT_ID = API_KEYS.MB_CLIENT_ID
MB_SECRET = API_KEYS.MB_SECRET
MAPS_API_KEY = API_KEYS.MAPS_API_KEY
LYRICS_FOUND_BY_MM = '681F1AF6-8A1A-4493-8020-E44E2006ADB1***LYRICS_FOUND_BY_MM***361E1163-EE9C-444D-874D-7E0D438EF459'
NOW = datetime.datetime.now()
NOW = str(NOW.month) + '_' + str(NOW.day) + '_' + str(NOW.hour) + '_' + str(NOW.minute)
logging.basicConfig(filename='./dumps/' + NOW + '_.log', format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
musicbrainzngs.set_useragent('haamr', 1.0)
client_credentials_manager = SpotifyClientCredentials(client_id=SPOTIFY_CLIENT_ID, client_secret=SPOTIFY_CLIENT_SECRET)
SPOTIFY = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
GENRE_LOC = pd.read_csv(
'./external_datasets/genre_places.csv',
index_col='genre'
)
with open('./external_datasets/external_data.pickle', 'rb') as f:
save = pickle.load(f)
LYRICS = save['LYRICS']
WORLD_CITIES = save['WORLD_CITIES']
MSD_ARTIST_LOCATION = save['MSD_ARTIST_LOCATION']
del save
parser = argparse.ArgumentParser(description="scrapes various apis for music content")
parser.add_argument('-n', '--num-seed-artists', default=0, help='number of seed_artists to scrape')
parser.add_argument('-c', '--random', default=False, help='grab random seed artists rather than from the top')
parser.add_argument('-s', '--seeds', default=None, help='injects seed artists via comma separated list')
parser.add_argument('-b', '--seeds-bolster', default=False, help='use bolster seed artists list instead of seed artist list')
parser.add_argument('-m', '--merge-seeds-bolster', default=False, help='merge the bolster list with the seeds list')
parser.add_argument('-t', '--seeds-top', default=False, help='inject seeds at the top of the list')
parser.add_argument('-r', '--seeds-reset', default=False, help='reset seed artists that failed so they can run a second time')
parser.add_argument('-u', '--set-seed-unscraped', default=None, help='sets a seed as unscraped')
args = parser.parse_args()
####### Utility #######
def printlog(message, e=False):
print(message)
if e:
logging.exception(message)
else:
logging.info(message)
def get_dataframes():
if os.path.isfile('./data.pickle'):
with open('./data.pickle', 'rb') as f:
save = pickle.load(f)
artists = save['artists']
future_artists = save['future_artists']
seed_artists = save['seed_artists']
bolster_artists = save['bolster_artists']
albums = save['albums']
tracks = save['tracks']
del save
else:
# id: {name: '', genres: [], related: [], lat: 0.0, lng: 0.0}
col = ['name', 'genres', 'related', 'lat', 'lng']
artists = pd.DataFrame(columns=col)
future_artists = pd.DataFrame(columns=col)
# id: {has_been_scraped: bool}
col = ['has_been_scraped']
seed_artists =
|
pd.DataFrame(columns=col)
|
pandas.DataFrame
|
#!/usr/bin/env python
import os
import sys
import json
import shutil
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.offline import plot
TIERS = {'Tier 1': 'Variants of<br>strong<br>clinical<br>significance',
'Tier 2': 'Variants of<br>potential<br>clinical<br>significance',
'Tier 3': 'Variants of<br>uncertain<br>clinical<br>significance',
'Tier 4': 'Other<br>coding<br>mutation',
'Noncoding': 'Noncoding<br>mutation'}
COLOURS = ['', '#028ddf', '#1faafc', '#57bffc', '#8fd4fd', '#c7e9fe' ]
def __main__():
combined = sys.argv[1]
print("Input combined tiers file: ", combined)
reader =
|
pd.read_csv(combined, sep='\t', header=0, chunksize=1000, usecols=['TIER', 'GENOMIC_CHANGE'])
|
pandas.read_csv
|
# =========================================================================== #
# INDEPENDENCE MODULE #
# =========================================================================== #
'''Modules for analyzing indendence between variables.'''
# %%
# --------------------------------------------------------------------------- #
# LIBRARIES #
# --------------------------------------------------------------------------- #
import collections
from collections import OrderedDict
import itertools
from itertools import combinations
from itertools import product
import math
import textwrap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from scipy import stats
import scikit_posthocs as sp
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
# %%
# ---------------------------------------------------------------------------- #
# CORRELATION #
# ---------------------------------------------------------------------------- #
class Correlation:
'''Class that computes the pairwise correlation between numeric variables
and renders a heatmap
'''
def __init__(self):
self._corr = None
pass
def test(self, df, method='pearson'):
self._corr = df.corr(method)
return(self._corr)
def pairwise(self, df, x, y, method='pearson', threshold=None):
r_tests = pd.DataFrame()
for xs, ys in zip(x,y):
r = df[xs].corr(df[ys])
df_r = pd.DataFrame({'x':xs, 'y':ys}, index=[0])
df_r['r'] = r
df_r['r_abs'] = np.absolute(r)
df_r['strength'] = np.where(df_r.r_abs<0.2, 'Very Weak',
np.where(df_r.r_abs<0.4, 'Weak',
np.where(df_r.r_abs<0.6, "Moderate",
np.where(df_r.r_abs<0.8, "Strong", "Very Strong"))))
df_r['direction'] = np.where(df_r.r <0, "Negative", "Positive")
r_tests = pd.concat([r_tests, df_r], axis=0)
r_tests = r_tests.sort_values(by='r_abs', ascending=False)
if threshold:
r_tests = r_tests[r_tests.r_abs > threshold]
return(r_tests)
def corrtable(self, threshold=None):
r_tests = pd.DataFrame()
cols = self._corr.columns.tolist()
for i in range(len(cols)):
for j in range(len(cols)):
if i != j:
df_r = pd.DataFrame({'x': cols[i], 'y':cols[j], 'r': self._corr.iloc[i][j],
'r_abs': np.absolute(self._corr.iloc[i][j])}, index=[0])
df_r['strength'] = np.where(df_r.r_abs<0.2, 'Very Weak',
np.where(df_r.r_abs<0.4, 'Weak',
np.where(df_r.r_abs<0.6, "Moderate",
np.where(df_r.r_abs<0.8, "Strong", "Very Strong"))))
df_r['direction'] = np.where(df_r.r <0, "Negative", "Positive")
r_tests = pd.concat([r_tests, df_r], axis=0)
r_tests = r_tests.sort_values(by='r_abs', ascending=False)
if threshold:
r_tests = r_tests[r_tests.r_abs > threshold]
return(r_tests)
def corrplot(self):
sns.heatmap(self._corr, xticklabels=self._corr.columns,
yticklabels=self._corr.columns)
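# Hedged usage sketch (df_numeric is a placeholder DataFrame of numeric columns):
# corr = Correlation()
# corr.test(df_numeric) # pairwise correlation matrix via DataFrame.corr
# corr.corrtable(threshold=0.3) # long-form table of pairs with |r| > 0.3
# corr.corrplot() # seaborn heatmap of the stored matrix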
# ---------------------------------------------------------------------------- #
# INDEPENDENCE #
# ---------------------------------------------------------------------------- #
class Independence:
"Class that performs a test of independence"
def __init__(self):
self._sig = 0.05
self._x2 = 0
self._p = 0
self._df = 0
self._obs = []
self._exp = []
def summary(self):
print("\n*", "=" * 78, "*")
print('{:^80}'.format("Pearson's Chi-squared Test of Independence"))
print('{:^80}'.format('Data'))
print('{:^80}'.format("x = " + self._xvar + " y = " + self._yvar + "\n"))
print('{:^80}'.format('Observed Frequencies'))
visual.print_df(self._obs)
print("\n", '{:^80}'.format('Expected Frequencies'))
visual.print_df(self._exp)
results = ("Pearson's chi-squared statistic = " + str(round(self._x2, 3)) + ", Df = " +
str(self._df) + ", p-value = " + '{0:1.2e}'.format(round(self._p, 3)))
print("\n", '{:^80}'.format(results))
print("\n*", "=" * 78, "*")
def post_hoc(self, rowwise=True, verbose=False):
dfs = []
if rowwise:
rows = range(0, len(self._obs))
for pair in list(combinations(rows, 2)):
ct = self._obs.iloc[[pair[0], pair[1]], ]
levels = ct.index.values
x2, p, dof, exp = stats.chi2_contingency(ct)
df = pd.DataFrame({'level_1': levels[0],
'level_2': levels[1],
'x2': x2,
'N': ct.values.sum(),
'p_value': p}, index=[0])
dfs.append(df)
self._post_hoc_tests = pd.concat(dfs)
else:
cols = range(0, len(self._obs.columns.values))
for pair in list(combinations(cols, 2)):
ct = self._obs.iloc[:, [pair[0], pair[1]]]
levels = ct.columns.values
x2, p, dof, exp = stats.chi2_contingency(ct)
df = pd.DataFrame({'level_1': levels[0],
'level_2': levels[1],
'x2': x2,
'N': ct.values.sum(),
'p_value': p}, index=[0])
dfs.append(df)
self._post_hoc_tests = pd.concat(dfs)
if (verbose):
visual.print_df(self._post_hoc_tests)
return(self._post_hoc_tests)
def test(self, x, y, sig=0.05):
self._x = x
self._y = y
self._xvar = x.name
self._yvar = y.name
self._n = x.shape[0]
self._sig = sig
ct = pd.crosstab(x, y)
x2, p, dof, exp = stats.chi2_contingency(ct)
self._x2 = x2
self._p = p
self._df = dof
self._obs = ct
self._exp = pd.DataFrame(exp).set_index(ct.index)
self._exp.columns = ct.columns
if p < sig:
self._result = 'significant'
self._hypothesis = 'reject'
else:
self._result = 'not significant'
self._hypothesis = 'fail to reject'
return x2, p, dof, exp
def report(self, verbose=False):
"Returns or prints results in APA format"
tup = ("A Chi-square test of independence was conducted to "
"examine the relation between " + self._xvar + " and " + self._yvar + ". "
"The relation between the variables was " + self._result + ", "
"X2(" + str(self._df) + ", N = ", str(self._n) + ") = " +
str(round(self._x2, 2)) + ", p = " + '{0:1.2e}'.format(round(self._p, 3)))
self._report = ''.join(tup)
wrapper = textwrap.TextWrapper(width=80)
lines = wrapper.wrap(text=self._report)
if verbose:
for line in lines:
print(line)
return(self._report)
# ---------------------------------------------------------------------------- #
# ANOVA #
# ---------------------------------------------------------------------------- #
#%%
class Anova:
'''
Computes Anova tests
'''
def __init__(self):
pass
def aov_test(self, df, x, y, type=2, test='F', sig=0.05):
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('y~x', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Test': 'Anova',
'Dependent': y, 'Independent': x, 'Statistic': 'F Statistic',
'Statistic Value': aov['F'][0], 'p-Value': aov['PR(>F)'][0]
}, index=[0])
tbl['H0'] = np.where(tbl['p-Value']<sig, 'Reject', 'Fail to Reject')
return(tbl)
def aov_table(self, df, x=None, y=None, type=2, test='F', threshold=0):
tests = pd.DataFrame()
if x and y:
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('y~x', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': y, 'Independent': x, 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
elif x:
dfy = df.select_dtypes(include='object')
ys = dfy.columns
for y in ys:
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('x~y', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': y, 'Independent': x, 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
elif y:
dfx = df.select_dtypes(include=[np.number])
xs = dfx.columns
for x in xs:
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('x~y', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': y, 'Independent': x, 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
else:
dfx = df.select_dtypes(include=[np.number])
dfy = df.select_dtypes(include='object')
xs = dfx.columns
ys = dfy.columns
for pair in list(itertools.product(xs,ys)):
df2 = df[[pair[0], pair[1]]].dropna()
df2 = pd.DataFrame({'x': df2[pair[0]], 'y': df2[pair[1]]})
model = smf.ols('x~y', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': pair[1], 'Independent': pair[0], 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
tests = tests.loc[tests['Eta Squared'] > threshold]
tests = tests.sort_values(by='Eta Squared', ascending=False)
return(tests)
# ---------------------------------------------------------------------------- #
# KRUSKAL #
# ---------------------------------------------------------------------------- #
#%%
class Kruskal:
'''
Class provides non-parametric methods for testing independence
'''
def __init__(self):
pass
def kruskal_test(self, df, x, y, sig=0.05):
'''Computes the Kruskal-Wallis H-test
Args:
df (pd.DataFrame): Dataframe containing data
x (str): The name of the categorical independent variable
y (str): The name of the numerical dependent variable
Returns:
DataFrame containing statistic and p-value
'''
df = df[[x,y]].dropna()
groups = {}
for grp in df[x].unique():
groups[grp] = df[y][df[x]==grp].values
args = groups.values()
k = stats.kruskal(*args)
columns = ['Test', 'Dependent', 'Independent', 'Statistic', 'Statistic Value', 'p-Value']
data = [['Kruskal', y, x, 'H-Statistic', k[0], k[1]]]
r = pd.DataFrame(data, columns = columns)
r['H0'] = np.where(r['p-Value']<sig, 'Reject', 'Fail to Reject')
return(r)
def kruskal_table(self, df, x=None, y=None, sig=0.05, sort=False):
tests = pd.DataFrame()
if x and y:
test = self.kruskal_test(df, x, y)
tests = tests.append(test)
elif x:
dfy = df.select_dtypes(include=[np.number])
ys = dfy.columns.tolist()
for y in ys:
df2 = df[[x,y]].dropna()
test = self.kruskal_test(df2, x, y)
tests = tests.append(test)
elif y:
dfx = df.select_dtypes(include='object')
xs = dfx.columns.tolist()
for x in xs:
df2 = df[[x,y]].dropna()
test = self.kruskal_test(df2, x, y)
tests = tests.append(test)
else:
dfx = df.select_dtypes(include='object')
dfy = df.select_dtypes(include=[np.number])
xs = dfx.columns.tolist()
ys = dfy.columns.tolist()
for pair in list(itertools.product(xs,ys)):
df2 = df[[pair[0], pair[1]]].dropna()
test = self.kruskal_test(df2, pair[0], pair[1])
tests = tests.append(test)
if sort:
tests = tests.sort_values(by=['Independent','Statistic Value'], ascending=False)
return(tests)
def posthoc(self, df, x, y):
df = df[[x,y]].dropna()
p = sp.posthoc_conover(df, val_col=y, group_col=x, p_adjust = 'fdr_bh')
return(p)
def sign_plot(self, df, x, y):
p = self.posthoc(df, x, y)
heatmap_args = {'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False,
'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
sp.sign_plot(p, **heatmap_args)
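# ---------------------------------------------------------------------------- #
# Illustrative sketch added for clarity (not part of the original module):
# runs kruskal_test on synthetic data. The column names 'group' and 'value'
# are placeholders, not names used elsewhere in this module.
def _demo_kruskal_test():
    demo = pd.DataFrame({
        'group': ['a'] * 15 + ['b'] * 15 + ['c'] * 15,
        'value': list(range(15)) + list(range(5, 20)) + list(range(10, 25)),
    })
    # One row of output: H statistic, p-value, and the reject/fail-to-reject call.
    return Kruskal().kruskal_test(demo, x='group', y='value', sig=0.05)
# ---------------------------------------------------------------------------- #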
# %%
# ---------------------------------------------------------------------------- #
# CORRELATION #
# ---------------------------------------------------------------------------- #
def correlation(df, x, y):
'''
Computes the correlation between two quantitative variables x and y.
Args:
df (pd.DataFrame): Dataframe containing numeric variables
x (str): The column name for the x variable
y (str): The column name for the y variable
Returns:
Data frame containing the results of the correlation tests
'''
df = df.dropna()
r = stats.pearsonr(df[x], df[y])
test = pd.DataFrame({'x': x, 'y': y, "Correlation": r[0], "p-value": r[1]},
index=[0])
test['AbsCorr'] = test['Correlation'].abs()
test['Strength'] = np.where(test["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(test["AbsCorr"] < .30, 'Small Correlation',
np.where(test["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
return(test)
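# Illustrative sketch added for clarity (not part of the original module): the
# data below is synthetic and the column names are placeholders.
def _demo_correlation():
    demo = pd.DataFrame({'hours_studied': [1, 2, 3, 4, 5, 6, 7, 8],
                         'exam_score': [52, 55, 61, 58, 66, 71, 75, 80]})
    # Returns Pearson's r, its p-value, |r|, and the labelled strength band.
    return correlation(demo, 'hours_studied', 'exam_score')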
# ---------------------------------------------------------------------------- #
# CORR_TABLE #
# ---------------------------------------------------------------------------- #
def corr_table(df, x=None, y=None, target=None, threshold=0, sig=None):
'''For a dataframe containing numeric variables, this function
computes pairwise Pearson's r tests of correlation.
Args:
df (pd.DataFrame): Data frame containing numeric variables
x(str): Name of independent variable column (optional)
y(str): Name of dependent variable column (optional)
target(str): Name of target variable column; if provided, each numeric variable is tested against it (optional)
threshold (float): Threshold above which correlations should be
reported.
Returns:
Data frame containing the results of the pairwise tests of correlation.
'''
tests = []
if x is not None:
for pair in list(itertools.product(x, y)):
df2 = df[[pair[0], pair[1]]].dropna()
x = df2[pair[0]]
y = df2[pair[1]]
r = stats.pearsonr(x, y)
tests.append(OrderedDict(
{'x': pair[0], 'y': pair[1], "Correlation": r[0], "p-value": r[1]}))
tests = pd.DataFrame(tests)
tests['AbsCorr'] = tests['Correlation'].abs()
tests['Strength'] = np.where(tests["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(tests["AbsCorr"] < .30, 'Small Correlation',
np.where(tests["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
else:
df2 = df.select_dtypes(include=['int', 'float64'])
terms = df2.columns
if target:
if target not in df2.columns:
df2 = df2.join(df[target])
for term in terms:
df2 = df2.dropna()
x = df2[term]
y = df2[target]
r = stats.pearsonr(x, y)
tests.append(OrderedDict(
{'x': term, 'y': target, "Correlation": r[0], "p-value": r[1]}))
tests = pd.DataFrame(tests)
tests['AbsCorr'] = tests['Correlation'].abs()
tests['Strength'] = np.where(tests["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(tests["AbsCorr"] < .30, 'Small Correlation',
np.where(tests["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
else:
for pair in list(combinations(terms, 2)):
df2 = df[[pair[0], pair[1]]].dropna()
x = df2[pair[0]]
y = df2[pair[1]]
r = stats.pearsonr(x, y)
tests.append(OrderedDict(
{'x': pair[0], 'y': pair[1], "Correlation": r[0], "p-value": r[1]}))
tests = pd.DataFrame(tests)
tests['AbsCorr'] = tests['Correlation'].abs()
tests['Strength'] = np.where(tests["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(tests["AbsCorr"] < .30, 'Small Correlation',
np.where(tests["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
top = tests.loc[tests['AbsCorr'] > threshold]
if sig is not None:
top = top.loc[top['p-value']<sig]
top = top.sort_values(by='AbsCorr', ascending=False)
return top
# ---------------------------------------------------------------------------- #
# CRAMER'S V (Corrected) #
# ---------------------------------------------------------------------------- #
def cramers(contingency_table, correction=False):
""" calculate Cramers V statistic for categorical-categorical association.
If correction is True, it uses correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328
Args:
contingency_table (pd.DataFrame): Contingency table containing
counts for the two variables
being analyzed
correction (bool): If True, use Bergsma's correction
Returns:
tuple: p-value from the chi-squared test and Cramer's V measure of association
"""
chi2, p = stats.chi2_contingency(contingency_table)[0:2]
n = contingency_table.sum().sum()
phi = np.sqrt(chi2/n)
r, c = contingency_table.shape
if correction:
phi2corr = max(0, phi**2 - ((c-1)*(r-1))/(n-1))
rcorr = r - ((r-1)**2)/(n-1)
ccorr = c - ((c-1)**2)/(n-1)
V = np.sqrt(phi2corr / min((ccorr-1), (rcorr-1)))
else:
V = np.sqrt(phi**2/min(r,c))
return p, V
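# Illustrative sketch added for clarity (not part of the original module): builds a
# small contingency table with made-up counts and computes both the plain and the
# Bergsma-Wicher corrected Cramer's V.
def _demo_cramers():
    table = pd.DataFrame([[30, 10], [15, 45]],
                         index=['treated', 'control'],
                         columns=['improved', 'not_improved'])
    p, v_plain = cramers(table)
    _, v_corrected = cramers(table, correction=True)
    return p, v_plain, v_corrected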
# %%
# ---------------------------------------------------------------------------- #
# ASSOCIATION #
# ---------------------------------------------------------------------------- #
def association(df, x, y, z=None, sig=0.05):
'''
Computes the association between two or three categorical variables.
Args:
df (pd.DataFrame): Dataframe containing categorical variables
x (str): The column name for the x variable
y (str): The column name for the y variable
z (str): Optional column containing the z variable
Returns:
Data frame containing the results of the correlation tests
'''
if z:
df = df[[x,y,z]].dropna()
ct = pd.crosstab(df[z], [df[x], df[y]], rownames=[z], colnames=[x, y])
p, cv = cramers(ct)
test = pd.DataFrame({'x': x, 'y': y, 'z':z, 'p-Value':p, "Cramer's V": cv},
index=[0])
else:
df = df[[x,y]].dropna()
ct = pd.crosstab(df[x], df[y])
p, cv = cramers(ct)
test = pd.DataFrame({'x': x, 'y': y, 'p-Value':p, "Cramer's V": cv},
index=[0])
test['Strength'] = np.where(test["Cramer's V"] < .16, 'Very Weak Association',
np.where(test["Cramer's V"] < .20, 'Weak Association',
np.where(test["Cramer's V"] < .25, 'Moderate Association',
np.where(test["Cramer's V"] < .30, 'Moderately Strong Association',
np.where(test["Cramer's V"] < .35, 'Strong Association',
np.where(test["Cramer's V"] < .40, 'Very Strong Association',
np.where(test["Cramer's V"] < .50, 'Extremely Strong Association',
'Redundant')))))))
test['Result'] = np.where(test['p-Value']<sig, 'Significant', 'Not Significant')
return(test)
# ---------------------------------------------------------------------------- #
# ASSOCTABLE #
# ---------------------------------------------------------------------------- #
def assoc_table(df, x=None, y=None, threshold=0):
'''For a dataframe containing categorical variables, this function
computes a series of association tests for each pair of categorical
variables. It returns the Cramer's V measure of
association between the pairs of categorical variables. Note, this
is NOT a hypothesis test.
Args:
df (pd.DataFrame): Data frame containing categorical variables
x (str): Optional column to be used as the independent variable for
all tests
y (str): Optional column to be used as the dependent variable for
all tests
threshold (float): The minimum Cramer's V threshold to report.
Returns:
Data frame containing the results of the pairwise association measures.
'''
df2 = df.select_dtypes(include='object')
terms = list(df2.columns)
tests = []
if x and y:
ct = pd.crosstab(df[x], df[y])
_, cv = cramers(ct)
tests.append(OrderedDict(
{'x': x, 'y': y, "Cramer's V": cv}))
elif x:
if x in terms:
terms.remove(x)
if x not in df2.columns:
df2 = df2.join(df[x])
df2 = df2.dropna()
for term in terms:
ct = pd.crosstab(df2[x], df2[term])
_, cv = cramers(ct)
tests.append(OrderedDict(
{'x': x, 'y': term, "Cramer's V": cv}))
elif y:
if y in terms:
terms.remove(y)
if y not in df2.columns:
df2 = df2.join(df[y])
df2 = df2.dropna()
for term in terms:
ct =
|
pd.crosstab(df2[term], df2[y])
|
pandas.crosstab
|
import numpy as np
import cv2
import imutils
import sys
import pytesseract
import pandas as pd
import time
pytesseract.pytesseract.tesseract_cmd = r"tesseract"
# read and resize image to the required size
image = cv2.imread('examples/test1.png')
image = imutils.resize(image, width=500)
cv2.imshow("Original Image", image)
# convert to gray scale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Grayscale Conversion", gray)
# blur to reduce noise
gray = cv2.bilateralFilter(gray, 11, 17, 17)
cv2.imshow("Bilateral Filter", gray)
rectKern = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKern)
cv2.imshow("Blackhat", blackhat)
# perform edge detection
edged = cv2.Canny(gray, 170, 200)
cv2.imshow("Canny Edges", edged)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find contours in the edged image
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts=sorted(cnts, key = cv2.contourArea, reverse = True)[:30]
NumberPlateCnt = None
count = 0
# loop over contours
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# if the approximated contour has four points, assume it is the number plate
if len(approx) == 4:
NumberPlateCnt = approx
break
# mask the part other than the number plate
mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,[NumberPlateCnt],0,255,-1)
new_image = cv2.bitwise_and(image,image,mask=mask)
cv2.namedWindow("Final Image",cv2.WINDOW_NORMAL)
cv2.imshow("Final Image",new_image)
# configuration for tesseract
config = (' --oem 1 --psm 7 -c tessedit_char_whitelist=0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ')
# run tesseract OCR on image
text = pytesseract.image_to_string(new_image, config=config)
# data is stored in CSV file
raw_data = {'date':[time.asctime( time.localtime(time.time()))],'':[text]}
df =
|
pd.DataFrame(raw_data)
|
pandas.DataFrame
|
"""Preprocessing data methods."""
import numpy as np
import pandas as pd
from autots.tools.impute import FillNA
def remove_outliers(df, std_threshold: float = 3):
"""Replace outliers with np.nan.
https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
Args:
df (pandas.DataFrame): DataFrame containing numeric data, DatetimeIndex
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]
return df
def clip_outliers(df, std_threshold: float = 3):
"""Replace outliers above threshold with that threshold. Axis = 0.
Args:
df (pandas.DataFrame): DataFrame containing numeric data
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df_std = df.std(axis=0, skipna=True)
df_mean = df.mean(axis=0, skipna=True)
lower = df_mean - (df_std * std_threshold)
upper = df_mean + (df_std * std_threshold)
df2 = df.clip(lower=lower, upper=upper, axis=1)
return df2
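# Illustrative sketch (added for clarity, not part of this module's public API):
# shows the effect of the two outlier helpers on a single-column frame with one
# extreme value; the numbers are arbitrary.
def _demo_outlier_helpers():
    dates = pd.date_range('2021-01-01', periods=10, freq='D')
    demo = pd.DataFrame({'series_a': [1, 2, 1, 3, 2, 100, 2, 1, 3, 2]}, index=dates)
    removed = remove_outliers(demo, std_threshold=2)  # extreme value becomes NaN
    clipped = clip_outliers(demo, std_threshold=2)    # extreme value is capped at mean + 2*std
    return removed, clipped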
def simple_context_slicer(df, method: str = 'None', forecast_length: int = 30):
"""Condensed version of context_slicer with more limited options.
Args:
df (pandas.DataFrame): training data frame to slice
method (str): Option to slice dataframe
'None' - return unaltered dataframe
'HalfMax' - return half of dataframe
'ForecastLength' - return dataframe equal to length of forecast
'2ForecastLength' - return dataframe equal to twice length of forecast
(also takes 4, 6, 8, 10 in addition to 2)
"""
if method in [None, "None"]:
return df
df = df.sort_index(ascending=True)
if 'forecastlength' in str(method).lower():
len_int = int([x for x in str(method) if x.isdigit()][0])
return df.tail(len_int * forecast_length)
elif method == 'HalfMax':
return df.tail(int(len(df.index) / 2))
elif str(method).isdigit():
return df.tail(int(method))
else:
print("Context Slicer Method not recognized")
return df
"""
if method == '2ForecastLength':
return df.tail(2 * forecast_length)
elif method == '6ForecastLength':
return df.tail(6 * forecast_length)
elif method == '12ForecastLength':
return df.tail(12 * forecast_length)
elif method == 'ForecastLength':
return df.tail(forecast_length)
elif method == '4ForecastLength':
return df.tail(4 * forecast_length)
elif method == '8ForecastLength':
return df.tail(8 * forecast_length)
elif method == '10ForecastLength':
return df.tail(10 * forecast_length)
"""
class Detrend(object):
"""Remove a linear trend from the data."""
def __init__(self):
self.name = 'Detrend'
def fit(self, df):
"""Fits trend for later detrending.
Args:
df (pandas.DataFrame): input dataframe
"""
from statsmodels.regression.linear_model import GLS
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
# formerly df.index.astype( int ).values
y = df.values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
self.model = GLS(y, X, missing='drop').fit()
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
# formerly X = df.index.astype( int ).values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
df = df.astype(float) - self.model.predict(X)
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
df = df.astype(float) + self.model.predict(X)
return df
class StatsmodelsFilter(object):
"""Irreversible filters."""
def __init__(self, method: str = 'bkfilter'):
self.method = method
def fit(self, df):
"""Fits filter.
Args:
df (pandas.DataFrame): input dataframe
"""
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
if self.method == 'bkfilter':
from statsmodels.tsa.filters import bk_filter
cycles = bk_filter.bkfilter(df, K=1)
cycles.columns = df.columns
df = (df - cycles).fillna(method='ffill').fillna(method='bfill')
elif self.method == 'cffilter':
from statsmodels.tsa.filters import cf_filter
cycle, trend = cf_filter.cffilter(df)
cycle.columns = df.columns
df = df - cycle
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
class SinTrend(object):
"""Modelling sin."""
def __init__(self):
self.name = 'SinTrend'
def fit_sin(self, tt, yy):
"""Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"
from user unsym @ https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
"""
import scipy.optimize
tt = np.array(tt)
yy = np.array(yy)
ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0])) # assume uniform spacing
Fyy = abs(np.fft.fft(yy))
guess_freq = abs(
ff[np.argmax(Fyy[1:]) + 1]
) # excluding the zero frequency "peak", which is related to offset
guess_amp = np.std(yy) * 2.0 ** 0.5
guess_offset = np.mean(yy)
guess = np.array([guess_amp, 2.0 * np.pi * guess_freq, 0.0, guess_offset])
def sinfunc(t, A, w, p, c):
return A * np.sin(w * t + p) + c
popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess, maxfev=10000)
A, w, p, c = popt
# f = w/(2.*np.pi)
# fitfunc = lambda t: A * np.sin(w*t + p) + c
return {
"amp": A,
"omega": w,
"phase": p,
"offset": c,
} # , "freq": f, "period": 1./f, "fitfunc": fitfunc, "maxcov": np.max(pcov), "rawres": (guess,popt,pcov)}
def fit(self, df):
"""Fits trend for later detrending
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
self.sin_params = pd.DataFrame()
# make this faster
for column in df.columns:
try:
y = df[column].values
vals = self.fit_sin(X, y)
current_param = pd.DataFrame(vals, index=[column])
except Exception as e:
print(e)
current_param = pd.DataFrame(
{"amp": 0, "omega": 1, "phase": 1, "offset": 1}, index=[column]
)
self.sin_params = pd.concat([self.sin_params, current_param], axis=0)
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fits and Returns Detrended DataFrame
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Returns detrended data
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
sin_df = pd.DataFrame()
# make this faster
for index, row in self.sin_params.iterrows():
yy = pd.DataFrame(
row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],
columns=[index],
)
sin_df = pd.concat([sin_df, yy], axis=1)
df_index = df.index
df = df.astype(float).reset_index(drop=True) - sin_df.reset_index(drop=True)
df.index = df_index
return df
def inverse_transform(self, df):
"""Returns data to original form
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
sin_df = pd.DataFrame()
# make this faster
for index, row in self.sin_params.iterrows():
yy = pd.DataFrame(
row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],
columns=[index],
)
sin_df = pd.concat([sin_df, yy], axis=1)
df_index = df.index
df = df.astype(float).reset_index(drop=True) + sin_df.reset_index(drop=True)
df.index = df_index
return df
class PositiveShift(object):
"""Shift each series if necessary to assure all values >= 1.
Args:
log (bool): whether to include a log transform.
center_one (bool): whether to shift to 1 instead of 0.
"""
def __init__(self, log: bool = False, center_one: bool = True, squared=False):
self.name = 'PositiveShift'
self.log = log
self.center_one = center_one
self.squared = squared
def fit(self, df):
"""Fits shift interval.
Args:
df (pandas.DataFrame): input dataframe
"""
if self.log or self.center_one:
shift_amount = df.min(axis=0) - 1
else:
shift_amount = df.min(axis=0)
self.shift_amount = shift_amount.where(shift_amount < 0, 0).abs()
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
df = df + self.shift_amount
if self.squared:
df = df ** 2
if self.log:
df_log = pd.DataFrame(np.log(df))
return df_log
else:
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
if self.log:
df = pd.DataFrame(np.exp(df))
if self.squared:
df = df ** 0.5
df = df - self.shift_amount
return df
class IntermittentOccurrence(object):
"""Intermittent inspired binning predicts probability of not median."""
def __init__(self):
self.name = 'IntermittentOccurrence'
def fit(self, df):
"""Fits shift interval.
Args:
df (pandas.DataFrame): input dataframe
"""
self.df_med = df.median(axis=0)
self.upper_mean = df[df > self.df_med].mean(axis=0) - self.df_med
self.lower_mean = df[df < self.df_med].mean(axis=0) - self.df_med
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
temp = df.where(df >= self.df_med, -1)
temp = temp.where(df <= self.df_med, 1).where(df != self.df_med, 0)
return temp
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
invtrans_df = df.copy()
invtrans_df = invtrans_df.where(df <= 0, self.upper_mean * df, axis=1)
invtrans_df = invtrans_df.where(
df >= 0, (self.lower_mean * df).abs() * -1, axis=1
)
invtrans_df = invtrans_df + self.df_med
invtrans_df = invtrans_df.where(df != 0, self.df_med, axis=1)
return invtrans_df
class RollingMeanTransformer(object):
"""Attempt at Rolling Mean with built-in inverse_transform for time series
inverse_transform can only be applied to the original series, or an immediately following forecast
Does not play well with data with NaNs
Inverse transformed values returned will also not return as 'exactly' equals due to floating point imprecision.
Args:
window (int): number of periods to take mean over
"""
def __init__(self, window: int = 10, fixed: bool = False):
self.window = window
self.fixed = fixed
def fit(self, df):
"""Fits.
Args:
df (pandas.DataFrame): input dataframe
"""
self.shape = df.shape
self.last_values = (
df.tail(self.window).fillna(method='ffill').fillna(method='bfill')
)
self.first_values = (
df.head(self.window).fillna(method='ffill').fillna(method='bfill')
)
df = df.tail(self.window + 1).rolling(window=self.window, min_periods=1).mean()
self.last_rolling = df.tail(1)
return self
def transform(self, df):
"""Returns rolling data
Args:
df (pandas.DataFrame): input dataframe
"""
df = df.rolling(window=self.window, min_periods=1).mean()
# self.last_rolling = df.tail(1)
return df
def fit_transform(self, df):
"""Fits and Returns Magical DataFrame
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def inverse_transform(self, df, trans_method: str = "forecast"):
"""Returns data to original *or* forecast form
Args:
df (pandas.DataFrame): input dataframe
trans_method (str): whether to inverse on original data, or on a following sequence
- 'original' return original data to original numbers
- 'forecast' inverse the transform on a dataset immediately following the original
"""
if self.fixed:
return df
else:
window = self.window
if trans_method == 'original':
staged = self.first_values
diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(
len(df.index) - window
)
temp_cols = diffed.columns
for n in range(len(diffed.index)):
temp_index = diffed.index[n]
temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[
n
].reset_index(drop=True).astype(float)
temp_row = pd.DataFrame(
temp_row.values.reshape(1, len(temp_row)), columns=temp_cols
)
temp_row.index = pd.DatetimeIndex([temp_index])
staged = pd.concat([staged, temp_row], axis=0)
return staged
# current_inversed = current * window - cumsum(window-1 to previous)
if trans_method == 'forecast':
staged = self.last_values
df = pd.concat([self.last_rolling, df], axis=0)
diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(
len(df.index)
)
diffed = diffed.tail(len(diffed.index) - 1)
temp_cols = diffed.columns
for n in range(len(diffed.index)):
temp_index = diffed.index[n]
temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[
n
].reset_index(drop=True).astype(float)
temp_row = pd.DataFrame(
temp_row.values.reshape(1, len(temp_row)), columns=temp_cols
)
temp_row.index = pd.DatetimeIndex([temp_index])
staged = pd.concat([staged, temp_row], axis=0)
staged = staged.tail(len(diffed.index))
return staged
"""
df = df_wide_numeric.tail(60).head(50).fillna(0)
df_forecast = (df_wide_numeric).tail(10).fillna(0)
forecats = transformed.tail(10)
test = RollingMeanTransformer().fit(df)
transformed = test.transform(df)
inverse = test.inverse_transform(forecats, trans_method = 'forecast')
df == test.inverse_transform(test.transform(df), trans_method = 'original')
inverse == df_wide_numeric.tail(10)
"""
"""
df = df_wide_numeric.tail(60).fillna(0)
test = SeasonalDifference().fit(df)
transformed = test.transform(df)
forecats = transformed.tail(10)
df == test.inverse_transform(transformed, trans_method = 'original')
df = df_wide_numeric.tail(60).head(50).fillna(0)
test = SeasonalDifference().fit(df)
inverse = test.inverse_transform(forecats, trans_method = 'forecast')
inverse == df_wide_numeric.tail(10).fillna(0)
"""
class SeasonalDifference(object):
"""Remove seasonal component.
Args:
lag_1 (int): length of seasonal period to remove.
method (str): 'LastValue', 'Mean', 'Median' to construct seasonality
"""
def __init__(self, lag_1: int = 7, method: str = 'LastValue'):
self.lag_1 = abs(int(lag_1))
self.method = method
def fit(self, df):
"""Fits.
Args:
df (pandas.DataFrame): input dataframe
"""
df_length = df.shape[0]
if self.method in ['Mean', 'Median']:
tile_index = np.tile(
np.arange(self.lag_1), int(np.ceil(df_length / self.lag_1))
)
tile_index = tile_index[len(tile_index) - (df_length) :]
df.index = tile_index
if self.method == "Median":
self.tile_values_lag_1 = df.groupby(level=0, axis=0).median()
else:
self.tile_values_lag_1 = df.groupby(level=0, axis=0).mean()
else:
self.method = 'LastValue'
self.tile_values_lag_1 = df.tail(self.lag_1)
return self
def transform(self, df):
"""Returns rolling data
Args:
df (pandas.DataFrame): input dataframe
"""
tile_len = len(self.tile_values_lag_1.index)
df_len = df.shape[0]
sdf = pd.DataFrame(
np.tile(self.tile_values_lag_1, (int(np.ceil(df_len / tile_len)), 1))
)
sdf = sdf.tail(df_len)
sdf.index = df.index
sdf.columns = df.columns
return df - sdf
def fit_transform(self, df):
"""Fits and Returns Magical DataFrame
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def inverse_transform(self, df, trans_method: str = "forecast"):
"""Returns data to original *or* forecast form
Args:
df (pandas.DataFrame): input dataframe
trans_method (str): whether to inverse on original data, or on a following sequence
- 'original' return original data to original numbers
- 'forecast' inverse the transform on a dataset immediately following the original
"""
tile_len = len(self.tile_values_lag_1.index)
df_len = df.shape[0]
sdf = pd.DataFrame(
np.tile(self.tile_values_lag_1, (int(np.ceil(df_len / tile_len)), 1))
)
if trans_method == 'original':
sdf = sdf.tail(df_len)
else:
sdf = sdf.head(df_len)
sdf.index = df.index
sdf.columns = df.columns
return df + sdf
class DatepartRegression(object):
"""Remove a regression on datepart from the data."""
def __init__(
self,
regression_model: dict = {
"model": 'DecisionTree',
"model_params": {"max_depth": 5, "min_samples_split": 2},
},
datepart_method: str = 'expanded',
):
self.name = 'DatepartRegression'
self.regression_model = regression_model
self.datepart_method = datepart_method
def fit(self, df):
"""Fits trend for later detrending.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
y = df.values
from autots.models.sklearn import date_part
X = date_part(df.index, method=self.datepart_method)
from autots.models.sklearn import retrieve_regressor
self.model = retrieve_regressor(
regression_model=self.regression_model,
verbose=0,
verbose_bool=False,
random_seed=2020,
)
self.model = self.model.fit(X, y)
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
from autots.models.sklearn import date_part
X = date_part(df.index, method=self.datepart_method)
y = pd.DataFrame(self.model.predict(X))
y.columns = df.columns
y.index = df.index
df = df - y
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
from autots.models.sklearn import date_part
X = date_part(df.index, method=self.datepart_method)
y = pd.DataFrame(self.model.predict(X))
y.columns = df.columns
y.index = df.index
df = df + y
return df
class DifferencedTransformer(object):
"""Difference from lag n value.
inverse_transform can only be applied to the original series, or an immediately following forecast
Args:
lag (int): number of periods to shift (not implemented, default = 1)
"""
def __init__(self):
self.lag = 1
self.beta = 1
def fit(self, df):
"""Fit.
Args:
df (pandas.DataFrame): input dataframe
"""
self.last_values = df.tail(self.lag)
self.first_values = df.head(self.lag)
return self
def transform(self, df):
"""Return differenced data.
Args:
df (pandas.DataFrame): input dataframe
"""
# df = df_wide_numeric.tail(60).head(50)
# df_forecast = (df_wide_numeric - df_wide_numeric.shift(1)).tail(10)
df = (df - df.shift(self.lag)).fillna(method='bfill')
return df
def fit_transform(self, df):
"""Fits and Returns Magical DataFrame
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def inverse_transform(self, df, trans_method: str = "forecast"):
"""Returns data to original *or* forecast form
Args:
df (pandas.DataFrame): input dataframe
trans_method (str): whether to inverse on original data, or on a following sequence
- 'original' return original data to original numbers
- 'forecast' inverse the transform on a dataset immediately following the original
"""
lag = self.lag
# add last values, group by lag, cumsum
if trans_method == 'original':
df = pd.concat([self.first_values, df.tail(df.shape[0] - lag)])
return df.cumsum()
else:
df_len = df.shape[0]
df = pd.concat([self.last_values, df], axis=0)
return df.cumsum().tail(df_len)
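# Illustrative sketch (added for clarity, not part of this module's public API):
# differencing and then inverting with trans_method='original' reproduces the
# input up to floating point error; the series below is synthetic.
def _demo_differenced_transformer():
    demo = pd.DataFrame({'y': np.arange(20, dtype=float) ** 2},
                        index=pd.date_range('2020-01-01', periods=20, freq='D'))
    transformer = DifferencedTransformer()
    diffed = transformer.fit_transform(demo)
    restored = transformer.inverse_transform(diffed, trans_method='original')
    return (demo - restored).abs().max()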
class PctChangeTransformer(object):
"""% Change of Data.
Warning:
Because % change doesn't play well with zeroes, zeroes are replaced by positive of the lowest non-zero value.
Inverse transformed values returned will also not return as 'exactly' equals due to floating point imprecision.
inverse_transform can only be applied to the original series, or an immediately following forecast
"""
def __init__(self):
self.name = 'PctChangeTransformer'
def fit(self, df):
"""Fits.
Args:
df (pandas.DataFrame): input dataframe
"""
temp = (
df.replace([0], np.nan).fillna((df[df != 0]).abs().min(axis=0)).fillna(0.1)
)
self.last_values = temp.tail(1)
self.first_values = temp.head(1)
return self
def transform(self, df):
"""Returns changed data
Args:
df (pandas.DataFrame): input dataframe
"""
df = df.replace([0], np.nan)
df = df.fillna((df[df != 0]).abs().min(axis=0)).fillna(0.1)
df = df.pct_change(periods=1, fill_method='ffill').fillna(0)
df = df.replace([np.inf, -np.inf], 0)
return df
def fit_transform(self, df):
"""Fit and Return *Magical* DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def inverse_transform(self, df, trans_method: str = "forecast"):
"""Returns data to original *or* forecast form
Args:
df (pandas.DataFrame): input dataframe
trans_method (str): whether to inverse on original data, or on a following sequence
- 'original' return original data to original numbers
- 'forecast' inverse the transform on a dataset immediately following the original
"""
df = (df + 1).replace([0], np.nan)
df = df.fillna((df[df != 0]).abs().min()).fillna(0.1)
# add last values, group by lag, cumprod
if trans_method == 'original':
df = pd.concat([self.first_values, df.tail(df.shape[0] - 1)], axis=0)
return df.cumprod()
else:
df_len = df.shape[0]
df = pd.concat([self.last_values, df], axis=0)
return df.cumprod().tail(df_len)
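# Illustrative sketch (added for clarity, not part of this module's public API):
# percent-change and then inverting with trans_method='original' reproduces the
# input up to floating point error, provided the series has no zeroes.
def _demo_pct_change_transformer():
    demo = pd.DataFrame({'y': [10.0, 12.0, 9.0, 15.0, 18.0]},
                        index=pd.date_range('2020-01-01', periods=5, freq='D'))
    transformer = PctChangeTransformer()
    changed = transformer.fit_transform(demo)
    restored = transformer.inverse_transform(changed, trans_method='original')
    return (demo - restored).abs().max()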
class CumSumTransformer(object):
"""Cumulative Sum of Data.
Warning:
Inverse transformed values returned will also not return as 'exactly' equals due to floating point imprecision.
inverse_transform can only be applied to the original series, or an immediately following forecast
"""
def fit(self, df):
"""Fits.
Args:
df (pandas.DataFrame): input dataframe
"""
self.last_values = df.tail(1)
self.first_values = df.head(1)
return self
def transform(self, df):
"""Returns changed data
Args:
df (pandas.DataFrame): input dataframe
"""
df = df.cumsum(skipna=True)
return df
def fit_transform(self, df):
"""Fits and Returns *Magical* DataFrame
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def inverse_transform(self, df, trans_method: str = "forecast"):
"""Returns data to original *or* forecast form
Args:
df (pandas.DataFrame): input dataframe
trans_method (str): whether to inverse on original data, or on a following sequence
- 'original' return original data to original numbers
- 'forecast' inverse the transform on a dataset immediately following the original
"""
if trans_method == 'original':
df = pd.concat(
[self.first_values, (df - df.shift(1)).tail(df.shape[0] - 1)], axis=0
)
return df
else:
df_len = df.shape[0]
df =
|
pd.concat([self.last_values, df], axis=0)
|
pandas.concat
|
"""
oil price data source: https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf
"""
import pandas as pd
import numpy as np
import tabula
import requests
import plotly.express as px
import plotly.graph_objects as go
import time
from pandas.tseries.offsets import MonthEnd
import re
import xmltodict
def process_table(table_df):
print("processing the downloaded PDF from PPAC website.")
cols = ['Date', 'Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol',
'Date_D', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']
table_df.columns = cols
table_df.drop(table_df.index[[0,3]],inplace=True)
table_df.drop('Date_D',axis=1,inplace=True)
table_df.dropna(how='any',inplace=True)
table_df = table_df.astype(str)
table_df = table_df.apply(lambda x: x.str.replace(" ", ""))
table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']] = table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']].astype(float)
table_df['Date'] = pd.to_datetime(table_df['Date'])
table_petrol = table_df[['Date','Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol','Kolkata_Petrol']]
table_diesel = table_df[['Date','Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']]
new_cols = [i.replace("_Petrol", "") for i in list(table_petrol.columns)]
table_petrol.columns = new_cols
table_diesel.columns = new_cols
return table_petrol, table_diesel
def get_international_exchange_rates(start_date,end_date):
print("sending request for international exchange rates.")
exchange_dates_url = "https://api.exchangeratesapi.io/history?"
params = {"start_at": start_date, "end_at":end_date, "base":"USD", "symbols":"INR"}
try:
req = requests.get(exchange_dates_url,params=params)
except Exception as e:
print(e)
print("request failed. using the saved data.")
dollar_exchange_rates = pd.read_csv("dollar_exhange_rates.csv")
dollar_exchange_rates['Date'] = pd.to_datetime(dollar_exchange_rates['Date'])
dollar_exchange_rates = dollar_exchange_rates.set_index('Date').sort_index(ascending=False)
return dollar_exchange_rates
else:
print("request successful. processing the data.")
dollar_exchange_rates = pd.DataFrame(req.json()['rates']).T.reset_index()
dollar_exchange_rates['index'] = pd.to_datetime(dollar_exchange_rates['index'])
dollar_exchange_rates = dollar_exchange_rates.set_index('index').sort_index(ascending=False)
dollar_exchange_rates.to_csv("dollar_exhange_rates.csv")
return dollar_exchange_rates
# def merge_data(dollar_exchange_rates, international_oil_prices, oil_price_data):
# print("merging the international oil price data, international exchange rate data and domestic oil price data.")
# trim_int = international_oil_prices.loc[international_oil_prices.index.isin(oil_price_data.index)].dropna()
# oil_price_data = oil_price_data.merge(trim_int, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data = oil_price_data.merge(dollar_exchange_rates, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data['INR'] = oil_price_data['INR'].round(2)
# oil_price_data['INR_pc'] = (((oil_price_data['INR'] - oil_price_data['INR'].iloc[-1])/oil_price_data['INR'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude'] = (oil_price_data['Price'] / 159) * oil_price_data['INR']
# oil_price_data['int_pc'] = (((oil_price_data['Price'] - oil_price_data['Price'].iloc[-1])/oil_price_data['Price'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude_pc'] = (((oil_price_data['rup_lit_crude'] - oil_price_data['rup_lit_crude'].iloc[-1])/oil_price_data['rup_lit_crude'].iloc[-1])*100).round(2)
# return oil_price_data
def download_ppac():
print("sending request for domestic oil price data from PPAC website.")
ppac_url = r"https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf"
try:
req = requests.get(ppac_url)
except Exception as e:
print(e)
print("Request unsuccessful. The saved file will be used.")
else:
with open('DATA/price_data.pdf', 'wb') as file:
file.write(req.content)
print('file saved successfully.')
def prepare_downloaded_file():
print("preparing downloaded file for analysis.")
oil_prices = 'DATA/price_data.pdf'
tables = tabula.read_pdf(oil_prices, pages="all")
proc_dfs = [process_table(i) for i in tables]
petrol_df = pd.concat(i[0] for i in proc_dfs)
diesel_df =
|
pd.concat(i[1] for i in proc_dfs)
|
pandas.concat
|
import pandas as pd
import numpy as np
from datetime import datetime
from math import radians, cos, sin, asin, sqrt
def __init__():
print("Using DataFormatter Class")
def weekday(x):
"""
Figures out the day of the week. Outputs 1 for monday,2 for tuesday and so on.
"""
return (x.weekday()+1)
def is_weekend(x):
"""
Figures out if it was weekend. Outputs 0 or 1 for weekday or weekend.
"""
z = x.weekday()+1
return z//6
def hourly_info(x):
"""
separates the hour from time stamp. Returns hour of time.
"""
n1 = x.hour
return n1
def minute_info(x):
"""
separates the minutes from time stamp. Returns minute of time.
"""
n2 = x.minute
return n2/60
def haversine(x):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1 = x['pickup_longitude']
lat1 = x['pickup_latitude']
lon2 = x['dropoff_longitude']
lat2 = x['dropoff_latitude']
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 3956 # Radius of earth in miles. Use 6371 for kilometers
return c * r
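# Illustrative sketch (added for clarity; the coordinates below are arbitrary
# placeholders, not values from the original data). haversine() expects a row-like
# mapping with the four pickup/dropoff coordinate fields and returns miles (r = 3956).
def _demo_haversine():
    sample_trip = {'pickup_longitude': -73.99, 'pickup_latitude': 40.73,
                   'dropoff_longitude': -73.98, 'dropoff_latitude': 40.75}
    return haversine(sample_trip)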
def formatter(train):
#convert vendor id into one-hot
df_id = pd.DataFrame()
df_id['vendor_id'] = train['vendor_id']//2
df_id[['id']] = train[['id']].copy()
#print(df_id.head())
#convert flag into one-hot
tmp_df_flag = pd.get_dummies(train['store_and_fwd_flag'])
df_flag = pd.DataFrame()
df_flag[['flag_n', 'flag_y']] = tmp_df_flag.copy()
df_flag = df_flag.drop(['flag_y'],axis=1)
df_flag[['id']] = train[['id']].copy()
#print(df_flag.head())
df_weekday = pd.DataFrame()
n = train.shape[0]
#well-format the pickup time
df_weekday['pickup_time'] = pd.to_datetime(train['pickup_datetime'], format="%Y-%m-%d %H:%M:%S")
df_weekday['pickup_weekday'] = df_weekday['pickup_time'].apply(weekday)
df_weekday['is_weekend'] = df_weekday['pickup_time'].apply(is_weekend)
#print(df_weekday['pickup_weekday'].head())
df_weekday['p_hour'] = df_weekday['pickup_time'].apply(hourly_info)
df_weekday['p_min'] = df_weekday['pickup_time'].apply(minute_info)
#print(df_weekday['p_time'].head())
#Convert pick-up hour into categorical variables
df_pickup_time = pd.DataFrame()
#df_pickup_time = pd.get_dummies(df_weekday['p_hour'],prefix='p_hour', prefix_sep='_')
df_weekday['p_hour'] = df_weekday['p_hour']/24
df_pickup_time[['p_hour', 'p_min', 'is_weekend']] = df_weekday[['p_hour', 'p_min','is_weekend']]
df_pickup_time[['id']] = train[['id']].copy()
#Convert pick-up weekday into categorical variables
df_pickup_weekday =
|
pd.DataFrame()
|
pandas.DataFrame
|
import sys
import hydra
import numpy as np
import pandas as pd
import torch
from omegaconf import DictConfig
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification
from ruatd.dataset import RuARDDataset
from ruatd.engine import predict_fn
@hydra.main(config_path="config", config_name="binary")
def main(config: DictConfig):
df_valid = pd.read_csv(config.data.val)
df_test = pd.read_csv(config.data.test)
valid_dataset = RuARDDataset(
text=df_valid.Text.values, target=None, config=config, is_test=True
)
valid_data_loader = DataLoader(
valid_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
pin_memory=config.pin_memory,
)
test_dataset = RuARDDataset(
text=df_test.Text.values,
target=None,
config=config,
is_test=True,
)
test_data_loader = DataLoader(
test_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
pin_memory=config.pin_memory,
)
device = torch.device(config.device)
model = AutoModelForSequenceClassification.from_pretrained(
config.model, num_labels=config.num_classes, ignore_mismatched_sizes=True
)
model.load_state_dict(
torch.load(
f"{config.checkpoint}/{config.classification}_{config.model.split('/')[-1]}.pt"
)
)
model.to(device)
model.eval()
prob_valid, df_valid["Class"] = predict_fn(valid_data_loader, model, config)
prob_test, df_test["Class"] = predict_fn(test_data_loader, model, config)
if config.classification == "multiclass":
class_dict = {
0: "ruGPT3-Small",
1: "ruGPT3-Medium",
2: "OPUS-MT",
3: "M2M-100",
4: "ruT5-Base-Multitask",
5: "Human",
6: "M-BART50",
7: "ruGPT3-Large",
8: "ruGPT2-Large",
9: "M-BART",
10: "ruT5-Large",
11: "ruT5-Base",
12: "mT5-Large",
13: "mT5-Small",
}
else:
class_dict = {0: "H", 1: "M"}
pd.concat(
[df_valid["Id"], pd.DataFrame(prob_valid).rename(columns=class_dict)], axis=1
).to_csv(
f"{config.submission}/{config.classification}/prob_valid_{config.model.split('/')[-1]}.csv",
index=None,
)
pd.concat(
[df_test["Id"],
|
pd.DataFrame(prob_test)
|
pandas.DataFrame
|
"""
Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
'''
Reads in articles from the New York Times API and saves
them to a cache.
'''
import time
import os
import datetime
import argparse
# News archive api
from nytimesarticle import articleAPI
import pandas as pd
import requests
from dateutil.rrule import rrule, MONTHLY
NYT_KEY = open('nyt_key.txt').read().strip()
api = articleAPI(NYT_KEY)
def parse_articles(articles):
'''
This function takes in a response to the NYT api and parses
the articles into a list of dictionaries
'''
news = []
for i in articles['response']['docs']:
if 'abstract' not in i.keys():
continue
if 'headline' not in i.keys():
continue
if 'news_desk' not in i.keys():
continue
if 'pub_date' not in i.keys():
continue
if 'snippet' not in i.keys():
continue
dic = {}
dic['id'] = i['_id']
if i.get('abstract', 'EMPTY') is not None:
dic['abstract'] = i.get('abstract', 'EMPTY').encode("utf8")
dic['headline'] = i['headline']['main'].encode("utf8")
dic['desk'] = i.get('news_desk', 'EMPTY')
if len(i['pub_date']) < 20:
continue
dic['date'] = i['pub_date'][0:10] # cutting time of day.
dic['time'] = i['pub_date'][11:19]
dic['section'] = i.get('section_name', 'EMPTY')
if i['snippet'] is not None:
dic['snippet'] = i['snippet'].encode("utf8")
dic['source'] = i.get('source', 'EMPTY')
dic['type'] = i.get('type_of_material', 'EMPTY')
dic['word_count'] = i.get('word_count', 0)
news.append(dic)
return
|
pd.DataFrame(news)
|
pandas.DataFrame
|
import os
import pandas as pd
import argparse
import sqlite3
import numpy as np
def get_args():
desc = ('Extracts the locations, novelty, and transcript assignments of'
' exons/introns in a TALON database or GTF file. All positions '
'are 1-based.')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--gtf', dest='gtf', default=None,
help = 'TALON GTF file from which to extract exons/introns')
parser.add_argument('--db', dest='db', default=None,
help = 'TALON database from which to extract exons/introns')
parser.add_argument('--ref', dest='ref_gtf',
help = ('GTF reference file (ie GENCODE). Will be used to '
'label novelty.'))
parser.add_argument('--mode', dest='mode',
help= ("Choices are 'intron' or 'exon' (default is 'intron'). "
"Determines whether to include introns or exons in the "
"output"), default='intron')
parser.add_argument('--outprefix', dest='outprefix',
help = 'Prefix for output file')
args = parser.parse_args()
if args.gtf and args.db:
raise Exception('only input gtf or db')
return args
# creates a dictionary of the last field of a gtf
# adapted from <NAME>
def get_fields(tab_fields):
attributes = {}
# remove trailing newline and split by semicolon
description = tab_fields[-1].strip('\n')
description = description.split(';')
# Parse description
for fields in description:
if fields == "" or fields == " ": continue
fields = fields.split()
if fields[0] == '': fields = fields[1:]
key = fields[0].replace('"', '')
val = ' '.join(fields[1:]).replace('"', '')
attributes[key] = val
# Put in placeholders for important attributes (such as gene_id) if they
# are absent
if "gene_id" not in attributes:
attributes["gene_id"] = "NULL"
return attributes
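# Illustrative sketch (added for clarity; the attribute string below is a made-up
# placeholder GTF line, with fabricated gene/transcript IDs).
def _demo_get_fields():
    gtf_line = ('chr1\tSOURCE\ttranscript\t11869\t14409\t.\t+\t.\t'
                'gene_id "GENE0001"; transcript_id "TX0001";\n')
    fields = gtf_line.split('\t')
    # -> {'gene_id': 'GENE0001', 'transcript_id': 'TX0001'}
    return get_fields(fields)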
# create loc_df (for nodes), edge_df (for edges), and t_df (for paths)
def create_dfs_db(db):
# make sure file exists
if not os.path.exists(db):
raise Exception('TALON db file not found. Check path.')
# open db connection
conn = sqlite3.connect(db)
c = conn.cursor()
# loc_df
q = 'SELECT loc.* FROM location loc'
c.execute(q)
locs = c.fetchall()
loc_df = pd.DataFrame(locs,
columns=['location_ID', 'genome_build',
'chrom', 'position'])
# do some df reformatting, add strand
loc_df.drop('genome_build', axis=1, inplace=True)
loc_df.rename({'location_ID': 'vertex_id',
'position': 'coord'},
inplace=True, axis=1)
loc_df.vertex_id = loc_df.vertex_id.map(int)
# edge_df
q = """SELECT * FROM edge """
c.execute(q)
edges = c.fetchall()
edge_df = pd.DataFrame(edges,
columns=['edge_id', 'v1', 'v2',
'edge_type', 'strand'])
edge_df.v1 = edge_df.v1.map(int)
edge_df.v2 = edge_df.v2.map(int)
edge_df['talon_edge_id'] = edge_df.edge_id
edge_df['edge_id'] = edge_df.apply(lambda x: (int(x.v1), int(x.v2)), axis=1)
# t_df
t_df = pd.DataFrame()
# get tid, gid, gname, and paths
q = """SELECT ga.value, ta.value,
t.start_exon, t.jn_path, t.end_exon,
t.start_vertex, t.end_vertex
FROM gene_annotations ga
JOIN transcripts t ON ga.ID=t.gene_ID
JOIN transcript_annotations ta ON t.transcript_ID=ta.ID
WHERE ta.attribute='transcript_id'
AND (ga.attribute='gene_name'
OR ga.attribute='gene_id')
"""
c.execute(q)
data = c.fetchall()
# get fields from each transcript and add to dataframe
gids, tids, paths = zip(*[(i[0], i[1], i[2:]) for i in data[::2]])
gnames = [i[0] for i in data[1::2]]
paths = get_db_edge_paths(paths)
t_df['tid'] = np.asarray(tids)
t_df['path'] = np.asarray(paths)
t_df = create_dupe_index(t_df, 'tid')
t_df = set_dupe_index(t_df, 'tid')
# furnish the last bit of info in each df
t_df['path'] = [[int(n) for n in path]
for path in get_db_vertex_paths(paths, edge_df)]
loc_df = create_dupe_index(loc_df, 'vertex_id')
loc_df = set_dupe_index(loc_df, 'vertex_id')
edge_df.drop('talon_edge_id', axis=1, inplace=True)
edge_df = create_dupe_index(edge_df, 'edge_id')
edge_df = set_dupe_index(edge_df, 'edge_id')
return loc_df, edge_df, t_df
# create loc_df (nodes), edge_df (edges), and t_df (transcripts) from gtf
# adapted from <NAME> and TALON
def create_dfs_gtf(gtf_file):
# make sure file exists
if not os.path.exists(gtf_file):
raise Exception('GTF file not found. Check path.')
# depending on the strand, determine the stard and stop
# coords of an intron or exon
def find_edge_start_stop(v1, v2, strand):
if strand == '-':
start = max([v1, v2])
stop = min([v1, v2])
elif strand == '+':
start = min([v1, v2])
stop = max([v1, v2])
return start, stop
# dictionaries to hold unique edges and transcripts
transcripts = {}
exons = {}
with open(gtf_file) as gtf:
for line in gtf:
# ignore header lines
if line.startswith('#'):
continue
# split each entry
line = line.strip().split('\t')
# get some fields from gtf that we care about
chrom = line[0]
entry_type = line[2]
start = int(line[3])
stop = int(line[4])
strand = line[6]
fields = line[-1]
# transcript entry
if entry_type == "transcript":
attributes = get_fields(line)
tid = attributes['transcript_id']
gid = attributes['gene_id']
# add transcript to dictionary
transcript = {tid: {'gid': gid,
'tid': tid,
'strand': strand,
'exons': []}}
transcripts.update(transcript)
# exon entry
elif entry_type == "exon":
attributes = get_fields(line)
start, stop = find_edge_start_stop(start, stop, strand)
eid = '{}_{}_{}_{}_exon'.format(chrom, start, stop, strand)
tid = attributes['transcript_id']
# add novel exon to dictionary
if eid not in exons:
edge = {eid: {'eid': eid,
'chrom': chrom,
'v1': start,
'v2': stop,
'strand': strand}}
exons.update(edge)
# add this exon to the transcript's list of exons
if tid in transcripts:
transcripts[tid]['exons'].append(eid)
# once we have all transcripts, make loc_df
locs = {}
vertex_id = 0
for edge_id, edge in exons.items():
chrom = edge['chrom']
strand = edge['strand']
v1 = edge['v1']
v2 = edge['v2']
# exon start
key = (chrom, v1)
if key not in locs:
locs[key] = vertex_id
vertex_id += 1
# exon end
key = (chrom, v2)
if key not in locs:
locs[key] = vertex_id
vertex_id += 1
# add locs-indexed path to transcripts, and populate edges
edges = {}
for _,t in transcripts.items():
t['path'] = []
strand = t['strand']
t_exons = t['exons']
for i, exon_id in enumerate(t_exons):
# pull some information from exon dict
exon = exons[exon_id]
chrom = exon['chrom']
v1 = exon['v1']
v2 = exon['v2']
strand = exon['strand']
# add current exon and subsequent intron
# (if not the last exon) for each exon to edges
key = (chrom, v1, v2, strand)
v1_key = (chrom, v1)
v2_key = (chrom, v2)
edge_id = (locs[v1_key], locs[v2_key])
if key not in edges:
edges[key] = {'edge_id': edge_id, 'edge_type': 'exon'}
# add exon locs to path
t['path'] += list(edge_id)
# if this isn't the last exon, we also need to add an intron
# this consists of v2 of the prev exon and v1 of the next exon
if i < len(t_exons)-1:
next_exon = exons[t_exons[i+1]]
v1 = next_exon['v1']
key = (chrom, v2, v1, strand)
v1_key = (chrom, v1)
edge_id = (locs[v2_key], locs[v1_key])
if key not in edges:
edges[key] = {'edge_id': edge_id, 'edge_type': 'intron'}
# turn transcripts, edges, and locs into dataframes
locs = [{'chrom': key[0],
'coord': key[1],
'vertex_id': vertex_id} for key, vertex_id in locs.items()]
loc_df = pd.DataFrame(locs)
edges = [{'v1': item['edge_id'][0],
'v2': item['edge_id'][1],
'strand': key[3],
'edge_id': item['edge_id'],
'edge_type': item['edge_type']} for key, item in edges.items()]
edge_df = pd.DataFrame(edges)
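# Minimal usage sketch (illustrative; assumes the function goes on to assemble t_df from each
# transcript's 'path' and returns the three frames, mirroring the database loader above;
# 'annotation.gtf' is a placeholder path):
#   loc_df, edge_df, t_df = create_dfs_gtf('annotation.gtf')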
from flask import Flask, request, render_template
from Component_Classification_Features import *
from Component_Identification_Features import *
from Feedback_System import *
import os
import pandas as pd
import pickle
import nltk
app = Flask(__name__)
dir = os.path.dirname(__file__)
pickled_scripts_folder = os.path.join(dir, 'PickledScripts')
list_of_pos_tags = [',','.',':','``',"''",'CC','CD','DT','EX','FW','IN','JJ','JJR','JJS','LS','MD','NN','NNS','NNP','NNPS','PDT','POS','PRP','PRP$','RB','RBR','RBS','RP','SYM','TO','UH','VB','VBD','VBG','VBN','VBP','VBZ','WDT','WP','WP$','WRB']
nltk.download('punkt')
#Default Route to submission page
@app.route("/")
def form():
return render_template('essay_submission_form.html')
#Route to feedback page after the submit button has been pressed on the submission page
@app.route('/', methods=['POST'])
def form_process():
#receive text from text area
text = request.form['text']
#runs data preprocessing on the essay and sets up the DataFrame columns
essay_dataframe = data_preprocess(text)
column_names = essay_dataframe.columns.to_list()
#Perform Argument Mining functions
component_identification(essay_dataframe)
component_classification(essay_dataframe)
#Receive Feedback on essay from Feedback System module
component_count_feedback_list = component_count_feedback(essay_dataframe)
paragraph_count_feedback_list = paragraph_component_count_feedback(essay_dataframe)
paragraph_flow_feedback_list = paragraph_flow_feedback(essay_dataframe)
argumentative_sentence_feedback_list = argumentative_to_none_argumentative_feedback(essay_dataframe)
sentence_breakdown_list = results_feedback(essay_dataframe)
return render_template('essay_feedback.html', overall=component_count_feedback_list, paragraph_components = paragraph_count_feedback_list, paragraph_flows = paragraph_flow_feedback_list, argumentative = argumentative_sentence_feedback_list, breakdown = sentence_breakdown_list)
def component_identification(essay):
#run all feature functions on a copy of the essay so the feature columns are not permanently appended to the original essay dataframe
copy_of_essay = essay.copy()
position_features(copy_of_essay)
token_features(copy_of_essay)
similarity_features(copy_of_essay)
#open trained naive bayes model from pickle file
model_file = open(os.path.join(pickled_scripts_folder,'component_identification_model.pickle'), "rb")
model = pickle.load(model_file)
#open trained tf-idf vectorizer from pickle file
tfidf_file = open(os.path.join(pickled_scripts_folder,'tfidf.pickle'), "rb")
tfidf = pickle.load(tfidf_file)
#open trained Part-Of-Speech encoder from pickle file
pos_encoder_file = open(os.path.join(pickled_scripts_folder,'pos_encoder.pickle'), "rb")
pos_encoder = pickle.load(pos_encoder_file)
#close files
model_file.close()
tfidf_file.close()
pos_encoder_file.close()
#get utilised features from essay dataframe
feature_columns=['Sentence', 'Sentence Similarity To Prompt', 'Most Common POS Token']
essay_featurised = copy_of_essay.loc[:, feature_columns]
#perform tf-idf vectorisation feature
essay_sentences = essay_featurised['Sentence']
essay_sentences_vectorized = tfidf.transform(essay_sentences)
essay_vectorized_dataframe = pd.DataFrame(essay_sentences_vectorized.todense(), columns=tfidf.get_feature_names())
essay_concat = pd.concat([essay_featurised, essay_vectorized_dataframe], axis=1)
essay_final = essay_concat.drop(['Sentence'], axis=1)
#encode the POS tags
essay_pos_encoded = pos_encoder.transform(copy_of_essay['Most Common POS Token'])
essay_final['Most Common POS Token'] = essay_pos_encoded
#perform predictions and append them to the actual essay dataframe - NOT the copy.
predictions = model.predict(essay_final)
essay["Argumentative Label"] = predictions
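# Illustrative sketch of the featurise -> vectorise -> concatenate -> predict pattern used above,
# on hypothetical toy data (the real tfidf vectoriser and model come from the pickle files):
#   toy = pd.DataFrame({'Sentence': ['School uniforms should be mandatory.']})
#   vec = tfidf.transform(toy['Sentence'])
#   features = pd.concat([toy.drop(columns=['Sentence']),
#                         pd.DataFrame(vec.todense(), columns=tfidf.get_feature_names())], axis=1)
#   model.predict(features)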
def component_classification(essay):
#get a copy of the essay dataframe (as above) and remove all non-argumentative sentences from the copy to cut down on processing time.
copy_of_essay = essay.copy()
non_argumentative_sentences = copy_of_essay.index[copy_of_essay["Argumentative Label"] == 0]
copy_of_essay.drop(non_argumentative_sentences, inplace = True)
copy_of_essay.reset_index(drop=True, inplace=True)
#perform feature functions
tokenisation_features(copy_of_essay)
part_of_speech_features(copy_of_essay)
positional_features(copy_of_essay)
first_person_indicators_features(copy_of_essay)
forward_indicator_feature(copy_of_essay)
backward_indicator_feature(copy_of_essay)
thesis_indicator_feature(copy_of_essay)
#load trained naive bayes model from pickle file
model_file = open(os.path.join(pickled_scripts_folder,'component_classification_model.pickle'), "rb")
model = pickle.load(model_file)
#load trained lemmatized tf-idf vectorizer from pickle file
tfidf_file = open(os.path.join(pickled_scripts_folder,'tfidf_lemmatized.pickle'), "rb")
tfidf = pickle.load(tfidf_file)
model_file.close()
tfidf_file.close()
#extract features from essay dataframe - loop allows all of the possible POS Tags to be neatly and quickly retrieved.
feature_columns=['Lemmatized Sentence','Sentence Within Introduction', 'Sentence Within Conclusion', 'First Sentence In Paragraph', 'Last Sentence In Paragraph','Number of Proceeding Components', 'Number of Preceding Components', 'First Person Indicator Present', 'First Person Indicator Count', 'Forward Indicator Present', 'Backward Indicator Present', 'Thesis Indicator Present']
for curr_tag in list_of_pos_tags:
feature_columns.append("Distribution of " + curr_tag + " POS Tag")
essay_featurised = copy_of_essay.loc[:, feature_columns]
#perform tf-idf vectorisation feature on essay dataframe
essay_sentences = essay_featurised['Lemmatized Sentence']
essay_sentences_vectorized = tfidf.transform(essay_sentences)
essay_vectorized_dataframe = pd.DataFrame(essay_sentences_vectorized.todense(), columns=tfidf.get_feature_names())
essay_concat = pd.concat([essay_featurised, essay_vectorized_dataframe], axis=1)
essay_final = essay_concat.drop(['Lemmatized Sentence'], axis=1)
#perform predictions, then insert the "None" label at each index where a non-argumentative sentence was removed
predictions = model.predict(essay_final)
predictions_list = predictions.tolist()
for index in non_argumentative_sentences:
predictions_list.insert(index, "None")
for index, component in enumerate(predictions_list):
if component == 1:
predictions_list[index] = "MajorClaim"
elif component == 0:
predictions_list[index] = "Claim"
elif component == 2:
predictions_list[index] = "Premise"
#append predictions to essay dataframe
essay["Argument Component Type"] = predictions_list
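# Worked example of the "None" re-insertion above (illustrative): if sentences 1 and 3 of a
# five-sentence essay were non-argumentative and the classifier returned [1, 2, 0] for the rest,
# inserting "None" at indices 1 then 3 gives [1, "None", 2, "None", 0], which the mapping turns
# into ["MajorClaim", "None", "Premise", "None", "Claim"] in original sentence order.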
def data_preprocess(essay_text):
end_dataframe = pd.DataFrame(columns = ['Essay ID','Sentence', 'Source Paragraph', 'Paragraph Number'])
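# Hypothetical continuation (not the author's code): data_preprocess presumably splits the raw
# essay text into paragraphs and sentences (e.g. with nltk.sent_tokenize) and appends one row per
# sentence before returning end_dataframe, for example:
#   for p_num, paragraph in enumerate(essay_text.split('\n')):
#       for sentence in nltk.sent_tokenize(paragraph):
#           end_dataframe.loc[len(end_dataframe)] = ['essay_1', sentence, paragraph, p_num]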
import numpy as np
import pandas as pa
import requests, sys
import json
from Bio.Seq import Seq
import os
class TF3DScan:
def __init__(self,genes,PWM_directory,seqs=None):
self.gene_names=genes
self.PWM_dir=PWM_directory
self.seq=None
self.PWM=None
self.weights=None
self.proteins=None
self.initialize()
def initialize(self):
self.seq=self.get_seq_by_name(self.gene_names)
self.PWM=self.convolutional_filter_for_each_TF(self.PWM_dir)
self.weights, self.proteins= self.get_Weights(self.PWM)
return
def softmax(self,x):
e_x = np.exp(x - np.max(x))
return (e_x / e_x.sum(axis=0))
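# Note: subtracting np.max(x) before exponentiating is the standard numerical-stability trick and
# does not change the result; e.g. softmax([1.0, 2.0, 3.0]) is approximately [0.09, 0.245, 0.665].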
def convolutional_filter_for_each_TF(self,PWM_directory):
path = PWM_directory
#print(path)
filelist = os.listdir(path)
TF_kernel_PWM={}
for file in filelist:
TF_kernel_PWM[file.split("_")[0]] = pa.read_csv(path+file, sep="\t", skiprows=[0], header=None)
return TF_kernel_PWM
def get_reverse_scaning_weights(self, weight):
return np.flipud(weight[:,[3,2,1,0]])
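# Illustrative note (assuming PWM columns are ordered A, C, G, T): np.flipud reverses the motif
# positions and the column permutation [3, 2, 1, 0] swaps A<->T and C<->G, so the returned matrix
# scores the reverse complement of the original motif.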
def get_Weights(self, filter_PWM_human):
#forward and reverse scanning matrix with reverse complement
#forward_and_reverse_direction_filter_list=[{k:np.dstack((filter_PWM_human[k],self.get_reverse_scaning_weights(np.array(filter_PWM_human[k]))))} for k in filter_PWM_human.keys()]
#forward and reverse scanning with same matrix
forward_and_reverse_direction_filter_list=[{k:np.dstack((filter_PWM_human[k],filter_PWM_human[k]))} for k in filter_PWM_human.keys()]
forward_and_reverse_direction_filter_dict=dict(j for i in forward_and_reverse_direction_filter_list for j in i.items())
unequefilter_shape = pa.get_dummies([filter_PWM_human[k].shape for k in filter_PWM_human])
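# Illustrative note: pa.get_dummies over the list of PWM shapes one-hot encodes each distinct
# (length, 4) shape, which presumably lets later code group filters that share the same motif
# length before stacking them into convolution weights.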
import datetime
import glob
import os
from scipy import stats
import numpy as np
from dashboard.models import Location, Report
from dashboard.libraries import constants
import pandas as pd
# Update the daily report
def update_report(row_report_date: datetime.date):
# Get the column names as a dictionary
column_names = get_column_names(row_report_date)
column_name_province_state = column_names[constants.COLUMN_KEYS[0]]
column_name_country_region = column_names[constants.COLUMN_KEYS[1]]
column_name_latitude = column_names[constants.COLUMN_KEYS[2]]
column_name_longitude = column_names[constants.COLUMN_KEYS[3]]
column_name_confirmed = column_names[constants.COLUMN_KEYS[4]]
column_name_deaths = column_names[constants.COLUMN_KEYS[5]]
column_name_recovered = column_names[constants.COLUMN_KEYS[6]]
if constants.COLUMN_KEYS[7] in column_names.keys():
column_name_active = column_names[constants.COLUMN_KEYS[7]]
else:
column_name_active = None
# Get the report date as a string
str_report_date = row_report_date.strftime(constants.DATE_FORMAT_REPORT_CSV)
# Read the CSV file for the specified date with pandas
csv_file_name = constants.DIRECTORY_PATH_REPORT_CSV + str_report_date + '.csv'
df_today_report = pd.read_csv(csv_file_name, usecols=column_names.values())
# ------ Imputation ------
# Fill blank latitude/longitude rows with 0
# If the CSV has no column for active cases, derive it
if column_name_active is None:
df_today_report[constants.COLUMNS_ACTIVE_CASES_04] = df_today_report[column_name_confirmed] - df_today_report[column_name_deaths] - df_today_report[column_name_recovered]
column_name_active = constants.COLUMNS_ACTIVE_CASES_04
df_today_report[column_name_latitude] = df_today_report[column_name_latitude].fillna(0)
df_today_report[column_name_longitude] = df_today_report[column_name_longitude].fillna(0)
# Where province/state is blank, fill in the 'Country_Region' value
df_today_report[column_name_province_state] = df_today_report[column_name_province_state].fillna(
df_today_report[column_name_country_region])
# ------ Imputation complete ------
# ------ DataFrame preprocessing ------
# Prepare a DataFrame for computing sums per province/state and country
df_sum = df_today_report[[
column_name_province_state,
column_name_country_region,
column_name_confirmed,
column_name_deaths,
column_name_recovered,
column_name_active
]]
# Compute the sums per province/state and country
df_sum = df_sum.groupby([column_name_province_state, column_name_country_region]).sum()
# Prepare a DataFrame for computing means per province/state and country
df_average = df_today_report[[
column_name_province_state,
column_name_country_region,
column_name_latitude,
column_name_longitude,
]]
df_mean = df_average.groupby([column_name_province_state, column_name_country_region]).mean()
# Merge the two DataFrames
df = pd.merge(df_sum, df_mean, on=[column_name_province_state, column_name_country_region], how='inner')
# Drop invalid rows
df = df[df[column_name_active] >= 0]
df[column_name_active] = df[column_name_confirmed] - df[column_name_deaths]- df[column_name_recovered]
# Add Report_Date
df['report_date'] = row_report_date
for index, row_data in df.iterrows():
row_province_state = index[0]
row_country_region_name = index[1]
row_report_date = row_report_date
row_latitude = row_data[column_name_latitude]
row_longitude = row_data[column_name_longitude]
row_active_cases = row_data[column_name_active]
row_total_deaths = row_data[column_name_deaths]
row_total_recovered = row_data[column_name_recovered]
row_total_cases = row_data[column_name_confirmed]
# Get the Location record; INSERT it if it does not exist
Location.objects.get_or_create(
province_state=row_province_state,
country_region_name=row_country_region_name
)
# Build the Report model instance to UPSERT
upserted_report = Report(
report_date=row_report_date,
location=Location.objects.get(
province_state=row_province_state,
country_region_name=row_country_region_name
),
latitude=row_latitude,
longitude=row_longitude,
total_cases=row_total_cases,
total_deaths=row_total_deaths,
total_recovered=row_total_recovered,
active_cases=row_active_cases
)
# Check whether the report already exists in the report table
record_report = Report.objects.filter(
report_date=upserted_report.report_date,
location__province_state=upserted_report.location.province_state,
location__country_region_name=upserted_report.location.country_region_name,
)
# Upsert
if len(record_report) == 0:
upserted_report.save()
else:
record_report.update(
report_date=upserted_report.report_date,
location=upserted_report.location,
latitude=upserted_report.latitude,
longitude=upserted_report.longitude,
total_cases=upserted_report.total_cases,
total_deaths=upserted_report.total_deaths,
total_recovered=upserted_report.total_recovered,
active_cases=upserted_report.active_cases
)
return True
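# Note: the filter/save/update sequence above could also be written with Django's
# update_or_create (illustrative alternative, not a behavioural change):
#   Report.objects.update_or_create(
#       report_date=upserted_report.report_date,
#       location=upserted_report.location,
#       defaults={'latitude': row_latitude, 'longitude': row_longitude,
#                 'total_cases': row_total_cases, 'total_deaths': row_total_deaths,
#                 'total_recovered': row_total_recovered, 'active_cases': row_active_cases})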
# Get the CSV column names for the given report date as a dictionary
def get_column_names(report_date: datetime.date):
format_change_date = datetime.date(year=2020, month=3, day=22)
if report_date.month == 2:
csv_column_names = constants.READ_COLUMNS_04
elif report_date < format_change_date:
csv_column_names = constants.READ_COLUMNS_03
else:
csv_column_names = constants.READ_COLUMNS_04
read_column_names = {}
for i in np.arange(0, len(csv_column_names)):
read_column_names[constants.COLUMN_KEYS[i]] = csv_column_names[i]
return read_column_names
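# Equivalent construction, for illustration: read_column_names = dict(zip(constants.COLUMN_KEYS, csv_column_names))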
# Show the latest reports per country, limited to the top (max_countries) in descending order of total cases
def view_latest_reports_by_country(max_countries):
# Get today's reports via filter
latest_reports = (Report.objects.filter(report_date__day=23)).order_by('total_cases').reverse()
# Load the reports from the queryset into a DataFrame
df_latest_reports = pd.DataFrame(list(latest_reports.values(
'report_date',
'location__province_state',
'location__country_region_name',
'latitude',
'longitude',
'total_cases',
'total_deaths',
'total_recovered',
'active_cases',
)))
df_latest_reports = (df_latest_reports.groupby('location__country_region_name', as_index=False).sum()).sort_values('total_cases', ascending=False)
# Apply the upper limit on the number of countries displayed
if max_countries is not None:
df_latest_reports = df_latest_reports[0:max_countries]
dict_latest_reports = df_latest_reports.to_dict('records')
return dict_latest_reports
# Get the latest worldwide summary report
# Contents: worldwide total cases / active cases / total deaths / total recovered
def get_world_summary_report(report_date: datetime.date):
# Get the reports for the latest report date (and for the previous day)
yesterday = report_date + datetime.timedelta(days=-1)
world_summary_reports = (Report.objects.filter(report_date=report_date)).order_by('total_cases')
world_summary_reports_before_day = (Report.objects.filter(report_date=yesterday)).order_by('total_cases')
# Load the reports from the queryset into a DataFrame
df_world_summary_reports = pd.DataFrame(list(world_summary_reports.values(
'total_cases',
'total_deaths',
'total_recovered',
'active_cases',
)))
# Previous day's figures
df_world_summary_reports_before_day = pd.DataFrame(list(world_summary_reports_before_day.values(
'total_cases',
'total_deaths',
'total_recovered',
'active_cases',
)))
# Sum each DataFrame column-wise (this yields a Series)
series_world_summary_reports = df_world_summary_reports.sum()
series_world_summary_reports_before_day = df_world_summary_reports_before_day.sum()
# Compute day-over-day differences
series_world_summary_reports['diff_total_cases'] = series_world_summary_reports['total_cases'] - \
series_world_summary_reports_before_day['total_cases']
series_world_summary_reports['diff_total_deaths'] = series_world_summary_reports['total_deaths'] - \
series_world_summary_reports_before_day['total_deaths']
series_world_summary_reports['diff_total_recovered'] = series_world_summary_reports['total_recovered'] - \
series_world_summary_reports_before_day[
'total_recovered']
series_world_summary_reports['diff_active_cases'] = series_world_summary_reports['active_cases'] - \
series_world_summary_reports_before_day['active_cases']
series_world_summary_reports['report_date'] = report_date
# Convert the Series to a dict and return it
return series_world_summary_reports.to_dict()
# Get chart data for the worldwide report
def get_world_report_report(start_date: datetime.date, end_date: datetime.date, term):
# Fetch reports in the date range
reports = Report.objects.filter(report_date__range=(start_date, end_date)).order_by('report_date')
# Filter by day of week
reports = reports.filter(report_date__week_day=5)
# Convert to a DataFrame
df = pd.DataFrame(list(reports.values(
'location__country_region_name',
'total_cases',
'total_deaths',
'total_recovered',
'active_cases',
'report_date',
)))
df = df.groupby(['report_date'], as_index=False).sum()
df.to_csv()
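# Note: to_csv() is called without a path, so pandas returns the CSV text as a string; as written
# the function neither saves nor returns it.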
# Get report details
def get_report_detail(start_date: datetime.date, end_date: datetime.date, term, countries):
# Fetch reports in the date range
reports = Report.objects.filter(report_date__range=(start_date, end_date)).order_by('report_date')
# Filter by day of week
# reports = reports.filter(report_date__week_day=5)
# Filter by country
if countries is not None:
reports = reports.filter(location__country_region_name__in=countries)
# Convert to a DataFrame
df = pd.DataFrame(list(reports.values(
'location__country_region_name',
'total_cases',
'total_deaths',
'total_recovered',
'active_cases',
'report_date',
)))
# If no countries were specified, extract the top 5 by total cases
if countries is None:
# Top 5 countries by cumulative cases as of the latest report date
top_5_countries = df.loc[df['report_date'] == pd.to_datetime(end_date)].groupby(['location__country_region_name'], as_index=False).sum().sort_values('total_cases', ascending=False).head(5)['location__country_region_name'].values
# Keep only the rows for those top 5 countries
df = df[df['location__country_region_name'].isin(top_5_countries)]
df_report_detail = pd.DataFrame()
# Split the DataFrame by country (per-date aggregation follows)
dfs = df.groupby(['location__country_region_name'])
# Aggregate per country
for _country_region_name, _df in dfs:
_df = _df.groupby(['report_date'], as_index=False).sum()
_df['country_region_name'] = _country_region_name
# New cases
_df['new_cases'] = _df['total_cases'].diff()
# New cases (moving average)
_df['new_cases_sma'] = _df['new_cases'].rolling(term).mean()
# New deaths
_df['new_deaths'] = _df['total_deaths'].diff()
# Growth rate of new deaths
_df['new_deaths_ratio'] = _df['total_deaths'].pct_change(1).replace([-np.inf, np.inf], np.NaN).fillna(0)
# New deaths (moving average)
_df['new_deaths_sma'] = _df['new_deaths'].rolling(term).mean()
# New recoveries
_df['new_recovered'] = _df['total_recovered'].diff()
# New recoveries (moving average)
_df['new_recovered_sma'] = _df['new_recovered'].rolling(term).mean()
# Concatenate the DataFrames vertically
df_report_detail = pd.concat([df_report_detail, _df], axis=0)
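# Design note (illustrative): appending inside the loop re-copies df_report_detail on every
# iteration; collecting the per-country frames in a list and concatenating once at the end,
# e.g. pd.concat(frames, axis=0, ignore_index=True), avoids the repeated copying.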
import pandas as pd
import numpy as np
import os
def load_states():
# read US states
f = open('US Provinces.txt', 'r')
states = set()
for line in f.readlines():
l = line.strip('\n')
if l != '':
states.add(l)
return states
def splitandwrite(df, path, st):
business = 'yelp_academic_dataset_business.csv'
review = 'yelp_academic_dataset_review.csv'
checkins = 'yelp_academic_dataset_checkin.csv'
tip = 'yelp_academic_dataset_tip.csv'
user = 'yelp_academic_dataset_user.csv'
# make train/test path if they don't exist
path_train = path + 'train'
if not os.path.exists(path_train):
os.mkdir(path_train)
path_test = path + 'test'
if not os.path.exists(path_test):
os.mkdir(path_test)
# cross lookup on review dataset and split by 80/20
review_df = pd.read_csv(review)
# filter review by states
review_df = review_df[review_df['business_id'].isin(df['business_id'])]
# find the 80th-percentile cutoff date
date_column = list(review_df.sort_values('date')['date'])
index = range(0, len(date_column) + 1)
cut = date_column[int(np.percentile(index, 80))]
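# i.e. 'cut' is the review date at the 80th percentile of the chronologically sorted reviews;
# reviews strictly before it form the training split and the rest form the test split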
"""
Section Review
"""
# cut by date
review_train = review_df[review_df['date'] < cut]
review_test = review_df[review_df['date'] >= cut]
# write to train
review_train.to_csv(path_train + '/' + st + '_train_' + review, index=False)
# write to test
review_test.to_csv(path_test + '/' + st + '_test_' + review, index=False)
"""
Section Business
"""
busi_train = df[df['business_id'].isin(review_train['business_id'])]
busi_test = df[df['business_id'].isin(review_test['business_id'])]
# write to train
busi_train.to_csv(path_train + '/' + st + '_train_' + business, index=False)
# write to test
busi_test.to_csv(path_test + '/' + st + '_test_' + business, index=False)
"""
Section Checkin
"""
checkindf = pd.read_csv(checkins)
checkin_train = checkindf[checkindf['business_id'].isin(busi_train['business_id'])]
checkin_test = checkindf[checkindf['business_id'].isin(busi_test['business_id'])]
# write to train
checkin_train.to_csv(path_train + '/' + st + '_train_' + checkins, index=False)
# write to test
checkin_test.to_csv(path_test + '/' + st + '_test_' + checkins, index=False)
"""
Section User
"""
userdf = pd.read_csv(user)
user_train = userdf[userdf['user_id'].isin(review_train['user_id'])]
user_test = userdf[userdf['user_id'].isin(review_test['user_id'])]
# write to train
user_train.to_csv(path_train + '/' + st + '_train_' + user, index=False)
# write to test
user_test.to_csv(path_test + '/' + st + '_test_' + user, index=False)
"""
Section Tip
"""
tipdf = pd.read_csv(tip)
tip_train = tipdf[tipdf['user_id'].isin(user_train['user_id'])]
tip_train = tip_train[tip_train['business_id'].isin(busi_train['business_id'])]
tip_test = tipdf[tipdf['user_id'].isin(user_test['user_id'])]
tip_test = tip_test[tip_test['business_id'].isin(busi_test['business_id'])]
# write to train
tip_train.to_csv(path_train + '/' + st + '_train_' + tip, index=False)
# write to test
tip_test.to_csv(path_test + '/' + st + '_test_' + tip, index=False)
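# Illustrative usage (hypothetical paths and state code; assumes the business CSV has a 'state' column):
#   business_df = pd.read_csv('yelp_academic_dataset_business.csv')
#   az_df = business_df[business_df['state'] == 'AZ']
#   splitandwrite(az_df, './AZ/', 'AZ')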
def splitandwrite2(df, path, st, bt):
business = 'yelp_academic_dataset_business.csv'
review = 'yelp_academic_dataset_review.csv'
checkins = 'yelp_academic_dataset_checkin.csv'
tip = 'yelp_academic_dataset_tip.csv'
user = 'yelp_academic_dataset_user.csv'
if not os.path.exists(path):
os.mkdir(path)
if not os.path.exists(path + bt):
os.mkdir(path + bt)
# make train/test/valid paths if they don't exist
path_train = path + bt + '/train'
if not os.path.exists(path_train):
os.mkdir(path_train)
path_test = path + bt + '/test'
if not os.path.exists(path_test):
os.mkdir(path_test)
path_valid = path + bt + '/valid'
if not os.path.exists(path_valid):
os.mkdir(path_valid)
# cross lookup on review dataset and split by 80/10/10
review_df = pd.read_csv(review)