| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
"""NEDSS data duplicate person-record detector.
This program automates the process of identifying potential duplicate person-records
in NEDSS data. The program takes two command line arguments (1) the filepath
for NEDSS data; (2) the Identifier of the first person-record considered new.
This application will then compare each of the new records to all the preceding records.
Example: for a new batch of NEDSS data, we need to determine the Identifier value for
the first record in that dataset that should be considered new. E.g., we may open the
NEDSS data file and determine that the new data starts at Identifier value 11960. Thus:
py nedss_duplicate_person_record_detector.py "09-29-20_Positive Cases.xlsx" -i 11960
The output from the above command is saved as "09-29-20_Positive Cases_w_DUPE_UUID.xlsx"
In this file the column dupe_uuid stores potential duplicate identifiers. Filtering
this data to only the records that contain non-empty values (and sorting on dupe_uuid)
will present an ordered list of potential dupes for manual review.
"""
import argparse # for command line arg parsing
from collections import Counter
import jellyfish # for phonetic representation
import openpyxl as xl # for writing to xls file
import pandas as pd # for data management/aggs.
from scipy import sparse # for connected comps.
import textdistance # for fuzzy string matching
from tqdm import tqdm # for process status bar
from typing import List
def first_name_similarity_scorer(a: str, b: str):
"""Compares two first name strings, returns 1 if they match, 0 otherwise.
Uses Jaro-Winkler Distance Algorithm (JWDA) (en.wikipedia.org/wiki/Jaro–Winkler_distance).
JWDA "measurement scale is 0.0 to 1.0, where 0.0 is the least likely and 1.0 is a positive match.
For our purposes, anything below a 0.8 is not considered useful." (source: SAP blog)
"""
jaro = textdistance.jaro_winkler(a, b)
if jaro > 0.8:
return 1
else:
return 0
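# Illustrative check of the scorer above (a sketch; the name pair is hypothetical and the
# Jaro-Winkler values are approximate, computed with textdistance as imported above):
#   first_name_similarity_scorer("MARTHA", "MARHTA")   # jaro_winkler ~0.96 > 0.8 -> 1
#   first_name_similarity_scorer("MARTHA", "GEORGE")   # jaro_winkler ~0.44 <= 0.8 -> 0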
def last_name_similarity_scorer(a: str, b: str):
"""Compares two first last strings, returns 1 if they match, 0 otherwise.
Uses Jaro-Winkler Distance Algorithm (JWDA) (en.wikipedia.org/wiki/Jaro–Winkler_distance).
JWDA "measurement scale is 0.0 to 1.0, where 0.0 is the least likely and 1.0 is a positive match.
For our purposes, anything below a 0.8 is not considered useful." (source: SAP blog)
"""
jaro = textdistance.jaro_winkler(a, b)
if jaro > 0.8:
return 1
else:
return 0
def address_similarity_scorer(a: str, b: str):
"""Compares two address strings, returns 1 if they match, 0 otherwise.
Uses Jaro-Winkler Distance Algorithm (JWDA) (en.wikipedia.org/wiki/Jaro–Winkler_distance).
JWDA "measurement scale is 0.0 to 1.0, where 0.0 is the least likely and 1.0 is a positive match.
"""
jaro = textdistance.jaro_winkler(a, b)
if jaro > 0.9:
return 1
else:
return 0
def age_similarity_scorer(age_1, age_2):
"""Compares two ages, returns 0 if they match, returns penalty of -1 otherwise.
Conservative assumptions:
1) If age cannot be cleanly cast to int, consider comparators to be a match
2) If ages are within 2 years, consider comparators to be a match
"""
try:
age_1 = int(age_1)
    except (TypeError, ValueError):
return 0
try:
age_2 = int(age_2)
    except (TypeError, ValueError):
return 0
# client requested age tolerance of 2 years:
if abs(age_1 - age_2) <= 2:
return 0
else:
return -1
def gender_similarity_scorer(gender_1: str, gender_2: str):
"""Compares two gender strings, returns 0 if they match, returns penalty of -1 otherwise.
    Conservative assumption: if gender is None or an empty string, consider it a match against the comparator.
"""
if gender_1 == gender_2:
return 0
elif gender_1 is None or gender_2 is None:
return 0
elif gender_1 == "" or gender_2 == "":
return 0
else:
return -1
def preprocess(nedss_df: pd.DataFrame):
"""Translates name and address fields into phonetic representations."""
nedss_df.columns = nedss_df.columns.str.strip()
nedss_df["phonetic_first_name"] = nedss_df["First Name"].apply(jellyfish.metaphone)
nedss_df["phonetic_last_name"] = nedss_df["Last Name"].apply(jellyfish.metaphone)
nedss_df["phonetic_address"] = nedss_df["Address"].apply(jellyfish.metaphone)
return nedss_df
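# Illustrative sketch of the phonetic step (hypothetical records; the exact metaphone codes
# are intentionally not shown because they depend on jellyfish's implementation):
#   demo = pd.DataFrame({"First Name": ["Jon", "John"],
#                        "Last Name": ["Smith", "Smyth"],
#                        "Address": ["12 Main St", "12 Main Street"]})
#   demo = preprocess(demo)
#   # spelling variants that sound alike (e.g. "Smith"/"Smyth") tend to collapse to the same
#   # phonetic code, making the downstream Jaro-Winkler comparisons tolerant of misspellings.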
def score_record_against_records(nedss_df: pd.DataFrame, nedss_row: pd.Series) -> pd.DataFrame:
"""Calculates various similarity scores and total score, then returns the score dataframe."""
score_df = pd.DataFrame(index=nedss_df.index)
score_df['first_name_similarity_score'] = nedss_df['phonetic_first_name'].apply(
first_name_similarity_scorer, args=(nedss_row['phonetic_first_name'],))
score_df['last_name_similarity_score'] = nedss_df['phonetic_last_name'].apply(
last_name_similarity_scorer, args=(nedss_row["phonetic_last_name"],))
score_df['address_similarity_score'] = nedss_df['phonetic_address'].apply(
address_similarity_scorer, args=(nedss_row["phonetic_address"],))
score_df['age_similarity_score'] = nedss_df["Age"].apply(
age_similarity_scorer, args=(nedss_row["Age"],))
score_df['gender_similarity_score'] = nedss_df["Gender"].apply(
gender_similarity_scorer, args=(nedss_row["Gender"],))
score_df["total_score"] = score_df.sum(axis=1)
return score_df
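# Note on the scoring scale: the three phonetic scorers each contribute +1 on a match and 0
# otherwise, while the age and gender scorers contribute 0 on a match and -1 otherwise, so
# total_score ranges from -2 to +3. The "total_score > 1" filter used in
# get_dupe_index_groups below therefore holds only when at least two of the three
# name/address fields match, and a single age/gender penalty is tolerated only when all three match.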
def get_dupe_groups(adj_mat) -> List[List[int]]:
"""Takes an adjacency matrix, applies connected components to link potential dupe records."""
dupe_lists = []
n_components, labels = sparse.csgraph.connected_components(
csgraph=adj_mat, directed=False, return_labels=True)
# get the connected components (with more than one member):
dupe_ids = [k for k, v in Counter(labels).items() if v > 1]
# for each connected component (with more than one member) find the indices for its members:
for dupe_id in dupe_ids:
dupe_lists.append([i for i, x in enumerate(labels) if x == dupe_id])
return dupe_lists
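# Illustrative sketch (a hypothetical 4-record adjacency matrix, built with the scipy objects
# imported above): records 0-1 and 2-3 were flagged as pairwise matches.
#   adj = sparse.dok_matrix((4, 4), dtype='int64')
#   adj[1, 0] = 1
#   adj[3, 2] = 1
#   get_dupe_groups(adj)   # -> [[0, 1], [2, 3]]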
def get_dupe_index_groups(data_df: pd.DataFrame, split_index: int) -> List[List[int]]:
"""Takes dataframe to process and index first new record, and returns list of linked records"""
mat_dim = data_df.index.max() + 1
adj_mat = sparse.dok_matrix((mat_dim, mat_dim), dtype='int64')
new_record_indices = [i for i in data_df.index.tolist() if i >= split_index]
for rec_idx in tqdm(new_record_indices):
df_old = data_df.loc[:rec_idx - 1]
df_new_row = data_df.loc[rec_idx]
score_df = score_record_against_records(df_old, df_new_row)
match_df = score_df[score_df["total_score"] > 1]
for match_idx in match_df.index:
adj_mat[rec_idx, match_idx] = 1
return get_dupe_groups(adj_mat)
def write_dupe_info_to_workbook(in_filepath: str, out_filepath: str, dupe_lists: List[List[int]]):
"""Takes a list of lists of duplicate record indices, annotates input file with dupe ids."""
wb = xl.load_workbook(in_filepath)
ws = wb.worksheets[0]
new_col_idx = ws.max_column + 1
ws.cell(row=1, column=new_col_idx, value="dupe_group")
for i, dupe_group in enumerate(dupe_lists):
for dupe in dupe_group:
# +2 for header and 1-index offset
ws.cell(row=dupe + 2, column=new_col_idx, value=i)
if not out_filepath:
out_filepath = in_filepath.split(".")
out_filepath[-2] = out_filepath[-2] + "_w_DUPE_UUID"
out_filepath = ".".join(out_filepath)
wb.save(out_filepath)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description='Flag NEDSS data for review')
arg_parser.add_argument('-p',
'--path',
# metavar='path',
type=str,
help='the path to the raw NEDSS data file')
arg_parser.add_argument('-i',
'--identifier',
type=int,
# required=True,
help='Identifier for first new record in NEDSS data')
arg_parser.add_argument('-o',
'--output',
type=str,
help='where to save the output file')
args = arg_parser.parse_args()
path = args.path
output = args.output
identifier = args.identifier
if not path or not output or not identifier:
path = input("Paste the input file path here (then press enter): ")
path = path.strip('"')
output = input("Paste the output file path here (then press enter): ")
output = output.strip('"')
identifier = input("Type the Identifier for the first new record here (then press enter): ")
while identifier == "":
identifier = input("Type the Identifier for the first new record here (then press enter): ")
if identifier == "1" or identifier == "0":
identifier = "2"
identifier = int(identifier)
    df = pd.read_excel(path)  # api: pandas.read_excel
import pandas as pd
import numpy as np
import os
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn import metrics
from scipy.stats import rankdata
import math
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--enspath", type=str, default="./data", help="Path to folder with all csvs")
parser.add_argument("--enstype", type=str, default="loop",
help="Type of ensembling to be performed - Current options: loop / sa")
parser.add_argument("--exp", type=str, default="experiment", help="Name of experiment for csv's")
parser.add_argument('--subdata', action='store_const', default=False, const=True)
# Parse the arguments.
args = parser.parse_args()
return args
### FUNCTIONS IMPLEMENTING ENSEMBLE METHODS ###
### HELPERS ###
# Optimizing accuracy based on ROC AUC
# Source: https://albertusk95.github.io/posts/2019/12/best-threshold-maximize-accuracy-from-roc-pr-curve/
# ACC = (TP + TN) / (TP + TN + FP + FN) = (TP + TN) / (P + N)  (= correct ones / all)
# Sensitivity / tpr = TP / P
# Specificity / tnr = TN / N
def get_acc_and_best_threshold_from_roc_curve(tpr, fpr, thresholds, num_pos_class, num_neg_class):
tp = tpr * num_pos_class
tn = (1 - fpr) * num_neg_class
acc = (tp + tn) / (num_pos_class + num_neg_class)
best_threshold = thresholds[np.argmax(acc)]
return np.amax(acc), best_threshold
def set_acc(row, threshold):
if row['proba'] >= threshold:
val = 1
else:
val = 0
return val
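# Illustrative usage of the two helpers above (a sketch; `labels` and `probas` are hypothetical
# arrays, and sklearn's metrics.roc_curve returns fpr, tpr, thresholds in that order -- note the
# tpr-first argument order expected here):
#   fpr, tpr, thresholds = metrics.roc_curve(labels, probas)
#   num_pos = int(np.sum(labels)); num_neg = len(labels) - num_pos
#   best_acc, best_thr = get_acc_and_best_threshold_from_roc_curve(tpr, fpr, thresholds, num_pos, num_neg)
#   df["acc_label"] = df.apply(set_acc, axis=1, threshold=best_thr)   # df holds a "proba" column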
### AVERAGES ###
def simple_average(targets, example, weights=None, power=1, normalize=False):
"""
targets: df with target values as columns
example: output df example (e.g. including ID - make sure to adjust iloc below if target is not at 1)
weights: per submission weights; default is equal weighting
power: optional for power averaging
    normalize: Whether to normalize targets between 0 and 1
"""
if weights is None:
weights = len(targets.columns) * [1.0 / len(targets.columns)]
else:
weights = weights / np.sum(weights)
preds = example.copy()
preds.iloc[:, 1] = np.zeros(len(preds))
if normalize:
targets = (targets - targets.min()) / (targets.max() - targets.min())
for i in range(len(targets.columns)):
preds.iloc[:, 1] = np.add(preds.iloc[:, 1], weights[i] * (targets.iloc[:, i].astype(float) ** power))
return preds
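# Illustrative usage (a sketch with hypothetical submission frames sub1..sub3, each holding an
# id column followed by a probability column):
#   targets = pd.concat([sub1.iloc[:, 1], sub2.iloc[:, 1], sub3.iloc[:, 1]], axis=1)
#   blended = simple_average(targets, example=sub1, weights=np.array([2, 1, 1]), power=2, normalize=True)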
def rank_average(subs, weights=None):
"""
subs: list of submission dataframes with two columns (id, value)
weights: per submission weights; default is equal weighting
"""
if weights is None:
weights = len(subs) * [1.0 / len(subs)]
else:
weights = weights / np.sum(weights)
preds = subs[0].copy()
preds.iloc[:, 1] = np.zeros(len(subs[0]))
for i, sub in enumerate(subs):
preds.iloc[:, 1] = np.add(preds.iloc[:, 1], weights[i] * rankdata(sub.iloc[:, 1]) / len(sub))
return preds
### SIMPLEX ###
### Similar to scipy optimize
# Taken & adapted from:
# https://github.com/chrisstroemel/Simple
from heapq import heappush, heappop, heappushpop
import numpy
import math
import time
import matplotlib.pyplot as plotter
CAPACITY_INCREMENT = 1000
class _Simplex:
def __init__(self, pointIndices, testCoords, contentFractions, objectiveScore, opportunityCost, contentFraction,
difference):
self.pointIndices = pointIndices
self.testCoords = testCoords
self.contentFractions = contentFractions
self.contentFraction = contentFraction
self.__objectiveScore = objectiveScore
self.__opportunityCost = opportunityCost
self.update(difference)
def update(self, difference):
self.acquisitionValue = -(self.__objectiveScore + (self.__opportunityCost * difference))
self.difference = difference
def __eq__(self, other):
return self.acquisitionValue == other.acquisitionValue
def __lt__(self, other):
return self.acquisitionValue < other.acquisitionValue
class SimpleTuner:
def __init__(self, cornerPoints, objectiveFunction, exploration_preference=0.15):
self.__cornerPoints = cornerPoints
self.__numberOfVertices = len(cornerPoints)
self.queue = []
self.capacity = self.__numberOfVertices + CAPACITY_INCREMENT
self.testPoints = numpy.empty((self.capacity, self.__numberOfVertices))
self.objective = objectiveFunction
self.iterations = 0
self.maxValue = None
self.minValue = None
self.bestCoords = []
self.opportunityCostFactor = exploration_preference # / self.__numberOfVertices
def optimize(self, maxSteps=10):
for step in range(maxSteps):
# print(self.maxValue, self.iterations, self.bestCoords)
if len(self.queue) > 0:
targetSimplex = self.__getNextSimplex()
newPointIndex = self.__testCoords(targetSimplex.testCoords)
for i in range(0, self.__numberOfVertices):
tempIndex = targetSimplex.pointIndices[i]
targetSimplex.pointIndices[i] = newPointIndex
newContentFraction = targetSimplex.contentFraction * targetSimplex.contentFractions[i]
newSimplex = self.__makeSimplex(targetSimplex.pointIndices, newContentFraction)
heappush(self.queue, newSimplex)
targetSimplex.pointIndices[i] = tempIndex
else:
testPoint = self.__cornerPoints[self.iterations]
testPoint.append(0)
testPoint = numpy.array(testPoint, dtype=numpy.float64)
self.__testCoords(testPoint)
if self.iterations == (self.__numberOfVertices - 1):
initialSimplex = self.__makeSimplex(numpy.arange(self.__numberOfVertices, dtype=numpy.intp), 1)
heappush(self.queue, initialSimplex)
self.iterations += 1
def get_best(self):
return (self.maxValue, self.bestCoords[0:-1])
def __getNextSimplex(self):
targetSimplex = heappop(self.queue)
currentDifference = self.maxValue - self.minValue
while currentDifference > targetSimplex.difference:
targetSimplex.update(currentDifference)
# if greater than because heapq is in ascending order
if targetSimplex.acquisitionValue > self.queue[0].acquisitionValue:
targetSimplex = heappushpop(self.queue, targetSimplex)
return targetSimplex
def __testCoords(self, testCoords):
objectiveValue = self.objective(testCoords[0:-1])
if self.maxValue is None or objectiveValue > self.maxValue:
self.maxValue = objectiveValue
self.bestCoords = testCoords
if self.minValue is None: self.minValue = objectiveValue
elif objectiveValue < self.minValue:
self.minValue = objectiveValue
testCoords[-1] = objectiveValue
if self.capacity == self.iterations:
self.capacity += CAPACITY_INCREMENT
self.testPoints.resize((self.capacity, self.__numberOfVertices))
newPointIndex = self.iterations
self.testPoints[newPointIndex] = testCoords
return newPointIndex
def __makeSimplex(self, pointIndices, contentFraction):
vertexMatrix = self.testPoints[pointIndices]
coordMatrix = vertexMatrix[:, 0:-1]
barycenterLocation = numpy.sum(vertexMatrix, axis=0) / self.__numberOfVertices
differences = coordMatrix - barycenterLocation[0:-1]
distances = numpy.sqrt(numpy.sum(differences * differences, axis=1))
totalDistance = numpy.sum(distances)
barycentricTestCoords = distances / totalDistance
euclideanTestCoords = vertexMatrix.T.dot(barycentricTestCoords)
vertexValues = vertexMatrix[:, -1]
testpointDifferences = coordMatrix - euclideanTestCoords[0:-1]
testPointDistances = numpy.sqrt(numpy.sum(testpointDifferences * testpointDifferences, axis=1))
inverseDistances = 1 / testPointDistances
inverseSum = numpy.sum(inverseDistances)
interpolatedValue = inverseDistances.dot(vertexValues) / inverseSum
currentDifference = self.maxValue - self.minValue
opportunityCost = self.opportunityCostFactor * math.log(contentFraction, self.__numberOfVertices)
return _Simplex(pointIndices.copy(), euclideanTestCoords, barycentricTestCoords, interpolatedValue,
opportunityCost, contentFraction, currentDifference)
def plot(self):
if self.__numberOfVertices != 3: raise RuntimeError('Plotting only supported in 2D')
matrix = self.testPoints[0:self.iterations, :]
x = matrix[:, 0].flat
y = matrix[:, 1].flat
z = matrix[:, 2].flat
coords = []
acquisitions = []
for triangle in self.queue:
coords.append(triangle.pointIndices)
acquisitions.append(-1 * triangle.acquisitionValue)
plotter.figure()
plotter.tricontourf(x, y, coords, z)
plotter.triplot(x, y, coords, color='white', lw=0.5)
plotter.colorbar()
plotter.figure()
plotter.tripcolor(x, y, coords, acquisitions)
plotter.triplot(x, y, coords, color='white', lw=0.5)
plotter.colorbar()
plotter.show()
def Simplex(devs, label, df_list=False, exploration=0.01, scale=1):
"""
devs: list of dataframes with "proba" column
label: list/np array of ground truths
scale: By default we will get weights in the 0-1 range. Setting e.g. scale=50, gives weights in the 0-50 range.
"""
predictions = []
if df_list:
for df in devs:
predictions.append(df.proba)
print(len(predictions[0]))
else:
for i, column in enumerate(devs):
predictions.append(devs.iloc[:, i])
print(len(predictions[0]))
print("Optimizing {} inputs.".format(len(predictions)))
def roc_auc(weights):
''' Will pass the weights as a numpy array '''
final_prediction = 0
for weight, prediction in zip(weights, predictions):
final_prediction += weight * prediction
return roc_auc_score(label, final_prediction)
# This defines the search area, and other optimization parameters.
    # For e.g. 11 models, we have 12 corner points: the all-zero point, plus one point per model where only that model's weight is non-zero.
# We concat an identity matrix & a zero array to create those
zero_vtx = np.zeros((1, len(predictions)), dtype=int)
optimization_domain_vertices = np.identity(len(predictions), dtype=int) * scale
optimization_domain_vertices = np.concatenate((zero_vtx, optimization_domain_vertices), axis=0).tolist()
number_of_iterations = 3000
exploration = exploration # optional, default 0.01
# Optimize weights
tuner = SimpleTuner(optimization_domain_vertices, roc_auc, exploration_preference=exploration)
tuner.optimize(number_of_iterations)
best_objective_value, best_weights = tuner.get_best()
print('Optimized =', best_objective_value) # same as roc_auc(best_weights)
print('Weights =', best_weights)
return best_weights
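# Illustrative usage (a sketch; dev_a/dev_b/dev_c and dev_labels are hypothetical dev-set frames
# with a "proba" column plus their ground-truth labels):
#   weights = Simplex([dev_a, dev_b, dev_c], label=dev_labels, df_list=True, scale=1)
#   # the returned weights can then be passed to simple_average/rank_average on the test predictions.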
### APPLYING THE HELPER FUNCTIONS ###
def sa_wrapper(data_path="./data"):
"""
Applies simple average.
data_path: path to folder with X * (dev_seen, test_seen & test_unseen) .csv files
"""
# Make sure the lists will be ordered, i.e. test[0] is the same model as devs[0]
train, dev, test, test_unseen = [], [], [], []
train_probas, dev_probas, test_probas, test_unseen_probas = {}, {}, {}, {} # Never dynamically add to a pd Dataframe
for csv in sorted(os.listdir(data_path)):
if ".csv" in csv:
print("Included in Simple Average: ", csv)
if "train" in csv:
train.append(pd.read_csv(data_path + csv))
train_probas[csv[:-4]] = pd.read_csv(data_path + csv).proba.values
elif ("dev" in csv) or ("val" in csv):
dev.append(pd.read_csv(data_path + csv))
dev_probas[csv[:-8]] = pd.read_csv(data_path + csv).proba.values
elif "test_unseen" in csv:
test_unseen.append(pd.read_csv(data_path + csv))
test_unseen_probas[csv[:-14]] = pd.read_csv(data_path + csv).proba.values
elif "test" in csv:
                test.append(pd.read_csv(data_path + csv))  # api: pandas.read_csv
import pandas as pd
from autox.autox_competition.util import log
def fe_time(df, time_col):
log('[+] fe_time')
result = pd.DataFrame()
prefix = time_col + "_"
    df[time_col] = pd.to_datetime(df[time_col])  # api: pandas.to_datetime
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import requests
import utils
import pandas as pd
import datetime as dt
import numpy as np
from itertools import groupby
import time
class vacunacion:
def __init__(self,output,indicador):
self.output = output
self.indicador = indicador
self.my_files = {
'vacunacion_fabricante':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv',
'vacunacion_region':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv',
'vacunacion_edad':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv',
'vacunacion_grupo':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv',
}
self.path = '../input/Vacunacion'
def get_last(self):
        ## download the file that corresponds to the requested indicator
if self.indicador == 'fabricante':
print('Retrieving files')
print('vacunacion_fabricante')
r = requests.get(self.my_files['vacunacion_fabricante'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'campana':
print('Retrieving files')
print('vacunacion_region')
r = requests.get(self.my_files['vacunacion_region'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'edad':
print('Retrieving files')
print('vacunacion_edad')
r = requests.get(self.my_files['vacunacion_edad'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'caracteristicas_del_vacunado':
print('Retrieving files')
print('vacunacion_grupo')
r = requests.get(self.my_files['vacunacion_grupo'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
        ## select the file that corresponds to the requested indicator
if self.indicador == 'fabricante':
print('reading files')
print('vacunacion_fabricante')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv')
elif self.indicador == 'campana':
print('reading files')
print('vacunacion_region')
            self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv')  # api: pandas.read_csv
import matplotlib.pyplot as plt
import pandas as pd
class ProvData:
def __init__(self, prov):
self.prov = prov
self.price_list = []
self.quality_list = []
self.size_list = []
self.size_frac_list = []
self.pop_funds_list = []
self.mk_funds_list = []
self.total_population_list = []
self.pop_fpc_list = []
self.pop_ipc_list = []
def update(self):
prov = self.prov
quality = {}
size = {}
funds = {}
fpc = {}
ipc = {}
total_loans = 0
for p in prov.pops:
p.av_income = 0.9*p.av_income + 0.1*(p.income-p.cost)
funds[p.kind] = p.funds - p.loaned_funds
quality[p.kind] = p.quality
size[p.kind] = p.size
fpc[p.kind] = p.funds/p.size
ipc[p.kind] = p.av_income/p.size
total_size = prov.total_pop_size
total_funds = prov.total_funds
size_frac = {k:s/total_size for k,s in size.items()}
funds['MKT'] = prov.market.funds
funds['TOT'] = total_funds
self.price_list.append(prov.market.prices.copy())
self.mk_funds_list.append(prov.market.funds)
self.total_population_list.append(total_size)
self.quality_list.append(quality)
self.size_list.append(size)
self.size_frac_list.append(size_frac)
self.pop_funds_list.append(funds)
self.pop_fpc_list.append(fpc)
self.pop_ipc_list.append(ipc)
def plot_prov_data(self):
if len(self.price_list) == 0:
print('NO DATA TO PLOT')
return
fig, ax = plt.subplots(2, 4,figsize=(24,10))
name = self.prov.name
pd.DataFrame(self.price_list).plot(title=name+': PRICES',logy=True,ax=ax[0][0])
pd.DataFrame(self.quality_list).plot(title=name+': QUALITIES',ax=ax[0][1],logy=True)
pd.DataFrame(self.size_list).plot(title=name+': POPULATIONS',ax=ax[0][2],logy=True)
pd.DataFrame(self.size_frac_list).plot(title=name+': POP FRACTION',ax=ax[0][3])
pd.DataFrame(self.pop_funds_list).plot(title=name+': POP FUNDS',ax=ax[1][0])
pd.DataFrame(self.pop_fpc_list).plot(title=name+': POP FPC',ax=ax[1][1],logy=True)
        pd.DataFrame(self.pop_ipc_list)  # api: pandas.DataFrame
import pandas as pd
ratings = pd.read_csv('dataset/ratings.csv')
movies = pd.read_csv('dataset/movies.csv')
all_movie = movies['title'].values
new_movie = []
for movie in all_movie:
split_movie = movie.split()
split_movie.pop()
string = ' '.join(split_movie)
new_movie.append(string)
movies['movie'] = new_movie
ratings = pd.merge(movies,ratings)  # api: pandas.merge
#%%
from pymaid_creds import url, name, password, token
from data_settings import pairs_path, data_date
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import cmasher as cmr
from contools import Cascade_Analyzer, Promat, Celltype, Celltype_Analyzer
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
adj_ad = Promat.pull_adj(type_adj='ad', date=data_date)
pairs = Promat.get_pairs(pairs_path=pairs_path)
# %%
# pull sensory annotations and then pull associated skids
order = ['olfactory', 'gustatory-external', 'gustatory-pharyngeal', 'enteric', 'thermo-warm', 'thermo-cold', 'visual', 'noci', 'mechano-Ch', 'mechano-II/III', 'proprio', 'respiratory']
sens = [Celltype(name, Celltype_Analyzer.get_skids_from_meta_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = [val for sublist in input_skids_list for val in sublist]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
# identify contralateral sens neurons and contra-contra neurons to flip their left/right identities
neurons_to_flip = list(np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw contralateral dendrite')))
inputs_to_flip = [skid for skid in pymaid.get_skids_by_annotation('mw contralateral axon') if skid in input_skids]
neurons_to_flip = neurons_to_flip + inputs_to_flip
# define left and right neurons from a hemispheric propagation perspective, flip left/right identity as appropriate
left, right = Promat.get_hemis('mw left', 'mw right', neurons_to_flip=neurons_to_flip)
input_skids_left = list(np.intersect1d(input_skids, left))
input_skids_right = list(np.intersect1d(input_skids, right))
# remove bilateral axon input neurons to see how the mixing happens at the interneuron level
bilat_axon = pymaid.get_skids_by_annotation('mw bilateral axon')
bilat_axon = bilat_axon + [3795424, 11291344] # remove the ambiguous v'td neurons (project to middle of SEZ)
input_skids_left = list(np.setdiff1d(input_skids_left, bilat_axon))
input_skids_right = list(np.setdiff1d(input_skids_right, bilat_axon))
input_skids_list = [input_skids_left, input_skids_right]
#%%
# cascades from left or right hemisphere input neurons
# save as pickle to use later because cascades are stochastic; prevents the need to remake plots everytime
import pickle
p = 0.05
max_hops = 8
n_init = 1000
simultaneous = True
adj=adj_ad
'''
input_hit_hist_list = Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, source_names = ['left_inputs', 'right_inputs'], stop_skids=output_skids,
adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous, pairs=pairs, pairwise=True, disable_tqdm=False)
pickle.dump(input_hit_hist_list, open(f'data/cascades/left-right-hemisphere-cascades_{n_init}-n_init_{data_date}.p', 'wb'))
'''
input_hit_hist_list = pickle.load(open(f'data/cascades/left-right-hemisphere-cascades_{n_init}-n_init_{data_date}.p', 'rb'))
# %%
# plot heatmaps of number of neurons over-threshold per hop
def intersect_stats(hit_hist1, hit_hist2, threshold, hops):
intersect_hops = []
total_hops = []
for i in np.arange(0, hops+1):
intersect = list(np.logical_and(hit_hist1.loc[:,i]>=threshold, hit_hist2.loc[:,i]>=threshold))
total = list(np.logical_or(hit_hist1.loc[:,i]>=threshold, hit_hist2.loc[:,i]>=threshold))
intersect_hops.append(intersect)
total_hops.append(total)
intersect_hops = pd.DataFrame(intersect_hops, index=range(0, hops+1), columns = hit_hist1.index).T
total_hops = pd.DataFrame(total_hops, index=range(0, hops+1), columns = hit_hist1.index).T
percent = []
for i in np.arange(0, hops+1):
if(sum(total_hops[i])>0):
percent.append(sum(intersect_hops[i])/sum(total_hops[i]))
if(sum(total_hops[i])==0):
percent.append(0)
return(intersect_hops, total_hops, percent)
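# Note on intersect_stats: at each hop, `intersect` marks neurons visited above threshold by BOTH
# the left- and right-hemisphere cascades, `total` marks neurons visited by EITHER, and `percent`
# is their ratio -- i.e. how bilaterally mixed the propagated signal is at that hop.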
all_inputs_hit_hist_left = input_hit_hist_list[0].skid_hit_hist
all_inputs_hit_hist_right = input_hit_hist_list[1].skid_hit_hist
threshold = n_init/2
hops = 8
all_inputs_intersect, all_inputs_total, all_inputs_percent = intersect_stats(all_inputs_hit_hist_left, all_inputs_hit_hist_right, threshold, hops)
# identify left/right ipsi, bilateral, contralaterals
# majority types
ipsi = list(np.intersect1d(pymaid.get_skids_by_annotation('mw ipsilateral axon'), pymaid.get_skids_by_annotation('mw ipsilateral dendrite')))
ipsi = ipsi + list(np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw contralateral dendrite')))
bilateral = list(np.intersect1d(pymaid.get_skids_by_annotation('mw bilateral axon'), pymaid.get_skids_by_annotation('mw ipsilateral dendrite')))
contralateral = list(np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw ipsilateral dendrite')))
# add ipsilateral sensory to each
ipsi = ipsi + input_skids_left + input_skids_right
ipsi_left = list(np.intersect1d(ipsi, left))
ipsi_right = list(np.intersect1d(ipsi, right))
bilateral_left = list(np.intersect1d(bilateral, left))
bilateral_right = list(np.intersect1d(bilateral, right))
contra_left = list(np.intersect1d(contralateral, left))
contra_right = list(np.intersect1d(contralateral, right))
ipsi_left = list(np.intersect1d(ipsi_left, all_inputs_hit_hist_left.index))
ipsi_right = list(np.intersect1d(ipsi_right, all_inputs_hit_hist_right.index))
bilateral_left = list(np.intersect1d(bilateral_left, all_inputs_hit_hist_left.index))
bilateral_right = list(np.intersect1d(bilateral_right, all_inputs_hit_hist_right.index))
contra_left = list(np.intersect1d(contra_left, all_inputs_hit_hist_left.index))
contra_right = list(np.intersect1d(contra_right, all_inputs_hit_hist_right.index))
# plot results
fig, axs = plt.subplots(
3, 1, figsize=(1, 1.75), sharex=True
)
fig.tight_layout(pad=0.05)
ax = axs[0]
i_left = (all_inputs_hit_hist_left.loc[ipsi_left]>threshold).sum(axis=0)
b_left = (all_inputs_hit_hist_left.loc[bilateral_left]>threshold).sum(axis=0)
c_left = (all_inputs_hit_hist_left.loc[contra_left]>threshold).sum(axis=0)
c_right = (all_inputs_hit_hist_left.loc[contra_right]>threshold).sum(axis=0)
b_right = (all_inputs_hit_hist_left.loc[bilateral_right]>threshold).sum(axis=0)
i_right = (all_inputs_hit_hist_left.loc[ipsi_right]>threshold).sum(axis=0)
data_left = pd.DataFrame([i_left, b_left, c_left, c_right, b_right, i_right], index = ['Ipsi(L)', 'Bilateral(L)', 'Contra(L)', 'Contra(R)', 'Bilateral(R)', 'Ipsi(R)'])
sns.heatmap(data_left.iloc[:, 0:5], ax = ax, annot=True, fmt="d", cbar = False)
ax.tick_params(left=False, bottom=False)
ax = axs[1]
i_left = (all_inputs_hit_hist_right.loc[ipsi_left]>threshold).sum(axis=0)
b_left = (all_inputs_hit_hist_right.loc[bilateral_left]>threshold).sum(axis=0)
c_left = (all_inputs_hit_hist_right.loc[contra_left]>threshold).sum(axis=0)
c_right = (all_inputs_hit_hist_right.loc[contra_right]>threshold).sum(axis=0)
b_right = (all_inputs_hit_hist_right.loc[bilateral_right]>threshold).sum(axis=0)
i_right = (all_inputs_hit_hist_right.loc[ipsi_right]>threshold).sum(axis=0)
data_right = pd.DataFrame([i_left, b_left, c_left, c_right, b_right, i_right], index = ['Ipsi(L)', 'Bilateral(L)', 'Contra(L)', 'Contra(R)', 'Bilateral(R)', 'Ipsi(R)'])
sns.heatmap(data_right.iloc[:, 0:5], ax = ax, annot=True, fmt="d", cbar = False)
ax.tick_params(left=False, bottom=False)
ax = axs[2]
i_left = all_inputs_intersect.loc[ipsi_left].sum(axis=0)/all_inputs_total.loc[ipsi_left].sum(axis=0)
b_left = all_inputs_intersect.loc[bilateral_left].sum(axis=0)/all_inputs_total.loc[bilateral_left].sum(axis=0)
c_left = all_inputs_intersect.loc[contra_left].sum(axis=0)/all_inputs_total.loc[contra_left].sum(axis=0)
c_right = all_inputs_intersect.loc[contra_right].sum(axis=0)/all_inputs_total.loc[contra_right].sum(axis=0)
b_right = all_inputs_intersect.loc[bilateral_right].sum(axis=0)/all_inputs_total.loc[bilateral_right].sum(axis=0)
i_right = all_inputs_intersect.loc[ipsi_right].sum(axis=0)/all_inputs_total.loc[ipsi_right].sum(axis=0)
data = pd.DataFrame([i_left, b_left, c_left, c_right, b_right, i_right], index = ['Ipsi(L)', 'Bilateral(L)', 'Contra(L)', 'Contra(R)', 'Bilateral(R)', 'Ipsi(R)'])  # api: pandas.DataFrame
import datetime
import os
import pandas as pd
import pygsheets
import telegram
TELEGRAM_API_TOKEN = os.environ["TELEGRAM_API_TOKEN_MARATHON"]
bot = telegram.Bot(token=TELEGRAM_API_TOKEN)
chat_id = -408362490
def authenticate_google_sheets():
client = pygsheets.authorize(service_account_file="service_account.json")
sheet = client.open_by_key("<KEY>")
return sheet
def clean_worksheet():
sheet = authenticate_google_sheets()
wk = sheet[0]
workout_df = wk.get_as_df(has_header=False, index_column=1, end="Q36")
workout_df = workout_df.drop(index="ACTUAL").drop(
columns=[1, 2, 3, 4, 5, 6, 7, 8, 9]
)
workout_df.rename(
columns={
idx: label
for idx, label in zip(
workout_df.columns.tolist(), workout_df.iloc[0].values.tolist()
)
},
inplace=True,
)
workout_df.drop(workout_df.head(5).index, inplace=True)
workout_df.index = workout_df.index.astype(str) + " 2021"
    workout_df.index = pd.to_datetime(workout_df.index, format="%d %b %Y")  # api: pandas.to_datetime
import warnings
import numpy as np
from pandas import Categorical, DataFrame, Series
from .pandas_vb_common import tm
class Construction:
params = ["str", "string"]
param_names = ["dtype"]
def setup(self, dtype):
self.series_arr = tm.rands_array(nchars=10, size=10 ** 5)
self.frame_arr = self.series_arr.reshape((50_000, 2)).copy()
# GH37371. Testing construction of string series/frames from ExtensionArrays
self.series_cat_arr = Categorical(self.series_arr)
self.frame_cat_arr = Categorical(self.frame_arr)
def time_series_construction(self, dtype):
Series(self.series_arr, dtype=dtype)
def peakmem_series_construction(self, dtype):
Series(self.series_arr, dtype=dtype)
def time_frame_construction(self, dtype):
DataFrame(self.frame_arr, dtype=dtype)
def peakmem_frame_construction(self, dtype):
DataFrame(self.frame_arr, dtype=dtype)
def time_cat_series_construction(self, dtype):
Series(self.series_cat_arr, dtype=dtype)
def peakmem_cat_series_construction(self, dtype):
Series(self.series_cat_arr, dtype=dtype)
def time_cat_frame_construction(self, dtype):
        DataFrame(self.frame_cat_arr, dtype=dtype)  # api: pandas.DataFrame
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from covsirphy.util.error import UnExecutedError
from covsirphy.cleaning.term import Term
from covsirphy.ode.mbase import ModelBase
from covsirphy.simulation.estimator import Estimator
from covsirphy.simulation.simulator import ODESimulator
class PhaseUnit(Term):
"""
Save information of a phase.
Args:
start_date (str): start date of the phase
end_date (str): end date of the phase
population (int): population value
Examples:
>>> unit1 = PhaseUnit("01Jan2020", "01Feb2020", 1000)
>>> unit2 = PhaseUnit("02Feb2020", "01Mar2020", 1000)
>>> unit3 = PhaseUnit("02Mar2020", "01Apr2020", 1000)
>>> unit4 = PhaseUnit("02Mar2020", "01Apr2020", 1000)
>>> unit5 = PhaseUnit("01Jan2020", "01Apr2020", 1000)
>>> str(unit1)
'Phase (01Jan2020 - 01Feb2020)'
>>> unit4 == unit4
True
>>> unit1 != unit2
True
>>> unit1 < unit2
True
>>> unit3 > unit1
True
>>> unit3 < unit4
False
>>> unit3 <= unit4
True
>>> unit1 < "02Feb2020"
True
>>> unit1 <= "01Feb2020"
True
>>> unit1 > "31Dec2019"
True
>>> unit1 >= "01Jan2020"
True
>>> sorted([unit3, unit1, unit2]) == [unit1, unit2, unit3]
True
>>> str(unit1 + unit2)
'Phase (01Jan2020 - 01Mar2020)'
>>> str(unit5 - unit1)
'Phase (02Feb2020 - 01Apr2020)'
>>> str(unit5 - unit4)
'Phase (01Jan2020 - 01Mar2020)'
>>> set([unit1, unit3, unit4]) == set([unit1, unit3])
True
"""
def __init__(self, start_date, end_date, population):
self.ensure_date_order(start_date, end_date, name="end_date")
self._start_date = start_date
self._end_date = end_date
self._population = self.ensure_population(population)
# Summary of information
self.info_dict = {
self.START: start_date,
self.END: end_date,
self.N: population,
self.ODE: None,
self.RT: None
}
self._ode_dict = {self.TAU: None}
self.day_param_dict = {}
self.est_dict = {
self.RMSLE: None,
self.TRIALS: None,
self.RUNTIME: None
}
# Init
self._id_dict = None
self._enabled = True
self._model = None
self._record_df = pd.DataFrame()
self.y0_dict = {}
self._estimator = None
def __str__(self):
if self._id_dict is None:
header = "Phase"
else:
id_str = ', '.join(list(self._id_dict.values()))
header = f"{id_str:>4} phase"
return f"{header} ({self._start_date} - {self._end_date})"
def __hash__(self):
return hash((self._start_date, self._end_date))
def __eq__(self, other):
if not isinstance(other, PhaseUnit):
raise NotImplementedError
return self._start_date == other.start_date and self._end_date == other.end_date
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
# self < other
end = self.date_obj(self._end_date)
if isinstance(other, str):
sta_other = self.date_obj(other)
elif isinstance(other, PhaseUnit):
sta_other = self.date_obj(other.start_date)
else:
raise NotImplementedError
return end < sta_other
def __le__(self, other):
# self <= other
end = self.date_obj(self._end_date)
if isinstance(other, str):
sta_other = self.date_obj(other)
elif isinstance(other, PhaseUnit):
if self.__eq__(other):
return True
sta_other = self.date_obj(other.start_date)
else:
raise NotImplementedError
return end <= sta_other
def __gt__(self, other):
# self > other
if isinstance(other, PhaseUnit) and self.__eq__(other):
return False
return not self.__le__(other)
def __ge__(self, other):
# self >= other
return not self.__lt__(other)
def __add__(self, other):
if self < other:
return PhaseUnit(self._start_date, other.end_date, self._population)
raise NotImplementedError
def __iadd__(self, other):
return self.__add__(other)
def __sub__(self, other):
sta = self.date_obj(self._start_date)
end = self.date_obj(self._end_date)
sta_other = self.date_obj(other.start_date)
end_other = self.date_obj(other.end_date)
if sta < sta_other and end == end_other:
end_date = self.yesterday(other.start_date)
return PhaseUnit(self._start_date, end_date, self._population)
if sta == sta_other and end > end_other:
start_date = self.tomorrow(other.end_date)
return PhaseUnit(start_date, self._end_date, self._population)
def __isub__(self, other):
return self.__sub__(other)
def __contains__(self, date):
sta = self.date_obj(self._start_date)
end = self.date_obj(self._end_date)
date = self.date_obj(date)
return sta <= date <= end
@ property
def id_dict(self):
"""
tuple(str): id_dict of the phase
"""
return self._id_dict
@ id_dict.setter
def id_dict(self, value):
self.set_id(value)
def set_id(self, **kwargs):
"""
Set identifiers.
Args:
id_dict (dict[str, str]): dictionary of identifiers
Returns:
covsirphy.PhaseUnit: self
"""
if self._id_dict is not None:
raise AttributeError("@id_dict cannot be overwritten.")
self._id_dict = kwargs
return self
def del_id(self):
"""
Delete identifers.
Returns:
covsirphy.PhaseUnit: self
"""
self._id_dict = None
return self
def enable(self):
"""
Enable the phase.
Examples:
>>> unit.enable
>>> bool(unit)
True
"""
self._enabled = True
def disable(self):
"""
Disable the phase.
Examples:
>>> unit.disable
>>> bool(unit)
False
"""
self._enabled = False
def __bool__(self):
return self._enabled
@ property
def start_date(self):
"""
str: start date
"""
return self._start_date
@ property
def end_date(self):
"""
str: end date
"""
return self._end_date
@ property
def population(self):
"""
str: population value
"""
return self._population
@ property
def tau(self):
"""
int or None: tau value [min]
"""
return self._ode_dict[self.TAU]
@ tau.setter
def tau(self, value):
if self._ode_dict[self.TAU] is None:
self._ode_dict[self.TAU] = self.ensure_tau(value)
return
raise AttributeError(
f"PhaseUnit.tau is not None ({self._ode_dict[self.TAU]}) and cannot be changed.")
@ property
def model(self):
"""
covsirphy.ModelBase or None: model description
"""
return self._model
@ property
def estimator(self):
"""
covsirphy.Estimator or None: estimator object
"""
return self._estimator
def to_dict(self):
"""
Summarize phase information and return as a dictionary.
Returns:
dict:
- Start: start date of the phase
- End: end date of the phase
- Population: population value of the start date
- if available:
- ODE: model name
- Rt: (basic) reproduction number
- parameter values if available
- day parameter values if available
- tau: tau value [min]
- RMSLE: RMSLE value of estimation
- Trials: the number of trials in estimation
- Runtime: runtime of estimation
"""
return {
**self.info_dict,
**self._ode_dict,
**self.day_param_dict,
**self.est_dict
}
def summary(self):
"""
Summarize information.
Returns:
pandas.DataFrame:
Index:
reset index
Columns:
- Start: start date of the phase
- End: end date of the phase
- Population: population value of the start date
- if available:
- ODE (str): model name
- Rt (float): (basic) reproduction number
- rho etc. (float): parameter values if available
- tau (int): tau value [min]
- (int): day parameter values if available
- RMSLE (float): RMSLE score of parameter estimation
- Trials (int): the number of trials in parameter estimation
- Runtime (str): runtime of parameter estimation
"""
summary_dict = self.to_dict()
        df = pd.DataFrame.from_dict(summary_dict, orient="index")  # api: pandas.DataFrame.from_dict
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 10:59:51 2020
Modified on ... look at git commit log, you lazy bum
@author: <NAME>, Assistant Research Professor, CEE WSU
@author: <NAME>, Ecoinformaticist, USDA-ARS
contact: <EMAIL>
Library of functions for the Azure Data Lake download codeset; see the readme within this repo for more details about the different scripts used
Comments in this are specific to the functions
"""
# General library imports for functions; some functions have the import statements as part of the function
import pathlib
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
def format_plot(ax,yf,xf,xminor,yminor,yl,yu,xl,xu):
#subplot has to have ax as the axis handle
# Does not accept blank arguments within the function call; needs to be a number of some sort even if just a 0.
# Format the x and yticks
plt.yticks(fontsize = yf)
plt.xticks(fontsize = xf)
minor_locator = AutoMinorLocator(xminor)
ax.xaxis.set_minor_locator(minor_locator)
minor_locator = AutoMinorLocator(yminor)
ax.yaxis.set_minor_locator(minor_locator)
ax.tick_params(axis='both',direction='in',length=12.5,width=2)
ax.tick_params(axis='both',which = 'minor',direction='in',length=5)
plt.ylim([yl,yu])
plt.xlim([xl,xu])
return
def indx_fill(df_in, frq):
# Fills in missing index values for a continuous time series. Rows are left blank.
df = df_in.copy()
df.index = pd.to_datetime(df.index)
# # Sort index in case it came in out of order, a possibility depending on filenames and naming scheme
# df = df.sort_index()
# # Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints
# df = df[~df.index.duplicated(keep='first')]
# Remove any duplicated rows; keep row with more data
    df['nan_count'] = pd.isna(df)  # api: pandas.isna
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import math
import datetime
from collections import Counter
from timeit import default_timer as timer
# Feature engineering packages
from dask.distributed import Client, LocalCluster
import featuretools as ft
# Preprocessing packages
from category_encoders import CatBoostEncoder, JamesSteinEncoder
from sklearn.compose import ColumnTransformer
from sklearn.feature_selection import chi2, f_classif, SelectPercentile
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import QuantileTransformer, StandardScaler
# Sampling packages
from imblearn.over_sampling import RandomOverSampler, SMOTENC
from imblearn.under_sampling import (
EditedNearestNeighbours,
RandomUnderSampler,
TomekLinks,
)
from imblearn.pipeline import Pipeline as PipelineImb
# Modelling packages
from sklearn.ensemble import (
ExtraTreesClassifier,
IsolationForest,
RandomForestClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
# from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
# Calibarion packages
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
# Evaluation packages
from sklearn.metrics import (
classification_report,
confusion_matrix,
f1_score,
plot_confusion_matrix,
plot_roc_curve,
roc_auc_score,
)
def _get_time_delta(seconds=float) -> str:
return str(datetime.timedelta(seconds=seconds))
def _downcast_numeric(x):
if x.dtype == np.float64:
return x.astype(np.float32)
elif x.dtype == np.int64:
return x.astype(np.int32)
return x
def _create_client_entityset(
clients: pd.DataFrame, profiles: pd.DataFrame, entity_id: str
) -> ft.EntitySet:
es = ft.EntitySet(id=entity_id)
es = es.entity_from_dataframe(
entity_id="clients",
dataframe=clients,
index="ID",
variable_types={
"MARRIAGE": ft.variable_types.Categorical,
"SEX": ft.variable_types.Categorical,
"EDUCATION": ft.variable_types.Ordinal,
},
)
es = es.entity_from_dataframe(
entity_id="profiles",
dataframe=profiles,
index="PROFILE_ID",
time_index="MONTH",
make_index=True,
variable_types={
"ID": ft.variable_types.Id,
"CREDIT_USE": ft.variable_types.Boolean,
},
)
es = es.add_relationship(ft.Relationship(es["clients"]["ID"], es["profiles"]["ID"]))
return es
def _initialize_dask_client(n_workers: int = 2, threads: Optional[int] = None) -> List:
cluster = LocalCluster(n_workers=n_workers, threads_per_worker=threads)
return [Client(cluster), cluster]
def _get_column_dtype(df: pd.DataFrame) -> Dict:
all_cols = df.columns.tolist()
cat_cols = df.select_dtypes(include=["object", "category"]).columns.tolist()
bool_cols = df.select_dtypes(include="boolean").columns.tolist()
num_cols = df.select_dtypes(include="number").columns.tolist()
# Add ID cols if it exists
id_cols: List = []
for col in ["ID"]:
if col not in all_cols:
continue
if col in cat_cols:
cat_cols.remove(col)
if col in num_cols:
num_cols.remove(col)
id_cols.append(col)
# Add default category cols if it exists but not detected as category
for col in [
"MARRIAGE",
"SEX",
]:
if col not in all_cols:
continue
if col in num_cols:
num_cols.remove(col)
if col not in cat_cols:
cat_cols.append(col)
cat_cols.sort()
# Add default ordinal category if it exists
ordi_cols: List = []
for col in ["EDUCATION"]:
if col not in all_cols:
continue
if col in cat_cols:
cat_cols.remove(col)
if col in num_cols:
num_cols.remove(col)
ordi_cols.append(col)
ordi_cols.sort()
# Add boolean cols if it exists but not detected as boolean
for col in num_cols:
if set(df[col].astype(float).unique()) == {0, 1}:
bool_cols.append(col)
bool_cols.sort()
for col in bool_cols:
if col in num_cols:
num_cols.remove(col)
num_cols.sort()
# Seperate numerical columns with skewed distribution
skew_check = pd.DataFrame(df[num_cols].skew(), columns=["skew"])
num_skew_cols = skew_check[np.abs(skew_check["skew"]) >= 1].index.tolist()
num_skew_cols.sort()
for col in num_skew_cols:
num_cols.remove(col)
col_dict = {
"id": id_cols,
"num_normal": num_cols,
"num_skewed": num_skew_cols,
"ordinal": ordi_cols,
"boolean": bool_cols,
"category": cat_cols,
}
return col_dict
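# Illustrative note (a sketch): for a hypothetical frame with an "ID" object column, a roughly
# normal numeric column, a heavily skewed numeric column (|skew| >= 1), a 0/1 indicator and a
# string column, the dict above would come back approximately as
# {"id": ["ID"], "num_normal": [...], "num_skewed": [...], "ordinal": [], "boolean": [...], "category": [...]}.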
def _enforce_dtype(df: pd.DataFrame) -> pd.DataFrame:
col_dict = _get_column_dtype(df)
# Check if ID exists
if col_dict["id"]:
df[col_dict["id"]] = df[col_dict["id"]].astype("object")
# Enforce dtype
df[col_dict["num_normal"]] = df[col_dict["num_normal"]].apply(_downcast_numeric)
df[col_dict["num_skewed"]] = df[col_dict["num_skewed"]].apply(_downcast_numeric)
df[col_dict["boolean"]] = df[col_dict["boolean"]].astype(bool)
df[col_dict["ordinal"]] = df[col_dict["ordinal"]].astype("category")
df[col_dict["category"]] = df[col_dict["category"]].astype("category")
return df
def _get_ct_feature_names(ct: ColumnTransformer) -> List:
feature_names = []
for name, trans, column in ct.transformers_:
if trans == "drop" or (hasattr(column, "__len__") and not len(column)):
continue
if trans == "passthrough":
feature_names.extend(column)
continue
# if hasattr(trans, "get_feature_names"):
# feature_names.extend(trans.get_feature_names(column))
# continue
feature_names.extend(column)
return feature_names
def _get_ct_support(ct: ColumnTransformer) -> List:
support_list = []
for name, trans, column in ct.transformers_:
if not hasattr(trans, "get_support"):
continue
support_list.extend(trans.get_support())
return support_list
def _inverse_ct_transform(df: pd.DataFrame, ct: ColumnTransformer) -> pd.DataFrame:
df_inverse = df.copy()
for name, trans, column in ct.transformers_:
if trans == "drop" or (hasattr(column, "__len__") and not len(column)):
continue
if trans == "passthrough":
continue
if hasattr(trans, "inverse_transform"):
df_inverse[column] = trans.inverse_transform(df_inverse[column])
continue
return df_inverse
def _get_oversample_strategy(series: pd.Series, multiplier: float = 1.0) -> Dict:
if multiplier <= 0:
raise ValueError("Multiplier must be within greater than 0.")
counter: Counter = Counter(series)
# Store the median sample of all labels
median_sample = np.median([sample for label, sample in counter.items()])
recommended_sample = math.ceil(median_sample * multiplier)
sampling_strat: Dict = {}
# Populate sampling stategy for oversampling
for label, sample in counter.most_common():
if sample <= median_sample:
# Oversample label if its sample lower than median sample
sampling_strat[label] = recommended_sample
continue
sampling_strat[label] = sample
return sampling_strat
def _remove_unused_transformers(transformers: List) -> List:
used_trans = transformers
for i, trans_set in enumerate(used_trans):
name, trans, column = trans_set
if not column:
used_trans.pop(i)
return used_trans
def _remove_unused_steps(steps: List, remove_clf: bool = False) -> List:
used_steps = steps
for i, step_set in enumerate(used_steps):
name, trans = step_set
if not trans:
used_steps.pop(i)
continue
if remove_clf:
if name == "classifier":
used_steps.pop(i)
return used_steps
def add_feature_profiles(profiles: pd.DataFrame, clients: pd.DataFrame) -> pd.DataFrame:
"""Add additional features to profiles
Args:
profiles: Data of client monthly profiles.
clients: Data of normalized client.
Returns:
Monthly profile with additional features
"""
profiles = pd.merge(profiles, clients[["ID", "LIMIT_BAL"]], how="left", on="ID")
# Determine the percentage threshold of used balance from the limit
profiles["LIMIT_THRES"] = profiles["BILL_AMT"] / profiles["LIMIT_BAL"]
# Determine if the client use credit card on that month
profiles["CREDIT_USE"] = np.where(
(profiles["PAY_STATUS"] == 0)
& (profiles["BILL_AMT"] == 0)
& (profiles["PAY_AMT"] == 0),
False,
True,
)
profiles.drop(columns="LIMIT_BAL", inplace=True)
return profiles
def split_data(
clients: pd.DataFrame,
profiles: pd.DataFrame,
labels: pd.DataFrame,
parameters: Dict,
) -> List[pd.DataFrame]:
"""Splits data into training, calibration and test sets.
Args:
clients: Data of normalized client.
profiles: Data of client monthly profiles.
labels: Data of next month payment default status.
parameters: Parameters defined in parameters.yml.
Returns:
A list containing split data.
"""
(clients_train, clients_test, labels_train, labels_test,) = train_test_split(
clients,
labels,
test_size=parameters["test_size"],
random_state=parameters["random_state"],
stratify=labels["DEFAULT_PAY"],
)
profiles_train = profiles[profiles["ID"].isin(labels_train["ID"])]
profiles_test = profiles[profiles["ID"].isin(labels_test["ID"])]
return [
clients_train,
profiles_train,
labels_train,
clients_test,
profiles_test,
labels_test,
]
def create_feature_definitions(
clients_train: pd.DataFrame, profiles_train: pd.DataFrame, parameters: Dict
) -> List:
"""Create feature definitions and features set using DFS.
Args:
clients_train: Training data of normalized client.
profiles_train: Training data of client monthly profiles.
parameters: Parameters defined in parameters.yml.
Returns:
A list containing calculated features and its feature definitions from DFS.
"""
# Store client columns
client_cols = clients_train.drop(columns="ID").columns.tolist()
# Initialize dask client
dask_client, dask_cluster = _initialize_dask_client(n_workers=2)
# Log original features
logger = logging.getLogger(__name__)
logger.info(
f"Original features, excluding ID and MONTH from data sources: {(clients_train.shape[1] - 1) + (profiles_train.shape[1] - 2)} ."
)
# Create the EntitySet
es = _create_client_entityset(clients_train, profiles_train, "client_train_set")
# Create seed features
retirement_age = ft.Feature(es["clients"]["AGE"]) >= 55
# Start DFS
features, feature_defs = ft.dfs(
entityset=es,
target_entity="clients",
agg_primitives=[
"all",
"count",
"last",
"max",
"mean",
"median",
"min",
"num_true",
"percent_true",
"std",
"skew",
"sum",
"trend",
],
trans_primitives=["percentile"],
seed_features=[retirement_age],
max_depth=2,
dask_kwargs={"cluster": dask_cluster},
verbose=True,
)
dask_client.close()
# Log features created after DFS
logger.info(f"Features after DFS: {len(feature_defs)} features.")
# Remove highly null features
features, feature_defs = ft.selection.remove_highly_null_features(
features,
features=feature_defs,
pct_null_threshold=parameters["null_thres"],
)
logger.info(f"Features after removing highly null features: {len(feature_defs)}")
# Remove single value features
features, feature_defs = ft.selection.remove_single_value_features(
features, features=feature_defs
)
logger.info(f"Features after removing single value features: {len(feature_defs)}")
# Remove highly correlated features
features, feature_defs = ft.selection.remove_highly_correlated_features(
features,
features=feature_defs,
pct_corr_threshold=parameters["corr_thres"],
features_to_keep=client_cols,
)
logger.info(
f"Final features after removing highly correlated features: {len(feature_defs)}"
)
# Reindex based on ID of clients
features = features.reindex(index=clients_train["ID"])
features.reset_index(inplace=True)
# Enforce dtype
features = _enforce_dtype(features)
# Make sure the feature matrix has the same index as clients
features.index = clients_train.index
return [features, feature_defs]
def calculate_features(
feature_defs: List, clients: pd.DataFrame, profiles: pd.DataFrame
) -> pd.DataFrame:
"""Calculate features from existing feature definitions.
Args:
feature_defs: Feature definitions from DFS.
clients: Data of normalized client.
profiles: Data of client monthly profiles.
Returns:
Independent features calculated from feature definitions.
"""
# Initialize dask client
dask_client, dask_cluster = _initialize_dask_client(n_workers=2)
# Create the EntitySet
es = _create_client_entityset(clients, profiles, "client_other_set")
# Calculate feature matrix
features = ft.calculate_feature_matrix(
features=feature_defs,
entityset=es,
dask_kwargs={"cluster": dask_cluster},
verbose=True,
)
dask_client.close()
# Reindex based on ID of clients
features = features.reindex(index=clients["ID"])
features.reset_index(inplace=True)
# Enforce dtype
features = _enforce_dtype(features)
# Make sure the feature matrix has the same index as clients
features.index = clients.index
return features
def train_imputer(features: pd.DataFrame) -> Pipeline:
"""Train imputer.
Args:
features: Data of independent features used to fit the imputer.
Returns:
Trained imputer.
"""
col_dict = _get_column_dtype(features)
# Create transformers for each dtype
transformers = [
("num_n_imputer", SimpleImputer(strategy="median"), col_dict["num_normal"]),
("num_s_imputer", SimpleImputer(strategy="median"), col_dict["num_skewed"]),
(
"ordi_imputer",
SimpleImputer(strategy="constant", fill_value=0),
col_dict["ordinal"],
),
("bool_pass", "passthrough", col_dict["boolean"]),
(
"cat_imputer",
SimpleImputer(strategy="constant", fill_value=0),
col_dict["category"],
),
]
transformers = _remove_unused_transformers(transformers)
# Combine the transformers as imputer
imputer = ColumnTransformer(transformers=transformers)
imputer.fit(features)
return imputer
def impute_missing_values(
imputer: ColumnTransformer, features: pd.DataFrame
) -> pd.DataFrame:
"""Impute features using trained imputer.
Args:
features: Data of independent features.
imputer: Trained imputer.
Returns:
Imputed features using the trained imputer.
"""
# Remap imputer output to DataFrame
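# ColumnTransformer returns a plain numpy array, so the original column names are recovered from the fitted transformers before rebuilding the DataFrame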
input_cols = _get_ct_feature_names(imputer)
features_imp = pd.DataFrame(imputer.transform(features), columns=input_cols)
# Reindex based on ID of clients
features_imp.index = features["ID"]
features_imp = features_imp.reindex(index=features["ID"])
features_imp.reset_index(inplace=True)
# Enforce dtype
features_imp = _enforce_dtype(features_imp)
# Make sure the imputed feature matrix has the same index as the input features
features_imp.index = features.index
return features_imp
def train_outlier_detector(
features_train: pd.DataFrame, labels_train: pd.DataFrame, parameters: Dict
) -> Pipeline:
"""Train oulier detector and remove the outliers from features and its labels.
Args:
features_train: Training data of independent features.
labels_train: Training data of next month payment default status.
parameters: Parameters defined in parameters.yml.
Returns:
Trained outlier detector pipeline.
"""
col_dict = _get_column_dtype(features_train)
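# Align the label index with the features so row positions still match after earlier filtering steps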
if labels_train.shape[0] == features_train.shape[0]:
labels_train.index = features_train.index
# Create transformers for each dtype
transformers = [
("num_n_trans", StandardScaler(), col_dict["num_normal"]),
(
"num_s_trans",
QuantileTransformer(random_state=parameters["random_state"]),
col_dict["num_skewed"],
),
("ordi_trans", "passthrough", col_dict["ordinal"]),
("bool_pass", "passthrough", col_dict["boolean"]),
(
"cat_trans",
CatBoostEncoder(random_state=parameters["random_state"], return_df=False),
col_dict["category"],
),
]
transformers = _remove_unused_transformers(transformers)
# Replace infinities with NaN and log any feature columns that still contain missing values
logger = logging.getLogger(__name__)
features_train = features_train.replace([np.inf, -np.inf], np.nan)
logger.info(features_train.columns[features_train.isna().any()])
# Combine the transformers as preprocessor
preprocessor = ColumnTransformer(transformers=transformers)
# Extract target
target_train = labels_train["DEFAULT_PAY"]
# Create outlier detector pipeline and train it
detector = Pipeline(
steps=[
("preprocessor", preprocessor),
("detector", IsolationForest(n_jobs=-1)),
]
)
detector.fit(features_train, target_train)
return detector
def remove_outliers(
detector: Pipeline,
features: pd.DataFrame,
labels: pd.DataFrame,
) -> List:
"""Remove outliers from features and its labels using trained outlier detector.
Args:
detector: Trained outlier detector.
features: Data of independent features.
labels: Data of next month payment default status.
Returns:
A list containing features and its default labels without outliers.
"""
if labels.shape[0] == features.shape[0]:
labels.index = features.index
# Log original rows
logger = logging.getLogger(__name__)
logger.info("Original rows: {}".format(features.shape[0]))
# Store predicted outlier labels
features["OUTLIER"] = detector.predict(features)
# Remove outliers (outlier = -1)
features = features[features["OUTLIER"] != -1]
labels = labels[labels["ID"].isin(features["ID"])]
features.drop(columns="OUTLIER", inplace=True)
logger.info("Final rows after removing outliers: {}".format(features.shape[0]))
# Enforce dtype
features = _enforce_dtype(features)
return [features, labels]
def train_feature_selector(
features_train: pd.DataFrame,
labels_train: pd.DataFrame,
parameters: Dict,
) -> Pipeline:
"""Train feature selector and select only relevant features fot the label.
Args:
features_train: Training data of independent features.
labels_train: Training data of next month payment default status.
parameters: Parameters defined in parameters.yml.
Returns:
Trained feature selector pipeline.
"""
col_dict = _get_column_dtype(features_train)
if labels_train.shape[0] == features_train.shape[0]:
labels_train.index = features_train.index
# Create transformers for each dtype
transformers = [
("num_n_trans", StandardScaler(), col_dict["num_normal"]),
(
"num_s_trans",
QuantileTransformer(random_state=parameters["random_state"]),
col_dict["num_skewed"],
),
("ordi_trans", "passthrough", col_dict["ordinal"]),
("bool_pass", "passthrough", col_dict["boolean"]),
(
"cat_trans",
JamesSteinEncoder(random_state=parameters["random_state"], return_df=False),
col_dict["category"],
),
]
transformers = _remove_unused_transformers(transformers)
# Combine the transformers as preprocessor
preprocessor = ColumnTransformer(transformers=transformers)
num_cols = col_dict["num_normal"] + col_dict["num_skewed"]
nomi_cols = col_dict["ordinal"] + col_dict["boolean"] + col_dict["category"]
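# The preprocessor outputs numeric columns first and nominal columns second, so the selectors below address them by positional index ranges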
selector_ct = ColumnTransformer(
transformers=[
(
"num_selector",
SelectPercentile(f_classif, percentile=parameters["numeric_pct"]),
[x for x in range(0, len(num_cols))],
),
(
"nomi_selector",
SelectPercentile(chi2, percentile=parameters["nominal_pct"]),
[x for x in range(len(num_cols), len(num_cols) + len(nomi_cols))],
),
]
)
# Extract target
target_train = labels_train["DEFAULT_PAY"]
# Create feature selector pipeline and train it
selector = Pipeline(
steps=[("preprocessor", preprocessor), ("selector", selector_ct)]
)
selector.fit(features_train, target_train)
return selector
def select_relevant_features(
selector: Pipeline, features: pd.DataFrame
) -> pd.DataFrame:
"""Select relevant features using trained feature selector
Args:
selector: Trained feature selector.
features: Data of independent features.
Returns:
Relevant features selected using the trained feature selector.
"""
# Log original features, excluding ID
logger = logging.getLogger(__name__)
logger.info("Original features: {}".format(features.shape[1] - 1))
# Remap feature selector output to DataFrame
input_cols = _get_ct_feature_names(selector.named_steps["preprocessor"])
selected_cols = _get_ct_support(selector.named_steps["selector"])
# Filter features that are not selected
features_sel = features[input_cols]
features_sel = features_sel.iloc[:, selected_cols]
logger.info("Final features after selection: {}".format(features_sel.shape[1]))
# Reindex based on ID of clients
features_sel.index = features["ID"]
features_sel = features_sel.reindex(index=features["ID"])
features_sel.reset_index(inplace=True)
# Enforce dtype
features_sel = _enforce_dtype(features_sel)
# Make sure the selected feature matrix has the same index as the input features
features_sel.index = features.index
return features_sel
def find_best_resampler(
features_train: pd.DataFrame, labels_train: pd.DataFrame, parameters: Dict
) -> List:
"""Compare several resamplers and find the best one to handle imbalanced labels.
Args:
features_train: Training data of independent features.
labels_train: Training data of next month payment default status.
parameters: Parameters defined in parameters.yml.
Returns:
A list containing the best resampler and the search CV results as DataFrame.
"""
col_dict = _get_column_dtype(features_train)
if labels_train.shape[0] == features_train.shape[0]:
labels_train.index = features_train.index
# Create transformers for each dtype
transformers = [
("num_n_trans", StandardScaler(), col_dict["num_normal"]),
(
"num_s_trans",
QuantileTransformer(random_state=parameters["random_state"]),
col_dict["num_skewed"],
),
("ordi_trans", "passthrough", col_dict["ordinal"]),
("bool_pass", "passthrough", col_dict["boolean"]),
(
"cat_trans",
JamesSteinEncoder(random_state=parameters["random_state"], return_df=False),
col_dict["category"],
),
]
transformers = _remove_unused_transformers(transformers)
# Combine the transformers as preprocessor
preprocessor = ColumnTransformer(transformers=transformers)
num_cols = col_dict["num_normal"] + col_dict["num_skewed"]
nomi_cols = col_dict["ordinal"] + col_dict["boolean"] + col_dict["category"]
# Extract target
target_train = labels_train["DEFAULT_PAY"]
# Initialize samplers
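# SMOTENC needs the positional indices of categorical columns in the transformed array; they come after the numeric block output by the preprocessor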
smotenc_smpl = SMOTENC(
categorical_features=[
x for x in range(len(num_cols), len(num_cols) + len(nomi_cols))
],
n_jobs=-1,
)
ro_smpl = RandomOverSampler()
enn_smpl = EditedNearestNeighbours(n_jobs=-1)
tl_smpl = TomekLinks(n_jobs=-1)
ru_smpl = RandomUnderSampler()
# Initialize classifier
clf = ExtraTreesClassifier(max_depth=10, n_jobs=-1)
# Create parameter grid
param_grid = {
"sampler": [None, smotenc_smpl, ro_smpl, enn_smpl, tl_smpl, ru_smpl],
"classifier": [clf],
}
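# GridSearchCV swaps the 'sampler' step across these candidates; None disables resampling and acts as a baseline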
# Create classifier pipeline
resampler = PipelineImb(
steps=[
("preprocessor", preprocessor),
("sampler", smotenc_smpl),
("classifier", clf),
]
)
# Start grid search
search_cv = GridSearchCV(
resampler,
param_grid=param_grid,
scoring=[
"precision",
"recall",
"f1",
"roc_auc",
],
refit="f1",
error_score=0,
verbose=2,
)
timer_start = timer()
search_cv.fit(features_train, target_train)
timer_end = timer()
# Log search duration
logger = logging.getLogger(__name__)
logger.info(
f"Best resampler search elapsed time : {_get_time_delta(timer_end - timer_start)}."
)
# Save search result as DataFrame
search_results = | pd.DataFrame(search_cv.cv_results_) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
from pytest import approx
from lenskit.metrics.topn import recall
from lenskit.util.test import demo_recs
from lenskit import topn
_log = logging.getLogger(__name__)
def _test_recall(items, rel, **kwargs):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return recall(recs, truth, **kwargs)
def test_recall_empty_zero():
prec = _test_recall([], [1, 3])
assert prec == approx(0)
def test_recall_norel_na():
prec = _test_recall([1, 3], [])
assert prec is None
def test_recall_simple_cases():
prec = _test_recall([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_recall([1], [1, 3])
assert prec == approx(0.5)
prec = _test_recall([1, 2, 3, 4], [1, 3])
assert prec == approx(1.0)
prec = _test_recall([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(2.0 / 3)
prec = _test_recall([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_recall([1, 2, 3, 4], range(4, 9))
assert prec == approx(0.2)
def test_recall_series():
prec = _test_recall(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3]), pd.Series([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 9)))
assert prec == approx(0.2)
def test_recall_series_set():
prec = _test_recall(pd.Series([1, 2, 3, 4]), [1, 3, 5, 7])
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), range(4, 9))
assert prec == approx(0.2)
def test_recall_series_index():
prec = _test_recall(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 9)))
assert prec == approx(0.2)
def test_recall_series_array():
prec = _test_recall( | pd.Series([1, 3]) | pandas.Series |
#!/usr/bin/env python
from __future__ import division
import numpy as np
import pandas as pd
import warnings
from .helpers import *
def analyze_chunk(data, subjgroup=None, subjname='Subject', listgroup=None, listname='List', analysis=None, analysis_type=None, pass_features=False, **kwargs):
"""
Private function that groups data by subject/list number and performs analysis for a chunk of data.
Parameters
----------
data : Egg data object
The data to be analyzed
subjgroup : list of strings or ints
String/int variables indicating how to group over subjects. Must be
the length of the number of subjects
subjname : string
Name of the subject grouping variable
listgroup : list of strings or ints
String/int variables indicating how to group over list. Must be
the length of the number of lists
listname : string
Name of the list grouping variable
analysis : function
This function analyzes data and returns it.
pass_features : bool
Logical indicating whether the analysis uses the features field of the Egg
Returns
----------
analyzed_data : Pandas DataFrame
DataFrame containing the analysis results
"""
# if no grouping, set default to iterate over each list independently
subjgroup = subjgroup if subjgroup else data.pres.index.levels[0].values
listgroup = listgroup if listgroup else data.pres.index.levels[1].values
# create a dictionary for grouping
subjdict = {subj : data.pres.index.levels[0].values[subj==np.array(subjgroup)] for subj in set(subjgroup)}
# listdict = {lst : data.pres.index.levels[1].values[lst==np.array(listgroup)] for lst in set(listgroup)}
# allow for lists of listgroup arguments
if all(isinstance(el, list) for el in listgroup):
listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgrpsub)] for lst in set(listgrpsub)} for listgrpsub in listgroup]
else:
listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgroup)] for lst in set(listgroup)} for subj in subjdict]
# perform the analysis
def perform_analysis(subj, lst):
# get data slice for presentation and recall
pres_slice = data.pres.loc[[(s,l) for s in subjdict[subj] for l in listdict[subj][lst] if all(~pd.isnull(data.pres.loc[(s,l)]))]]
pres_slice.list_length = data.list_length
rec_slice = data.rec.loc[[(s,l) for s in subjdict[subj] for l in listdict[subj][lst] if all(~pd.isnull(data.pres.loc[(s,l)]))]]
# if features are needed for the analysis, get the features for this slice of data
if pass_features:
feature_slice = data.features.loc[[(s,l) for s in subjdict[subj] for l in listdict[subj][lst] if all(~pd.isnull(data.pres.loc[(s,l)]))]]
# generate indices
index = pd.MultiIndex.from_arrays([[subj],[lst]], names=[subjname, listname])
# perform analysis for each data chunk
if pass_features:
return pd.DataFrame([analysis(pres_slice, rec_slice, feature_slice, data.dist_funcs, **kwargs)], index=index, columns=[feature for feature in feature_slice[0].as_matrix()[0].keys()])
else:
return pd.DataFrame([analysis(pres_slice, rec_slice, **kwargs)], index=index)
# create list of chunks to process
a=[]
b=[]
for subj in subjdict:
for lst in listdict[0]:
a.append(subj)
b.append(lst)
# handle parallel kwarg
parallel=kwargs['parallel']
del kwargs['parallel']
# if we're running permutation tests, use multiprocessing
if parallel==True:
import multiprocessing
from pathos.multiprocessing import ProcessingPool as Pool
p = Pool(multiprocessing.cpu_count())
analyzed_data = p.map(perform_analysis, a, b)
else:
analyzed_data = [perform_analysis(ai, bi) for ai,bi in zip(a,b)]
# concatenate slices
analyzed_data = | pd.concat(analyzed_data) | pandas.concat |
"""
Fetch meteorological data from the SMEAR website and bind them as a CSV table.
Hyytiälä COS campaign, April-November 2016
(c) 2016-2017 <NAME> <<EMAIL>>
"""
import io
import argparse
import copy
import datetime
import requests
import numpy as np
import pandas as pd
import preproc_config
def timestamp_parser(*args):
"""
A timestamp parser for `pandas.read_csv()`.
Argument list: year, month, day, hour, minute, second
"""
return np.datetime64('%s-%s-%s %s:%s:%s' %
args)
# define terminal argument parser
parser = argparse.ArgumentParser(description='Get SMEAR meteorological data.')
parser.add_argument('-v', '--variable', dest='flag_get_variable',
action='store_true',
help='get one variable at a time, slow mode')
parser.add_argument('-n', '--now', dest='flag_now', action='store_true',
help='get the data from the starting date till now')
args = parser.parse_args()
# echo program starting
print('Retrieving meteorological data from ' +
'SMEAR <http://avaa.tdata.fi/web/smart/smear> ... ')
dt_start = datetime.datetime.now()
print(datetime.datetime.strftime(dt_start, '%Y-%m-%d %X'))
print('numpy version = ' + np.__version__)
print('pandas version = ' + pd.__version__)
output_dir = preproc_config.data_dir['met_data']
# local winter time is UTC+2
start_dt = '2016-04-01 00:00:00'
if not args.flag_now:
end_dt = '2016-11-11 00:00:00'
else:
end_dt = (datetime.datetime.utcnow() +
datetime.timedelta(2. / 24.)).strftime('%Y-%m-%d %H:%M:%S')
# variable names for retrieval from the SMEAR data website API
varnames = ['Pamb0', 'T1250', 'T672', 'T504', 'T336', 'T168', 'T84', 'T42',
'RHIRGA1250', 'RHIRGA672', 'RHIRGA504', 'RHIRGA336',
'RHIRGA168', 'RHIRGA84', 'RHIRGA42',
'RPAR', 'PAR', 'diffPAR', 'maaPAR',
'tsoil_humus', 'tsoil_A', 'tsoil_B1', 'tsoil_B2', 'tsoil_C1',
'wsoil_humus', 'wsoil_A', 'wsoil_B1', 'wsoil_B2', 'wsoil_C1',
'Precipacc']
# renaming will be done after filling all the variables in the met dataframe
renaming_dict = {
'Pamb0': 'pres',
'T1250': 'T_atm_125m',
'T672': 'T_atm_67m',
'T504': 'T_atm_50m',
'T336': 'T_atm_34m',
'T168': 'T_atm_17m',
'T84': 'T_atm_8m',
'T42': 'T_atm_4m',
'RHIRGA1250': 'RH_125m',
'RHIRGA672': 'RH_67m',
'RHIRGA504': 'RH_50m',
'RHIRGA336': 'RH_34m',
'RHIRGA168': 'RH_17m',
'RHIRGA84': 'RH_8m',
'RHIRGA42': 'RH_4m',
'RPAR': 'PAR_reflected',
'PAR': 'PAR',
'diffPAR': 'PAR_diffuse',
'maaPAR': 'PAR_below',
'tsoil_humus': 'T_soil_surf',
'tsoil_A': 'T_soil_A',
'tsoil_B1': 'T_soil_B1',
'tsoil_B2': 'T_soil_B2',
'tsoil_C1': 'T_soil_C1',
'wsoil_humus': 'w_soil_surf',
'wsoil_A': 'w_soil_A',
'wsoil_B1': 'w_soil_B1',
'wsoil_B2': 'w_soil_B2',
'wsoil_C1': 'w_soil_C1',
'Precipacc': 'precip', }
# an url example
# url = 'http://avaa.tdata.fi/palvelut/smeardata.jsp?' +
# 'variables=Pamb0,&table=HYY_META&' +
# 'from=2016-04-01 00:00:00&to=2016-04-02 00:00:00&'
# 'quality=ANY&averaging=30MIN&type=ARITHMETIC'
if not args.flag_get_variable:
# first, request all variables except precipitation
print("Fetching variables '%s' ..." % ', '.join(varnames[0:-1]), end=' ')
avg_type = 'ARITHMETIC'
url = 'http://avaa.tdata.fi/palvelut/smeardata.jsp?variables=' + \
','.join(varnames[0:-1]) + ',&table=HYY_META&from=' + \
start_dt + '&to=' + end_dt + \
'&quality=ANY&averaging=30MIN&type=' + avg_type
response = requests.get(url, verify=True)
# set `verify=True` to check SSL certificate
if response.status_code != 200:
print('Status %d: No response from the request.' %
response.status_code)
else:
print('Successful!')
df_met = pd.read_csv(
io.BytesIO(response.text.encode('utf-8')), sep=',', header=0,
names=['year', 'month', 'day', 'hour', 'minute', 'second',
*varnames[0:-1]],
parse_dates={'timestamp': [0, 1, 2, 3, 4, 5]},
date_parser=timestamp_parser,
engine='c', encoding='utf-8')
start_year = df_met['timestamp'][0].year
df_met.insert(
1, 'doy',
(df_met['timestamp'] - | pd.Timestamp('%d-01-01' % start_year) | pandas.Timestamp |
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
import tensorflow as tf
from rl.agents.cem import CEMAgent
from rl.memory import EpisodeParameterMemory
from noise_estimator import CartpoleProcessor, CartpoleSurrogateProcessor
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "cem_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "cem_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp cem_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
obs_dim = env.observation_space.shape[0]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
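# let TensorFlow allocate GPU memory on demand instead of reserving it all up front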
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Option 1 : Simple model
# model = Sequential()
# model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
# model.add(Dense(nb_actions))
# model.add(Activation('softmax'))
# Option 2: deep network
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('softmax'))
model.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = EpisodeParameterMemory(limit=1000, window_length=1)
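# EpisodeParameterMemory stores whole-episode parameters and returns, which is what the cross-entropy method samples elites from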
if REWARD == "normal":
cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05)
cem.compile()
history_normal = cem.fit(env, nb_steps=100000, visualize=False, verbose=2)
cem.save_weights(os.path.join(LOG_DIR, 'cem_normal_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
cem.test(env, nb_episodes=5, visualize=False)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
if not SMOOTH:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=False, surrogate=False)
else:
processor_noisy = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=True, surrogate=False)
# processor_surrogate = CartpoleSurrogateProcessor(e_=ERR_N, e=ERR_P, surrogate=False)
cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05,
processor=processor_noisy)
cem.compile()
history_noisy = cem.fit(env, nb_steps=100000, visualize=False, verbose=2)
if not SMOOTH:
cem.save_weights(os.path.join(LOG_DIR, 'cem_noisy_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
else:
cem.save_weights(os.path.join(LOG_DIR, 'cem_noisy_smooth_{}_params.h5f'.format(ENV_NAME)), overwrite=True)
| pandas.DataFrame(history_noisy.history) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 01 10:00:58 2021
@author: <NAME>
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
from math import e
import numpy as np
import pandas as pd
import os
import time
import glob
import itertools
from joblib import Parallel, delayed
from generate_files import GenerateFiles
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import seaborn as sns
import matplotlib.style as style
style.use('seaborn-poster') #sets the size of the charts
style.use('ggplot')
from scipy import ndimage
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.stats import mad_std
import astrotools.healpytools as hpt
import astropy_healpix as ahp
from astropy.coordinates import ICRS
from tqdm import tqdm
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import healpy as hp
from hpproj import CutSky, to_coord
import logging
cs_logger = logging.getLogger('cutsky')
cs_logger.setLevel(logging.WARNING)
cs_logger.propagate = False
hpproj_logger = logging.getLogger('hpproj')
hpproj_logger.setLevel(logging.WARNING)
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
#------------------------------------------------------------------#
# # # # # Functions # # # # #
#------------------------------------------------------------------#
class MakeData(object):
"""Class to create and preprocess input/output files from full sky-maps.
"""
def __init__(self, dataset, npix, loops, planck_path, milca_path, disk_radius=None, output_path=None):
"""
Args:
dataset (str): file name for the cluster catalog that will be used.
Options are 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'.
bands (list): list of full sky-maps that will be used for the input file.
Options are '100GHz', '143GHz', '217GHz', '353GHz', '545GHz', '857GHz', and 'y-map'.
More full sky-maps will be added later on (e.g. CO2, X-ray, density maps).
loops (int): number of times the dataset containing patches with at least one cluster within will be added
again to the training set with random variations (translations/rotations).
planck_path (str): path to directory containing planck HFI 6 frequency maps.
Files should be named as following
'HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits',
'HFI_SkyMap_545-field-Int_2048_R3.00_full.fits', 'HFI_SkyMap_857-field-Int_2048_R3.00_full.fits'.
milca_path (str): path to directory containing MILCA full sky map. File should be named 'milca_ymaps.fits'.
disk_radius (float, optional): Disk radius that will be used to create segmentation masks for output files.
Defaults to None.
output_path (str, optional): Path to output directory. Output directory needs be created beforehand using
'python xcluster.py -m True' selecting same output directory in 'params.py'.
If None, xcluster path will be used. Defaults to None.
"""
self.path = os.getcwd() + '/'
self.dataset = dataset # 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'
self.bands = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz','y-map','CO','p-noise']
self.loops = loops
self.n_labels = 2
maps = []
self.freq = 1022
self.planck_freq = 126
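# these appear to be bitmask totals with all bands enabled: 2+4+8+16+32+64 = 126 for the six HFI channels, plus 128 (y-map) + 256 (CO) + 512 (noise) = 1022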
if '100GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 100', 'docontour': True}))
# self.freq += 2
# self.planck_freq += 2
if '143GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 143', 'docontour': True}))
# self.freq += 4
# self.planck_freq += 4
if '217GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 217', 'docontour': True}))
# self.freq += 8
# self.planck_freq += 8
if '353GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 353', 'docontour': True}))
# self.freq += 16
# self.planck_freq += 16
if '545GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_545-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 545', 'docontour': True}))
# self.freq += 32
# self.planck_freq += 32
if '857GHz' in self.bands:
maps.append((planck_path + "HFI_SkyMap_857-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 857', 'docontour': True}))
# self.freq += 64
# self.planck_freq += 64
if 'y-map' in self.bands:
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True}))
# self.freq += 128
if 'CO' in self.bands:
maps.append((planck_path + "COM_CompMap_CO21-commander_2048_R2.00.fits", {'legend': 'CO', 'docontour': True}))
# self.freq += 256
if 'p-noise' in self.bands:
maps.append((planck_path + 'COM_CompMap_Compton-SZMap-milca-stddev_2048_R2.00.fits', {'legend': 'noise', 'docontour': True}))
# self.freq += 512
maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True})) #used for plots only
self.maps = maps
self.temp_path = self.path + 'to_clean/'
self.disk_radius = disk_radius
self.npix = npix #in pixels
self.pixsize = 1.7 #in arcmin
self.ndeg = (self.npix * self.pixsize)/60 #in deg
self.nside = 2
if output_path is None:
self.output_path = self.path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
else:
self.output_path = output_path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/")
self.dataset_path = self.path + 'datasets/' + self.dataset + '/'
self.planck_path = planck_path
self.milca_path = milca_path
self.test_regions = [[0, 360, 90, 70],
[0, 120, 70, 40], [120, 240, 70, 40], [240, 360, 70, 40],
[0, 120, 40, 18], [120, 240, 40, 18], [240, 360, 40, 18],
[0, 120, -18, -40], [120, 240, -18, -40], [240, 360, -18, -40],
[0, 120, -40, -70], [120, 240, -40, -70], [240, 360, -40, -70],
[0, 360, -70, -90]]
self.val_regions = [[0, 180, -20, -40],
[0, 180, -20, -40], [0, 180, -20, -40], [0, 180, -20, -40],
[0, 360, -40, -60], [0, 360, -40, -60], [0, 360, -40, -60],
[0, 360, 60, 40], [0, 360, 60, 40], [0, 360, 60, 40],
[0, 180, 40, 20], [0, 180, 40, 20], [0, 180, 40, 20],
[0, 180, 40, 20]]
def plot_psz2_clusters(self, healpix_path):
"""Saves plots containing patches for planck frequency maps and y-map.
Function is deprecated and will be removed in later versions.
Args:
healpix_path (str): output path for plots (deprecated).
"""
maps = self.maps
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
glon = PSZ2[1].data['GLON']
glat = PSZ2[1].data['GLAT']
freq = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', 'y-map']
for j in range(len(glon)):
fig = plt.figure(figsize=(21,14), tight_layout=False)
fig.suptitle(r'$glon=$ {:.2f} $^\circ$, $glat=$ {:.2f} $^\circ$'.format(glon[j], glat[j]), y=0.92, fontsize=20)
cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False)
coord = to_coord([glon[j], glat[j]])
result = cutsky.cut_fits(coord)
for i,nu in enumerate(freq):
ax = fig.add_subplot(3,4,1+i)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
HDU = result[i]['fits']
im = ax.imshow(HDU.data, origin="lower")
w = WCS(HDU.header)
sky = w.world_to_pixel_values(glon[j], glat[j])
segmentation = plt.Circle((sky[0], sky[1]), 2.5/1.7, color='white', alpha=0.1)
ax.add_patch(segmentation)
ax.axvline(sky[0], ymin=0, ymax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axvline(sky[0], ymin=(self.npix//2+10)/self.npix, ymax=1, color='white', linestyle='--')
ax.axhline(sky[1], xmin=0, xmax=(self.npix//2-10)/self.npix, color='white', linestyle='--')
ax.axhline(sky[1], xmin=(self.npix//2+10)/self.npix, xmax=1, color='white', linestyle='--')
# ax.scatter(sky[0], sky[1], color='red')
ax.set_title(r'%s'%nu)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.savefig(healpix_path + 'PSZ2/PSZ2_skycut_%s.png'%j, bbox_inches='tight', transparent=False)
plt.show()
plt.close()
def create_catalogs(self, plot=False):
"""Creates the following catalogs using 'PSZ2v1.fits', 'MCXC-Xray-clusters.fits', and 'redmapper_dr8_public_v6.3_catalog.fits'
(see <NAME> 2018 for more details):
planck_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with known redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC', 'Z'
planck_no_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with unknown redshift:
'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC'
MCXC_no_planck (pd.DataFrame): dataframe with the following columns for MCXC clusters:
'RA', 'DEC', 'R500', 'M500', 'Z'
RM50_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>50:
'RA', 'DEC', 'LAMBDA', 'Z'
RM30_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>30:
'RA', 'DEC', 'LAMBDA', 'Z'
Catalogs are saved in output_path + /catalogs/. Input catalogs are in planck_path.
Args:
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits')
df_psz2 = pd.DataFrame(data={'RA': PSZ2[1].data['RA'].tolist(), 'DEC': PSZ2[1].data['DEC'].tolist(), 'GLON': PSZ2[1].data['GLON'].tolist(), 'GLAT':PSZ2[1].data['GLAT'].tolist(),
'M500': PSZ2[1].data['MSZ'].tolist(), 'R500': PSZ2[1].data['Y5R500'].tolist(), 'REDMAPPER': PSZ2[1].data['REDMAPPER'].tolist(), 'MCXC': PSZ2[1].data['MCXC'].tolist(),
'Z': PSZ2[1].data['REDSHIFT'].tolist()})
df_psz2 = df_psz2.replace([-1, -10, -99], np.nan)
planck_no_z = df_psz2.query('Z.isnull()', engine='python')
planck_z = df_psz2.query('Z.notnull()', engine='python')
# planck_no_z = planck_no_z[['RA', 'DEC']].copy()
# planck_z = planck_z[['RA', 'DEC']].copy()
planck_no_z.to_csv(self.path + 'catalogs/planck_no-z' + '.csv', index=False)
planck_z.to_csv(self.path + 'catalogs/planck_z' + '.csv', index=False)
MCXC = fits.open(self.planck_path + 'MCXC-Xray-clusters.fits')
MCXC_skycoord = SkyCoord(ra=MCXC[1].data['RA'].tolist(), dec=MCXC[1].data['DEC'].tolist(), unit=u.degree)
MCXC_GLON = list(MCXC_skycoord.galactic.l.degree)
MCXC_GLAT = list(MCXC_skycoord.galactic.b.degree)
df_MCXC = pd.DataFrame(data={'RA': MCXC[1].data['RA'].tolist(), 'DEC': MCXC[1].data['DEC'].tolist(), 'R500': MCXC[1].data['RADIUS_500'].tolist(), 'M500': MCXC[1].data['MASS_500'].tolist(),
'GLON': MCXC_GLON, 'GLAT': MCXC_GLAT, 'Z': MCXC[1].data['REDSHIFT'].tolist()})
REDMAPPER = fits.open(self.planck_path + 'redmapper_dr8_public_v6.3_catalog.fits')
REDMAPPER_skycoord = SkyCoord(ra=REDMAPPER[1].data['RA'].tolist(), dec=REDMAPPER[1].data['DEC'].tolist(), unit=u.degree)
REDMAPPER_GLON = list(REDMAPPER_skycoord.galactic.l.degree)
REDMAPPER_GLAT = list(REDMAPPER_skycoord.galactic.b.degree)
df_REDMAPPER = pd.DataFrame(data={'RA': REDMAPPER[1].data['RA'].tolist(), 'DEC': REDMAPPER[1].data['DEC'].tolist(), 'LAMBDA': REDMAPPER[1].data['LAMBDA'].tolist(),
'GLON': REDMAPPER_GLON, 'GLAT': REDMAPPER_GLAT, 'Z': REDMAPPER[1].data['Z_SPEC'].tolist()})
df_REDMAPPER_30 = df_REDMAPPER.query("LAMBDA > 30")
df_REDMAPPER_50 = df_REDMAPPER.query("LAMBDA > 50")
ACT = fits.open(self.planck_path + 'sptecs_catalog_oct919_forSZDB.fits')
SPT = fits.open(self.planck_path + 'DR5_cluster-catalog_v1.1_forSZDB.fits')
df_act = pd.DataFrame(data={'RA': list(ACT[1].data['RA']), 'DEC': list(ACT[1].data['DEC']), 'GLON': list(ACT[1].data['GLON']), 'GLAT': list(ACT[1].data['GLAT'])})
df_spt = pd.DataFrame(data={'RA': list(SPT[1].data['RA']), 'DEC': list(SPT[1].data['DEC']), 'GLON': list(SPT[1].data['GLON']), 'GLAT': list(SPT[1].data['GLAT'])})
self.remove_duplicates_on_radec(df_MCXC, df_psz2, output_name='MCXC_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_30, df_psz2, output_name='RM30_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_REDMAPPER_50, df_psz2, output_name='RM50_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_act, df_psz2, output_name='ACT_no_planck', plot=plot)
self.remove_duplicates_on_radec(df_spt, df_psz2, output_name='SPT_no_planck', plot=plot)
PSZ2.close()
MCXC.close()
REDMAPPER.close()
ACT.close()
SPT.close()
def create_fake_source_catalog(self):
PGCC = fits.open(self.planck_path + 'HFI_PCCS_GCC_R2.02.fits')
df_pgcc = pd.DataFrame(data={'RA': list(PGCC[1].data['RA']), 'DEC': list(PGCC[1].data['DEC']), 'GLON': list(PGCC[1].data['GLON']), 'GLAT': list(PGCC[1].data['GLAT'])})
PGCC.close()
df_pgcc.to_csv(self.path + 'catalogs/' + 'PGCC' + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
bands = ['100GHz', '143GHz', '217GHz', '353GHz', '545GHz', '857GHz']
cs_100 = fits.open(self.planck_path + 'COM_PCCS_100_R2.01.fits')
cs_143 = fits.open(self.planck_path + 'COM_PCCS_143_R2.01.fits')
cs_217 = fits.open(self.planck_path + 'COM_PCCS_217_R2.01.fits')
cs_353 = fits.open(self.planck_path + 'COM_PCCS_353_R2.01.fits')
cs_545 = fits.open(self.planck_path + 'COM_PCCS_545_R2.01.fits')
cs_857 = fits.open(self.planck_path + 'COM_PCCS_857_R2.01.fits')
df_cs_100 = pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})
df_cs_100.to_csv(self.path + 'catalogs/' + 'cs_100' + '.csv', index=False)
df_cs_143 = pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})
df_cs_143.to_csv(self.path + 'catalogs/' + 'cs_143' + '.csv', index=False)
df_cs_217 = pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})
df_cs_217.to_csv(self.path + 'catalogs/' + 'cs_217' + '.csv', index=False)
df_cs_353 = pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})
df_cs_353.to_csv(self.path + 'catalogs/' + 'cs_353' + '.csv', index=False)
df_cs_545 = pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})
df_cs_545.to_csv(self.path + 'catalogs/' + 'cs_545' + '.csv', index=False)
df_cs_857 = pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})
df_cs_857.to_csv(self.path + 'catalogs/' + 'cs_857' + '.csv', index=False)
freq = 0
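# each band contributes a power-of-two flag, so freq encodes which point-source catalogues were combined in the output file name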
if '100GHz' in bands:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in bands:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in bands:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in bands:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in bands:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in bands:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
for L in range(1, len(bands)):
for subset in tqdm(itertools.combinations(bands, L)):
freq = 0
if '100GHz' in subset:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in subset:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in subset:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in subset:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in subset:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in subset:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
cs_100.close()
cs_143.close()
cs_217.close()
cs_353.close()
cs_545.close()
cs_857.close()
def remove_duplicates_on_radec(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=5, plot=False):
""""Takes two different dataframes with columns 'RA' & 'DEC' and performs a spatial
coordinate match with a tol=5 arcmin tolerance. Saves a .csv file containing df_main
without objects in common from df_with_dup.
Args:
df_main (pd.DataFrame): main dataframe.
df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 5.
plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.
"""
if with_itself == True:
scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
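# nthneighbor=2 skips the trivial zero-distance self-match when a catalogue is matched against itself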
idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
df_d2d = | pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d}) | pandas.DataFrame |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import json
import logging
import pandas as pd
from copy import deepcopy
from enum import Enum
from functools import wraps
from pydash import get, has, set_
from typing import Dict, List, Optional
from gs_quant.api.gs.assets import GsAsset, GsAssetApi
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.indices import GsIndexApi
from gs_quant.api.gs.reports import GsReportApi
from gs_quant.api.gs.users import GsUsersApi
from gs_quant.common import DateLimit, PositionType
from gs_quant.data.fields import DataMeasure
from gs_quant.entities.entity import EntityType, PositionedEntity
from gs_quant.entities.entitlements import Entitlements as BasketEntitlements
from gs_quant.errors import MqError, MqValueError
from gs_quant.json_encoder import JSONEncoder
from gs_quant.markets.indices_utils import *
from gs_quant.markets.position_set import PositionSet
from gs_quant.markets.securities import Asset, AssetType as SecAssetType
from gs_quant.session import GsSession
from gs_quant.target.data import DataQuery
from gs_quant.target.indices import *
from gs_quant.target.reports import Report, ReportStatus
_logger = logging.getLogger(__name__)
class ErrorMessage(Enum):
NON_ADMIN = 'You are not permitted to perform this action on this basket. Please make sure \
the basket owner has entitled your application properly if you believe this is a mistake'
NON_INTERNAL = 'You are not permitted to access this basket setting.'
UNINITIALIZED = 'Basket class object must be initialized using one of an existing basket\'s \
identifiers to perform this action'
UNMODIFIABLE = 'This property can not be modified since the basket has already been created'
def _validate(*error_msgs):
""" Confirms initialization is complete and checks for errors before calling function """
def _outer(fn):
@wraps(fn)
def _inner(self, *args, **kwargs):
if has(self, '_Basket__error_messages') and self._Basket__error_messages is not None:
if len(self._Basket__error_messages) < 1:
self._Basket__finish_initialization()
for error_msg in error_msgs:
if error_msg in self._Basket__error_messages:
raise MqError(error_msg.value)
return fn(self, *args, **kwargs)
return _inner
return _outer
class Basket(Asset, PositionedEntity):
"""
Basket which tracks an evolving portfolio of securities, and can be traded through cash or derivatives markets
"""
def __init__(self, gs_asset: GsAsset = None, **kwargs):
self.__error_messages = None
if gs_asset:
if gs_asset.type.value not in BasketType.to_list():
raise MqValueError(f'Failed to initialize. Asset {gs_asset.id} is not a basket')
self.__id = gs_asset.id
self.__initial_entitlements = gs_asset.entitlements
asset_entity: Dict = json.loads(json.dumps(gs_asset.as_dict(), cls=JSONEncoder))
Asset.__init__(self, gs_asset.id, gs_asset.asset_class, gs_asset.name,
exchange=gs_asset.exchange, currency=gs_asset.currency, entity=asset_entity)
PositionedEntity.__init__(self, gs_asset.id, EntityType.ASSET)
self.__populate_current_attributes_for_existing_basket(gs_asset)
else:
self.__populate_default_attributes_for_new_basket(**kwargs)
self.__error_messages = set([])
if get(kwargs, '_finish_init', False):
self.__finish_initialization()
@classmethod
def get(cls, identifier: str, **kwargs):
"""
Fetch an existing basket
:param identifier: Any common identifier for a basket (ric, ticker, etc.)
:return: Basket object
**Usage**
Get existing basket instance
**Examples**
Get basket details:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
"""
gs_asset = cls.__get_gs_asset(identifier)
return cls(gs_asset=gs_asset, _finish_init=get(kwargs, '_finish_init', True))
@_validate()
def get_details(self) -> pd.DataFrame:
"""
Get basket details
:return: dataframe containing current basket properties
**Usage**
Get basket's current state
**Examples**
Get basket details:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_details()
"""
props = list(CustomBasketsPricingParameters.properties().union(PublishParameters.properties(),
CustomBasketsCreateInputs.properties()))
props = sorted(props)
details = [{'name': k, 'value': get(self, k)} for k in props if has(self, k)]
return pd.DataFrame(details)
def create(self) -> Dict:
"""
Create a new custom basket in Marquee
:return: dictionary containing asset id and report id
**Usage**
Create a new custom basket in Marquee
**See also**
:func:`get_details` :func:`poll_status` :func:`update`
"""
inputs, pricing, publish = {}, {}, {}
for prop in CustomBasketsCreateInputs.properties():
set_(inputs, prop, get(self, prop))
for prop in CustomBasketsPricingParameters.properties():
set_(pricing, prop, get(self, prop))
for prop in PublishParameters.properties():
set_(publish, prop, get(self, prop))
set_(inputs, 'position_set', self.position_set.to_target(common=False))
set_(inputs, 'pricing_parameters', CustomBasketsPricingParameters(**pricing))
set_(inputs, 'publish_parameters', PublishParameters(**publish))
create_inputs = CustomBasketsCreateInputs(**inputs)
response = GsIndexApi.create(create_inputs)
gs_asset = GsAssetApi.get_asset(response.asset_id)
self.__latest_create_report = GsReportApi.get_report(response.report_id)
self.__init__(gs_asset=gs_asset, _finish_init=True)
return response.as_dict()
@_validate(ErrorMessage.UNINITIALIZED)
def clone(self):
"""
Retrieve a clone of an existing basket
:return: New basket instance with position set identical to current basket
**Usage**
Clone an existing basket's position set in a new basket instance prior to creation
**Examples**
Clone current basket:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> parent_basket = Basket.get("GSMBXXXX")
>>> clone = parent_basket.clone()
**See also**
:func:`create`
"""
position_set = deepcopy(self.position_set)
return Basket(position_set=position_set, clone_parent_id=self.id, parent_basket=self.ticker)
@_validate(ErrorMessage.UNINITIALIZED, ErrorMessage.NON_ADMIN)
def update(self) -> Dict:
"""
Update your custom basket
:return: dictionary containing asset id and report id
**Usage**
Make updates to your basket's metadata, pricing options, publishing options, or composition
**See also**
:func:`get_details` :func:`poll_status` :func:`create`
"""
edit_inputs, rebal_inputs = self.__get_updates()
entitlements = self.__entitlements.to_target()
if not entitlements == self.__initial_entitlements:
response = GsAssetApi.update_asset_entitlements(self.id, entitlements)
if edit_inputs is None and rebal_inputs is None:
if response:
return response.as_dict()
raise MqValueError('Update failed: Nothing on the basket was changed')
elif edit_inputs is not None and rebal_inputs is None:
response = GsIndexApi.edit(self.id, edit_inputs)
elif rebal_inputs is not None and edit_inputs is None:
response = GsIndexApi.rebalance(self.id, rebal_inputs)
else:
response = self.__edit_and_rebalance(edit_inputs, rebal_inputs)
gs_asset = GsAssetApi.get_asset(self.id)
self.__latest_create_report = GsReportApi.get_report(response.report_id)
self.__init__(gs_asset=gs_asset, _finish_init=True)
return response.as_dict()
@_validate(ErrorMessage.UNINITIALIZED, ErrorMessage.NON_ADMIN)
def upload_position_history(self, position_sets: List[PositionSet]) -> Dict:
"""
Upload basket composition history
:param position_sets: list of dated position sets
:return: dictionary containing asset id and report id
**Usage**
Upload your basket's historical composition after it's been created
**Examples**
Upload composition history from a list of identifiers:
>>> from datetime import date
>>> from gs_quant.markets.baskets import Basket
>>> from gs_quant.markets.position_set import PositionSet
>>>
>>> first_position_set = PositionSet.from_list(['BBID1', 'BBID2'], date(2020, 1, 1))
>>> second_position_set = PositionSet.from_list(['BBID1','BBID2', 'BBID3'], date(2021, 1, 1))
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.upload_position_history([first_position_set, second_position_set])
**See also**
:class:`PositionSet`
"""
if self.default_backcast:
raise MqValueError('Unable to upload position history: option must be set during basket creation')
historical_position_sets = []
for position_set in position_sets:
positions = [IndicesPositionInput(p.asset_id, p.weight) for p in position_set.positions]
historical_position_sets.append(IndicesPositionSet(tuple(positions), position_set.date))
response = GsIndexApi.backcast(self.id, CustomBasketsBackcastInputs(tuple(historical_position_sets)))
return response.as_dict()
@_validate(ErrorMessage.UNINITIALIZED)
def poll_status(self, timeout: int = 600, step: int = 30) -> ReportStatus:
"""
Polls the status of the basket's most recent create/edit/rebalance report
:param timeout: how many seconds you'd like to poll for (default is 600 sec)
:param step: how frequently you'd like to check the report's status (default is every 30 sec)
:return: Report status
**Usage**
Poll the status of a newly created or updated basket
**Examples**
Poll most recent create/update report status:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.poll_status(timeout=120, step=20)
**See also**
:func:`create` :func:`update`
"""
report = get(self, '__latest_create_report', self.__get_latest_create_report())
report_id = get(report, 'id')
return self.poll_report(report_id, timeout, step)
@_validate(ErrorMessage.UNINITIALIZED)
def get_latest_rebalance_data(self) -> Dict:
"""
Retrieve the most recent rebalance data for a basket
:return: dictionary containing the most recent rebalance data
**Usage**
Retrieve the most recent rebalance data for a basket
**Examples**
Retrieve the most recent rebalance data for a basket
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_latest_rebalance_data()
**See also**
:func:`get_latest_rebalance_date`
"""
return GsIndexApi.last_rebalance_data(self.id)
@_validate(ErrorMessage.UNINITIALIZED)
def get_latest_rebalance_date(self) -> dt.date:
"""
Retrieve the most recent rebalance date for a basket
:return: date of the most recent rebalance
**Usage**
Retrieve the most recent rebalance date for a basket
**Examples**
Retrieve the most recent rebalance date for a basket
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_latest_rebalance_date()
**See also**
:func:`get_latest_rebalance_data`
"""
last_rebalance = GsIndexApi.last_rebalance_data(self.id)
return dt.datetime.strptime(last_rebalance['date'], '%Y-%m-%d').date()
@_validate(ErrorMessage.UNINITIALIZED)
def get_rebalance_approval_status(self) -> str:
"""
Retrieve the most recent rebalance submission's approval status
:return: current approval status
**Usage**
Retrieve the most recent rebalance submission's approval status
**Examples**
Retrieve the most recent rebalance submission's approval status
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_rebalance_approval_status()
**See also**
:func:`cancel_rebalance` :func:`poll_report`
"""
last_approval = GsIndexApi.last_rebalance_approval(self.id)
return get(last_approval, 'status')
@_validate(ErrorMessage.NON_ADMIN)
def cancel_rebalance(self) -> Dict:
"""
Cancel the most recent rebalance submission
**Usage**
Cancel the basket's most recent rebalance submission if it has not yet been approved
**Examples**
Cancel the basket's most recent rebalance submission
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.cancel_rebalance()
**See also**
:func:`get_rebalance_approval_status` :func:`update`
"""
return GsIndexApi.cancel_rebalance(self.id)
@_validate(ErrorMessage.UNINITIALIZED)
def get_corporate_actions(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today() + dt.timedelta(days=10),
ca_type: List[CorporateActionType] = CorporateActionType.to_list()) -> pd.DataFrame:
"""
Retrieve corporate actions for a basket across a date range
:param start: start date (default minimum date value)
:param end: end date (default is 10 days from today)
:param ca_type: list of corporate action types (default is all)
:return: dataframe with corporate actions information
**Usage**
Retrieve corporate actions for a basket across a date range
**Examples**
Retrieve historical acquisition corporate actions for a basket
>>> from gs_quant.markets.baskets import Basket
>>> from gs_quant.markets.indices_utils import CorporateActionType
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_corporate_actions(ca_type=[CorporateActionType.ACQUISITION])
**See also**
:func:`get_fundamentals`
"""
where = dict(assetId=self.id, corporateActionType=ca_type)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.CORPORATE_ACTIONS.value)
return pd.DataFrame(response)
@_validate(ErrorMessage.UNINITIALIZED)
def get_fundamentals(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today(),
period: DataMeasure = DataMeasure.ONE_YEAR.value,
direction: DataMeasure = DataMeasure.FORWARD.value,
metrics: List[DataMeasure] = DataMeasure.list_fundamentals()) -> pd.DataFrame:
"""
Retrieve fundamentals data for a basket across a date range
:param start: start date (default minimum date value)
:param end: end date (default is today)
:param period: period for the relevant metric (default is 1y)
:param direction: direction of the outlook period (default is forward)
:param metrics: list of fundamentals metrics (default is all)
:return: dataframe with fundamentals information
**Usage**
Retrieve fundamentals data for a basket across a date range
**Examples**
Retrieve historical dividend yield data for a basket
>>> from gs_quant.data.fields import DataMeasure
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_fundamentals(metrics=[DataMeasure.DIVIDEND_YIELD])
**See also**
:func:`get_corporate_actions`
"""
where = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.BASKET_FUNDAMENTALS.value)
return | pd.DataFrame(response) | pandas.DataFrame |
from pathlib import Path
import os
import pandas as pd
import numpy as np
def get_country_geolocation():
dir_path = os.path.dirname(os.path.realpath(__file__))
country_mapping = pd.read_csv(
dir_path + '/data_files/country_centroids_az8.csv', dtype=str)
country_mapping = country_mapping.iloc[:, [48, 66, 67]]
longitude_mapping = {row['iso_n3']: row['Longitude']
for _, row in country_mapping.iterrows()}
latitude_mapping = {row['iso_n3']: row['Latitude']
for _, row in country_mapping.iterrows()}
return longitude_mapping, latitude_mapping
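# Illustrative usage (assumes the centroid CSV includes a row for the given ISO numeric code):
# lon_map, lat_map = get_country_geolocation()
# lon_map['484'], lat_map['484']  # centroid coordinates for Mexico (ISO 3166-1 numeric 484)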
def get_country_isocode_mapping():
dir_path = os.path.dirname(os.path.realpath(__file__))
country_mapping = pd.read_csv(
dir_path + '/data_files/country-codes_csv.csv', dtype=str)
country_mapping = country_mapping.iloc[1:, [2, 8]]
mapping = {row['official_name_en']: row['ISO3166-1-numeric']
for _, row in country_mapping.iterrows()}
# add missing countries > 1000 students
mapping['Taiwan'] = '158'
mapping['Hong Kong'] = '344'
mapping['Iran'] = '364'
mapping['North Korea'] = '408'
mapping['South Korea'] = '410'
mapping['Vietnam'] = '704'
mapping['United Kingdom'] = '826'
mapping['Venezuela'] = '862'
mapping['Russia'] = '643'
mapping['Bolivia'] = '068'
mapping['Côte d’Ivoire/Ivory Coast'] = '384'
return mapping
def get_output_filename(path, out_folder):
outfile = Path(path).stem + '.csv'
return os.path.join(out_folder, outfile)
def write_csv(df, excel_file, out_folder, index=False):
out_csv = get_output_filename(excel_file, out_folder)
df.to_csv(out_csv, index=index)
def clean_new_enrollment(excel_file, out_folder):
df = pd.read_excel(excel_file)
# remove empty row
df = df.drop(6)
# prepare headers
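# the sheet uses a two-row header (a grouping row at iloc[1] and a measure row at iloc[2]);
# combine both into a single column name, or fall back to the measure row alone when the grouping cell is blank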
headers = []
for i, column in enumerate(df.columns):
first_header = df[column].iloc[1]
if i == 0:
headers.append('Academic Level')
continue
if pd.isna(first_header):
headers.append(df[column].iloc[2])
else:
headers.append(f'{first_header} {df[column].iloc[2]}')
df.columns = headers
# choose data rows
df = df.iloc[3:8]
write_csv(df, excel_file, out_folder)
def clean_academic_level(excel_file, out_folder):
# TODO change hyphen to null
df = pd.read_excel(excel_file)
df = df.drop([2,
4, 5, 6, 7, 8, 9, 10, 11, 12,
14, 15, 16, 17, 18,
20, 21, 22,
24, 26,
28, 29, 30, 31, 32, 33, 34])
# drop columns up to column 34 (pre-2009/10 data)
columns_to_drop = [i for i in range(33) if i != 1]
# drop empty columns, every third column is empty
empty_columns = [i for i in range(33, 62) if not (i+1) % 3]
columns_to_drop = list(set(columns_to_drop) | set(empty_columns))
df = df.drop(df.columns[columns_to_drop], axis=1)
df = df.reset_index(drop=True)
headers = []
for i, column in enumerate(df.columns):
if i == 0:
# print(column)
# academic level column
headers.append(df[column].iloc[1])
continue
first_header = df[column].iloc[0]
if i % 2 != 0:
year = first_header
if pd.isna(first_header):
headers.append(f'{year} {df[column].iloc[1]}')
else:
headers.append(f'{first_header} {df[column].iloc[1]}')
df.columns = headers
df = df.iloc[2:]
df = df.set_index('Academic Level').transpose()
df = df.reset_index(level=0)
df = df.rename(columns={'index': 'Year'})
# df.index.name = None
# df.columns = df.iloc[1].values
print(df)
# df = df.iloc[2:38]
write_csv(df, excel_file, out_folder)
def clean_places_of_origin(excel_file, out_folder):
df = pd.read_excel(excel_file)
df.columns = df.loc[1].values
print(df)
def clean_top25_institution(excel_file, out_folder):
df = pd.read_excel(excel_file)
columns_to_drop = [i for i in range(55)]
df = df.drop(df.columns[columns_to_drop], axis=1)
print(df)
def clean_top25_institution_csv():
dir_path = os.path.dirname(os.path.realpath(__file__))
out_folder = dir_path + '/cleaned_data_files'
csv_file = out_folder + '/Census-Top-25-Institutions.csv'
df = pd.read_csv(csv_file)
df = df.iloc[:, :6]
write_csv(df, csv_file, out_folder)
def clean_all_places_of_origin_csv():
dir_path = os.path.dirname(os.path.realpath(__file__))
out_folder = dir_path + '/cleaned_data_files'
csv_file = out_folder + '/Census-All-Places-of-Origin.csv'
df = | pd.read_csv(csv_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
SCRIPT # 3
Created on Fri Jul 31 01:40:28 2020
@author: omid
"""
import numpy as np
import pandas as pd
import glob
from khayyam import *
allStocks = pd.read_pickle("./allStocks.pkl")
bookvalues = pd.read_pickle("./bookvalues.pkl")
############################ compute a return index (series) for each of the
############################ six groups by weighting the returns by market capitalization
years = [1392, 1393, 1394, 1395, 1396, 1397, 1398]
risk_premium = | pd.DataFrame(columns = [1,2,3,4,5,6], index = allStocks.index) | pandas.DataFrame |
# Modified the provided preprocessing code
# @author <NAME>
# Original license
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
# noinspection PyPep8Naming
import numpy as np
import pandas as pd
import tensorflow as tf
from .xprize_predictor import XPrizePredictor
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(ROOT_DIR, 'data')
DATA_FILE_PATH = os.path.join(DATA_PATH, 'OxCGRT_latest.csv')
MODEL_WEIGHTS_FILE = os.path.join(ROOT_DIR, "models", "trained_model_weights.h5")
ADDITIONAL_CONTEXT_FILE = os.path.join(DATA_PATH, "Additional_Context_Data_Global.csv")
ADDITIONAL_US_STATES_CONTEXT = os.path.join(DATA_PATH, "US_states_populations.csv")
ADDITIONAL_UK_CONTEXT = os.path.join(DATA_PATH, "uk_populations.csv")
ADDITIONAL_BRAZIL_CONTEXT = os.path.join(DATA_PATH, "brazil_populations.csv")
COUNTRIES_REGIONS_FILE = os.path.join(DATA_PATH, "countries_regions.csv")
NPI_COLUMNS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing',
'H6_Facial Coverings']
CONTEXT_COLUMNS = ['CountryName',
'RegionName',
'GeoID',
'Date',
'ConfirmedCases',
'ConfirmedDeaths',
'Population']
NB_LOOKBACK_DAYS = 21
NB_TEST_DAYS = 14
WINDOW_SIZE = 7
US_PREFIX = "United States / "
NUM_TRIALS = 1
LSTM_SIZE = 32
MAX_NB_COUNTRIES = 20
NB_ACTION = 12
# also get previous case data
def get_all_data(start_date, data_file_path, ips_path, weights_file_path, window_size=WINDOW_SIZE):
data = {}
df = prepare_dataframe(data_file_path)
df = df[df.Date < start_date]
fill_starting = df.Date.max() + np.timedelta64(1, 'D')
ips_df = _load_original_data(ips_path)
required_geos = ips_df.GeoID.unique()
df = df[df.GeoID.isin(required_geos)]
# If the start date is in the future, use our historical IP file
# as a base and project until the start date. Note that as specified
# by the competition base IP file (OxCGRT_latest.csv) is frozen on
# one of the 2021 January 11 releases. The exact version is in the
# competition repo.
# Note that XPrizePredictor uses some redundant code which is replicated
# in this file. May fix this issue in the future.
xprize_predictor = XPrizePredictor(MODEL_WEIGHTS_FILE, df)
fill_ending = pd.to_datetime(start_date) - np.timedelta64(1, 'D')
fill_df = xprize_predictor.predict(fill_starting, fill_ending, ips_df)
add_geoid(fill_df)
fill_df = do_calculations(fill_df, df)
fill_df = fill_df.merge(ips_df, how='left', on=['GeoID', 'Date'], suffixes=['', '_r'])
df = pd.concat([df, fill_df])
df = df.sort_values(by=['Date'])
npi_weights = prepare_weights_dict(weights_file_path, required_geos)
initial_conditions, country_names, geos_and_infected = create_country_samples(df, required_geos, start_date, window_size)
data["geos"] = df.GeoID.unique()
data["input_tensors"] = initial_conditions
data["npi_weights"] = npi_weights
data["country_names"] = country_names
return data
def add_geoid(df):
df["GeoID"] = np.where(df["RegionName"].isnull(),
df["CountryName"],
df["CountryName"] + ' / ' + df["RegionName"])
def prepare_weights_dict(weights_file_path, required_geos):
npi_weights = {}
weights_df = pd.read_csv(weights_file_path,
dtype={"RegionName": str,
"RegionCode": str},
error_bad_lines=False)
weights_df["GeoID"] = np.where(weights_df["RegionName"].isnull(),
weights_df["CountryName"],
weights_df["CountryName"] + ' / ' + weights_df["RegionName"])
for g in required_geos:
geo_weights = weights_df[weights_df.GeoID == g]
if len(geo_weights) != 0:
weights = geo_weights.iloc[0][NPI_COLUMNS].to_numpy()
weights[weights==0] += 0.001 # so we don't divide by zero later
else:
weights = np.array([1.0 for i in range(NB_ACTION)])
weight_tensor = tf.convert_to_tensor(weights, dtype='float32')[tf.newaxis]
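# the trailing [tf.newaxis] adds a leading batch dimension, giving the weight tensor shape (1, NB_ACTION)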
npi_weights[g] = weight_tensor
return npi_weights
def get_input_tensor(init_cond):
context_0 = tf.convert_to_tensor(init_cond['context_0'], dtype='float32')[tf.newaxis, :, tf.newaxis]
action_0 = tf.convert_to_tensor(init_cond['action_0'], dtype='float32')[tf.newaxis]
population = tf.convert_to_tensor(init_cond['population'], dtype='float32')[tf.newaxis]
total_cases_0 = tf.convert_to_tensor(init_cond['total_cases_0'], dtype='float32')[tf.newaxis]
prev_new_cases_0 = tf.convert_to_tensor(init_cond['prev_new_cases'], dtype='float32')[tf.newaxis]
return [context_0, action_0, population, total_cases_0, prev_new_cases_0]
def prepare_dataframe(data_url) -> pd.DataFrame:
"""
Loads the Oxford dataset, cleans it up and prepares the necessary columns. Depending on options, also
loads the Johns Hopkins dataset and merges that in.
:param data_url: the url containing the original data
:return: a Pandas DataFrame with the historical data
"""
# Original df from Oxford
df1 = _load_original_data(data_url)
# Additional context df (e.g Population for each country)
df2 = _load_additional_context_df()
# Merge the 2 DataFrames
df = df1.merge(df2, on=['GeoID'], how='left', suffixes=('', '_y'))
# Drop countries with no population data
df.dropna(subset=['Population'], inplace=True)
# Keep only needed columns
columns = CONTEXT_COLUMNS + NPI_COLUMNS
df = df[columns]
# Fill in missing values
_fill_missing_values(df)
# Compute number of new cases and deaths each day
df['NewCases'] = df.groupby('GeoID').ConfirmedCases.diff().fillna(0)
df['NewDeaths'] = df.groupby('GeoID').ConfirmedDeaths.diff().fillna(0)
# Replace negative values (which do not make sense for these columns) with 0
df['NewCases'] = df['NewCases'].clip(lower=0)
df['NewDeaths'] = df['NewDeaths'].clip(lower=0)
# Compute smoothed versions of new cases and deaths each day
df['SmoothNewCases'] = df.groupby('GeoID')['NewCases'].rolling(
WINDOW_SIZE, center=False).mean().fillna(0).reset_index(0, drop=True)
df['SmoothNewDeaths'] = df.groupby('GeoID')['NewDeaths'].rolling(
WINDOW_SIZE, center=False).mean().fillna(0).reset_index(0, drop=True)
# Compute percent change in new cases and deaths each day
df['CaseRatio'] = df.groupby('GeoID').SmoothNewCases.pct_change(
).fillna(0).replace(np.inf, 0) + 1
df['DeathRatio'] = df.groupby('GeoID').SmoothNewDeaths.pct_change(
).fillna(0).replace(np.inf, 0) + 1
# Add column for proportion of population infected
df['ProportionInfected'] = df['ConfirmedCases'] / df['Population']
# Create column of value to predict
df['PredictionRatio'] = df['CaseRatio'] / (1 - df['ProportionInfected'])
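# dividing the smoothed case-growth ratio by the susceptible fraction (1 - ProportionInfected)
# rescales growth relative to the population that can still be infected; this is the quantity the model predicts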
return df
# Calculate ConfirmedCases and ProportionInfected columns for the dates we
# need to fill in before the start date
def do_calculations(fill_df, hist_df):
additional_context = _load_additional_context_df()
new_fill_df = fill_df.merge(additional_context, on=['GeoID'], how='left', suffixes=('', '_y'))
# Drop countries with no population data
new_fill_df.dropna(subset=['Population'], inplace=True)
new_fill_df = new_fill_df.rename(columns={"PredictedDailyNewCases": "NewCases"})
new_fill_df['NewCases'] = new_fill_df['NewCases'].clip(lower=0)
required_geos = new_fill_df.GeoID.unique()
new_fill_df["ConfirmedSinceLastKnown"] = new_fill_df.groupby("GeoID")["NewCases"].cumsum()
new_fill_df["LastKnown"] = 0
for g in required_geos:
previous_data = hist_df[hist_df.GeoID == g]
# cases for the last day on record
last_confirmed_cases = previous_data["ConfirmedCases"].iloc[-1]
new_fill_df.loc[new_fill_df.GeoID == g, ["LastKnown"]] = last_confirmed_cases
new_fill_df["ConfirmedCases"] = new_fill_df["ConfirmedSinceLastKnown"] + new_fill_df["LastKnown"]
# Add column for proportion of population infected
new_fill_df['ProportionInfected'] = new_fill_df['ConfirmedCases'] / new_fill_df['Population']
return new_fill_df
def _load_original_data(data_url):
latest_df = pd.read_csv(data_url,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str,
"RegionCode": str},
error_bad_lines=False)
add_geoid(latest_df)
return latest_df
def _fill_missing_values(df):
"""
# Fill missing values by interpolation, ffill, and filling NaNs
:param df: Dataframe to be filled
"""
df.update(df.groupby('GeoID').ConfirmedCases.apply(
lambda group: group.interpolate(limit_area='inside')))
# Drop country / regions for which no number of cases is available
df.dropna(subset=['ConfirmedCases'], inplace=True)
df.update(df.groupby('GeoID').ConfirmedDeaths.apply(
lambda group: group.interpolate(limit_area='inside')))
# Drop country / regions for which no number of deaths is available
df.dropna(subset=['ConfirmedDeaths'], inplace=True)
for npi_column in NPI_COLUMNS:
df.update(df.groupby('GeoID')[npi_column].ffill().fillna(0))
def _load_additional_context_df():
# File containing the population for each country
# Note: this file contains only country-level populations, not regional ones
additional_context_df = pd.read_csv(ADDITIONAL_CONTEXT_FILE,
usecols=['CountryName', 'Population'])
additional_context_df['GeoID'] = additional_context_df['CountryName']
# US states population
additional_us_states_df = pd.read_csv(ADDITIONAL_US_STATES_CONTEXT,
usecols=['NAME', 'POPESTIMATE2019'])
# Rename the columns to match measures_df ones
additional_us_states_df.rename(columns={'POPESTIMATE2019': 'Population'}, inplace=True)
# Prefix with country name to match measures_df
additional_us_states_df['GeoID'] = US_PREFIX + additional_us_states_df['NAME']
# Append the new data to additional_df
additional_context_df = additional_context_df.append(additional_us_states_df)
# UK population
additional_uk_df = pd.read_csv(ADDITIONAL_UK_CONTEXT)
# Append the new data to additional_df
additional_context_df = additional_context_df.append(additional_uk_df)
# Brazil population
additional_brazil_df = | pd.read_csv(ADDITIONAL_BRAZIL_CONTEXT) | pandas.read_csv |
# Data handling
from io import BytesIO
from dateutil import tz
from pathlib import Path
from zipfile import ZipFile
from json import loads as loads_json
from datetime import datetime, timedelta
from requests import get as get_request
# Feature engineering
from geopandas import read_file
from pandas import DataFrame, json_normalize, read_csv, concat
# Plotting
from seaborn import scatterplot
from matplotlib.lines import Line2D
from contextily import add_basemap, providers
from matplotlib.pyplot import Axes, Figure, get_cmap
# Twitter
from twython import Twython
# Model
import ecoTad, ecoPredict
class EcoBiciMap:
def __init__(self, client_id: str, client_secret: str, twitter_key: str, twitter_secret: str, access_token: str, access_secret: str, is_local: bool=True) -> None:
'''
Defines the base directory, the base URL, and the credentials needed to access the Ecobici API
:client_id: user_uuid provided by Ecobici. More info at: https://www.ecobici.cdmx.gob.mx/sites/default/files/pdf/manual_api_opendata_esp_final.pdf
:client_secret: password provided by Ecobici, sent in a separate email for added security
'''
# Determine the base directory
if is_local: self.base_dir = Path('/Users/efrain.flores/Desktop/hub/ecobici_bot')
else: self.base_dir = Path().cwd()
self.csv_dir = self.base_dir.joinpath('data','csv')
self.shapefile_dir = self.base_dir.joinpath('data','shp')
# Base web domain, to which routes and parameters will be appended
self.base_url = "https://pubsbapi-latam.smartbike.com"
# Path carrying the access credentials
self.user_credentials = f"oauth/v2/token?client_id={client_id}&client_secret={client_secret}"
# Store the credentials needed to create tweets as attributes
self.twitter_key = twitter_key
self.twitter_secret = twitter_secret
self.access_token = access_token
self.access_secret = access_secret
# Date and time at which the class is instantiated
self.started_at = datetime.now().astimezone(tz.gettz('America/Mexico_City'))
self.started_at_format = self.started_at.strftime(r'%d/%b/%Y %H:%M')
self.is_local = is_local
self.eb_map = {}
def __str__(self) -> str:
return f'''
{self.started_at_format}
Class to extract information from the Ecobici API (https://www.ecobici.cdmx.gob.mx/sites/default/files/pdf/manual_api_opendata_esp_final.pdf),
transform it, plot availability as a heat map, export the data, and create a tweet with the map.
'''
def get_token(self, first_time: bool=False) -> None:
'''
Stores the access tokens needed to request station information and availability
:first_time:
- True to obtain ACCESS_TOKEN and REFRESH_TOKEN using the credentials for the first time
- False to keep accessing the API (after 60 min) and renew ACCESS_TOKEN via the REFRESH_TOKEN
'''
# Full URL to receive the access token and the refresh token (used if the session lasts longer than 60 min)
if first_time:
URL = f"{self.base_url}/{self.user_credentials}&grant_type=client_credentials"
# From the second access onwards, the refresh token is used
else:
URL = f"{self.base_url}/{self.user_credentials}&grant_type=refresh_token&refresh_token={self.REFRESH_TOKEN}"
# Get the response to the URL request; the payload comes back as raw text
req_text = get_request(URL).text
# Parse the response as JSON in order to store the tokens
data = loads_json(req_text)
# Store the tokens as attributes
self.ACCESS_TOKEN = data['access_token']
self.REFRESH_TOKEN = data['refresh_token']
def get_data(self, availability: bool=False) -> DataFrame:
'''
Gets the current station information and availability
:availability:
- True to get the availability data
- False to get information about the stations
'''
# URL to get real-time information, either the station info and/or their availability
stations_url = f"{self.base_url}/api/v1/stations{'/status' if availability else ''}.json?access_token={self.ACCESS_TOKEN}"
req_text = get_request(stations_url).text
data = loads_json(req_text)
# The resulting JSON has the data wrapped under its first key
first_key = list(data.keys())[0]
# Structure it as a table
df = json_normalize(data[first_key])
return df
def get_shapefile(self, shapefile_url: str='https://datos.cdmx.gob.mx/dataset/7abff432-81a0-4956-8691-0865e2722423/resource/8ee17d1b-2d65-4f23-873e-fefc9e418977/download/cp_cdmx.zip') -> None:
'''
Downloads and unzips the ZIP file containing the shapefile
(several folders that, together, define a geographic area)
:shapefile_url: official government link for the delimitation of CDMX neighborhoods
'''
# Fetch the data from the URL
req_data = get_request(shapefile_url).content
# Extract the contents of the ZIP, which is an SHP file
zipfile = ZipFile(BytesIO(req_data))
zipfile.extractall(self.shapefile_dir)
# Structure it as a table so the CDMX neighborhoods can be plotted
self.gdf = read_file(self.shapefile_dir).to_crs(epsg=4326)
def transform(self, station_cols: list=['id','zipCode','location.lat','location.lon'], id_col: str='id', status_col: str='status', bikes_col: str='availability.bikes', slots_col: str='availability.slots') -> None:
'''
Joins the stations and availability tables. Creates the proportion variables for bikes and empty slots
:station_cols: columns of interest from the stations table
:id_col: Ecobici station identifier
:status_col: column indicating the station status; only open stations are kept
:bikes_col: column indicating the available bikes
:slots_col: column indicating the empty slots
'''
# Join the station information with their availability
self.df = self.st[station_cols].merge(self.av, on=id_col)
# Keep only stations with an open status
self.df = self.df[self.df[status_col]=='OPN'].copy()
# Compute the availability proportions, both for bikes and for empty slots
self.df['slots_proportion'] = self.df[slots_col] / (self.df[slots_col] + self.df[bikes_col])
self.df['bikes_proportion'] = 1 - self.df['slots_proportion']
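# the two proportions are complementary: a slots_proportion of 1.0 means the station has no bikes left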
def set_custom_legend(self, ax, cmap, values: list) -> None:
'''
Customizes the legend labels for a heat map
'''
legend_elements = []
for gradient, label in values:
color = cmap(gradient)
legend_elements.append(Line2D([0], [0], marker="o", color="w", label=label, markerfacecolor=color, markeredgewidth=0.5, markeredgecolor="k"))
ax.legend(handles=legend_elements, loc="upper left", prop={"size": 4}, ncol=len(values))
def plot_map(self, data: DataFrame, col_to_plot: str, lat_col: str='location.lat', lon_col: str='location.lon', img_name: str='map', padding: float=0.007, points_palette: str='mako', **kwargs) -> None:
# Create the canvas on which to draw the map
fig = Figure(figsize=(5, 4), dpi=200, frameon=False)
ax = Axes(fig, [0.0, 0.0, 1.0, 1.0])
fig.add_axes(ax)
ax.set_axis_off()
# Bound the plot extent based on the coordinate range
ax.set_ylim((data[lat_col].min() - padding, data[lat_col].max() + padding))
ax.set_xlim((data[lon_col].min() - padding, data[lon_col].max() + padding))
# Plot the map of CDMX neighborhoods
self.gdf.plot(ax=ax, figsize=(8, 8), linewidth=0.5, **kwargs)
# Add street/neighborhood labels
add_basemap(ax, crs=self.gdf.crs, source=providers.Stamen.TonerLabels, interpolation='sinc', aspect='equal')
# Plot each station, assigning its color based on availability
cmap = get_cmap(points_palette)
scatterplot(y=lat_col, x=lon_col, data=data, ax=ax, palette=cmap, hue=col_to_plot)
# Adjust the legend labels to indicate what the station colors mean
self.set_custom_legend(ax, cmap, values=[(0.0, 'Hay bicis'), (0.5, 'Puede haber'), (1.0, 'No hay bicis')])
# Save the image
self.eb_map[img_name] = fig
self.eb_map[img_name].savefig(self.base_dir.joinpath('media','map',f'{img_name}.png'))
def tweet_map(self, img) -> None:
twitter = Twython(self.twitter_key, self.twitter_secret, self.access_token, self.access_secret)
with open(img, "rb") as img:
image = twitter.upload_media(media=img)
twitter.update_status(status=f"Pronóstico de disponibilidad para {(self.started_at + timedelta(hours=1)).strftime(r'%d/%b/%Y %H:%M')}", media_ids=[image["media_id"]])
def save_csv(self) -> None:
acum = read_csv(self.csv_dir.joinpath('acum_data.csv'))
try:
new = self.df.copy()
new['date'] = str(self.started_at.date())
new['time'] = str(self.started_at.time())
acum = | concat([acum, new], ignore_index=True) | pandas.concat |
from typing import Any, Literal
from pandas import DataFrame, concat
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import AggregateStep
AggregateFn = Literal[
'avg',
'sum',
'min',
'max',
'count',
'count distinct',
'first',
'last',
'count distinct including empty',
]
functions_aliases = {
'avg': 'mean',
'count distinct': 'nunique',
'count distinct including empty': len,
}
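# 'avg' maps to pandas' mean and 'count distinct' to Series.nunique (which ignores NaN);
# the builtin len is used for 'count distinct including empty', so every row in the group is counted, NaN included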
def get_aggregate_fn(agg_function: str) -> Any:
if agg_function in functions_aliases:
return functions_aliases[agg_function]
return agg_function
def execute_aggregate(
step: AggregateStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
group_by_columns = step.on
# if no group is specified, we create a pseudo column with a single value
if len(group_by_columns) == 0:
group_by_columns = ['__VQB__GROUP_BY__']
df = df.assign(**{group_by_columns[0]: True})
grouped_by_df = df.groupby(group_by_columns, dropna=False)
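# dropna=False keeps rows whose group keys are NaN as their own group instead of silently dropping them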
aggregated_cols = []
if len(step.aggregations) == 0:
df_result = grouped_by_df.first().reset_index()[group_by_columns]
else:
for aggregation in step.aggregations:
for col, new_col in zip(aggregation.columns, aggregation.new_columns):
agg_serie = (
grouped_by_df[col]
.agg(get_aggregate_fn(aggregation.agg_function))
.rename(new_col)
)
aggregated_cols.append(agg_serie)
df_result = | concat(aggregated_cols, axis=1) | pandas.concat |
from slytherin.hash import hash_object
from slytherin.functions import get_function_arguments
from ravenclaw.preprocessing import Polynomial, Normalizer
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from pandas import DataFrame, concat
from random import randint, random, choice
from func_timeout import func_timeout, FunctionTimedOut
import matplotlib.pyplot as plt
from numpy import where
from .create_arguments import create_arguments
from .Measurement import Measurement
from ..time import get_elapsed
from ..time import get_now
from ..progress import ProgressBar
# Estimator wraps a function and estimates how long it takes to run based on its arguments
# the function's arguments are expected to be ints larger than 0
class Estimator:
def __init__(self, function, args=None, unit='s', polynomial_degree=2, timeout=20):
self._function = function
self._function_arguments = get_function_arguments(function=function)
self._unit = unit
self._measurements = {}
self._polynomial_degree = polynomial_degree
self._model = None
self._normalizer = None
self._error_model = None
self._max_x = None
self._args = args
self._timeout = timeout
self._num_errors = 0
self._num_regular_runs = 0
self._num_timeouts = 0
self._x_data_columns = {}
self._error_x_data_columns = {}
@staticmethod
def get_key(**kwargs):
return hash_object(kwargs)
def check_arguments(self, kwargs, method_name):
unknown_arguments = [key for key in kwargs.keys() if key not in self._function_arguments]
missing_arguments = [key for key in self._function_arguments if key not in kwargs]
if len(missing_arguments) == 1:
return f'{method_name}() is missing the argument "{missing_arguments[0]}"'
elif len(missing_arguments) > 1:
arguments_string = '", "'.join(missing_arguments)
return f'{method_name}() is missing arguments "{arguments_string}"'
if len(unknown_arguments) == 0:
return False
elif len(unknown_arguments) == 1:
return f'{method_name}() got an unexpected argument "{unknown_arguments[0]}"'
else:
arguments_string = '", "'.join(unknown_arguments)
return f'{method_name}() got unexpected arguments "{arguments_string}"'
def get_arguments(self, arguments, **kwargs):
if arguments is None and len(kwargs) == 0:
raise ValueError('either arguments should be provided or kwargs!')
elif arguments is not None and len(kwargs) > 0:
raise ValueError('only one of arguments and kwargs should be provided!')
elif arguments is None:
arguments = kwargs
return arguments
def measure(self, timeout=None, arguments=None, **kwargs):
"""
:type timeout: int or float
:type arguments: NoneType or dict
:rtype: Measurement
"""
kwargs = self.get_arguments(arguments=arguments, **kwargs)
if self.check_arguments(kwargs=kwargs, method_name='measure'):
raise TypeError(self.check_arguments(kwargs=kwargs, method_name='measure'))
key = self.get_key(**kwargs)
if key in self._measurements:
return self._measurements[key]
else:
start_time = get_now()
if not timeout:
try:
result = self._function(**kwargs)
timeout_error = False
other_error = False
self._num_regular_runs += 1
except Exception as e:
result = None
timeout_error = False
other_error = True
self._num_errors += 1
else:
def run_function():
return self._function(**kwargs)
try:
result = func_timeout(timeout, run_function)
timeout_error = False
other_error = False
self._num_regular_runs += 1
except FunctionTimedOut:
result = None
timeout_error = True
other_error = False
self._num_timeouts += 1
except Exception as e:
result = None
timeout_error = False
other_error = True
self._num_errors += 1
elapsed = get_elapsed(start=start_time, unit=self._unit)
measurement = Measurement(
x=kwargs, result=result, elapsed=elapsed, timeout_error=timeout_error, other_error=other_error
)
self._measurements[key] = measurement
self._model = None
self._normalizer = None
self._error_model = None
if self._max_x is None:
self._max_x = kwargs
else:
self._max_x = {key: max(value, kwargs[key]) for key, value in self._max_x.items()}
return measurement
@property
def data(self):
"""
:rtype: DataFrame
"""
return DataFrame.from_records(
[measurement.dictionary for measurement in self.measurements]
)
@property
def measurements(self):
"""
:rtype: list[Measurement]
"""
measurements = sorted(list(self._measurements.values()))
# set the weights
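# slower measurements get a larger weight, 1 + sqrt(elapsed / min_elapsed), so they count more when fitting the model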
min_elapsed = measurements[0].elapsed_time
for measurement in measurements:
if min_elapsed > 0:
measurement._weight = 1 + (measurement.elapsed_time / min_elapsed) ** 0.5
else:
measurement._weight = 1
return measurements
@property
def num_measurements(self):
"""
:rtype: int
"""
return len(self._measurements)
@property
def num_errors(self):
return self._num_errors
@property
def num_regular_runs(self):
return self._num_regular_runs
@property
def num_timeouts(self):
return self._num_timeouts
@property
def num_runs(self):
return self.num_errors + self.num_regular_runs
def get_x_data(self, x, degree=None):
"""
:type x: DataFrame or dict or list
:type degree: NoneType or int
:rtype: DataFrame
"""
if isinstance(x, dict):
if all([isinstance(value, (list, tuple)) for value in x.values()]):
data = DataFrame(x)
else:
data = | DataFrame.from_records([x]) | pandas.DataFrame.from_records |
import itertools
import json
import os
import csv
import errno
import random
from random import shuffle
from typing import List
import spacy
from tqdm import tqdm
import pandas as pd
import codecs
import nltk
import glob
import xml.etree.ElementTree as ET
from datasets import load_dataset
import statistics
import json
nltk.download("stopwords")
from nltk.corpus import stopwords
STOPWORDS = stopwords.words("english")
STOPWORDS = [stopword + " " for stopword in STOPWORDS]
nlp = spacy.load("en_core_web_sm")
def commonsenseqa():
def read_file(file, split):
fout = open(f"commonsenseqa/{split}.tsv", "w")
fout_meta = open(f"commonsenseqa/{split}_meta.tsv", "w")
with open(file) as f:
for line in f.readlines():
json_line = json.loads(line)
candidates_str = " ".join([f"({x['label']}) {x['text']}" for x in json_line['question']['choices']])
if split != "test":
selected_ans_string = [x['text'] for x in json_line['question']['choices'] if
json_line['answerKey'] == x['label']]
assert len(selected_ans_string) == 1, f"{len(selected_ans_string)} -- {json_line['answerKey']}"
json_line['question']['stem'] = json_line['question']['stem'].replace("\t", " ").replace("\n", "")
candidates_str = candidates_str.replace("\t", " ").replace("\n", "")
if split == "test":
fout_meta.write(f"{json_line['id']}\t-\n")
fout.write(f"{json_line['question']['stem']} \\n {candidates_str}\t-\n")
else:
fout_meta.write(f"{json_line['id']}\t{json_line['answerKey']}\n")
selected_ans_string[0] = selected_ans_string[0].replace("\t", " ").replace("\n", "")
fout.write(f"{json_line['question']['stem']} \\n {candidates_str}\t{selected_ans_string[0]}\n")
read_file("commonsenseqa/dev_rand_split.jsonl", "dev")
read_file("commonsenseqa/train_rand_split.jsonl", "train")
read_file("commonsenseqa/test_rand_split_no_answers.jsonl", "test")
def read_qas_paragraphs(file):
map = {}
length_list = []
with open(file) as f:
for line in f.readlines():
json_line = json.loads(line)
sentence_list = []
for c in json_line['question']['choices']:
doc = nlp(c['para'])
all_sentences = [sent.text.strip() for sent in doc.sents]
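# keep only the last 4 sentences of each choice's retrieved paragraph to bound the combined context length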
sentence_list += all_sentences[-4:]
sentence_list = list(set(sentence_list))
map[json_line['id']] = " ".join(sentence_list)
length_list.append(len(map[json_line['id']].split(" ")))
print(length_list)
return map
def qasc():
qasc_para = {}
map1 = read_qas_paragraphs("QASC_Dataset_2Step/train.jsonl")
map2 = read_qas_paragraphs("QASC_Dataset_2Step/test.jsonl")
map3 = read_qas_paragraphs("QASC_Dataset_2Step/dev.jsonl")
qasc_para.update(map1)
qasc_para.update(map2)
qasc_para.update(map3)
def process_file(file, split, with_para):
outdir = "qasc"
if with_para:
outdir = "qasc_with_ir"
fout = open(f"{outdir}/{split}.tsv", "w")
fout_meta = open(f"{outdir}/{split}_meta.tsv", "w")
with open(file) as f:
for line in f.readlines():
json_line = json.loads(line)
para = ""
if with_para:
para = "\\n" + qasc_para[json_line['id']].replace("\n", " ").replace("\t", " ")
candidates_str = " ".join([f"({x['label']}) {x['text']}" for x in json_line['question']['choices']])
if 'answerKey' in json_line:
selected_ans_string = [x['text'] for x in json_line['question']['choices'] if
json_line['answerKey'] == x['label']]
assert len(selected_ans_string) == 1, f"{len(selected_ans_string)} -- {json_line['answerKey']}"
ansKey = json_line['answerKey']
else:
selected_ans_string = ['-']
ansKey = '-'
fout.write(f"{json_line['question']['stem']} \\n {candidates_str}{para}\t{selected_ans_string[0]}\n")
fout_meta.write(f"{json_line['id']}\t{ansKey}\n")
for with_para in [True, False]:
process_file("QASC_Dataset/dev.jsonl", "dev", with_para)
process_file("QASC_Dataset/test.jsonl", "test", with_para)
process_file("QASC_Dataset/train.jsonl", "train", with_para)
def boolq_contrast_sets():
def read_file(split):
fout = open(f"contrast_sets_boolq/{split}.tsv", "w")
# fout_meta = open(f"boolq-experts/{split}_meta.tsv", "w")
with open("contrast_sets/boolq_expert_perturbations.json") as f:
json_content = json.load(f)
for entry in json_content['data']:
passage = f"({entry['title']}) {entry['paragraph']}"
for q in entry['perturbed_questions']:
if '?' not in q['perturbed_q']:
q['perturbed_q'] += '?'
if q['answer'] == 'TRUE':
ans = "yes"
else:
ans = "no"
fout.write(f"{q['perturbed_q']} \\n {passage}\t{ans}\n")
read_file("train")
read_file("test")
def physical_iqa():
def read_file(split):
fout = open(f"physical_iqa/{split}.tsv", "w")
fout_meta = open(f"physical_iqa/{split}_meta.tsv", "w")
with open(f"physicaliqa-train-dev/{split}-labels.lst") as f:
labels = [line.replace("\n", "").strip() for line in f.readlines()]
counter = 0
with open(f"physicaliqa-train-dev/{split}.jsonl") as f:
for idx, line in enumerate(f.readlines()):
label = labels[idx]
json_line = json.loads(line)
id = json_line['id']
goal = json_line['goal'].replace("\t", " ").replace("\n", " ")
sol1 = json_line['sol1'].replace("\t", " ").replace("\n", " ")
sol2 = json_line['sol2'].replace("\t", " ").replace("\n", " ")
assert label == "1" or label == "0", f" * label: {label}"
ans = sol1
ans_label_ab = "A"
if label == "1":
ans = sol2
ans_label_ab = "B"
ans = ans.replace("\t", " ").replace("\n", " ")
fout.write(f"{goal} \\n (A) {sol1} (B) {sol2} \t {ans}\n")
fout_meta.write(f"{id}\t{ans_label_ab}\t numeric_from_zero \t{ans}\n")
counter += 1
return counter
dev_count = read_file("dev")
train_count = read_file("train")
with open(f"physical_iqa/counts.json", "w+") as outfile:
json.dump({"train": train_count, "dev": dev_count}, outfile)
def social_iqa():
def read_file(split):
fout = open(f"social_iqa/{split}.tsv", "w")
fout_meta = open(f"social_iqa/{split}_meta.tsv", "w")
with open(f"socialiqa-train-dev/{split}-labels.lst") as f:
labels = [line.replace("\n", "").strip() for line in f.readlines()]
counter = 0
with open(f"socialiqa-train-dev/{split}.jsonl") as f:
for idx, line in enumerate(f.readlines()):
label = labels[idx]
json_line = json.loads(line)
context = json_line['context'].replace("\t", " ").replace("\n", " ")
question = json_line['question'].replace("\t", " ").replace("\n", " ")
answerA = json_line['answerA'].replace("\t", " ").replace("\n", " ")
answerB = json_line['answerB'].replace("\t", " ").replace("\n", " ")
answerC = json_line['answerC'].replace("\t", " ").replace("\n", " ")
assert label == "1" or label == "2" or label == "3", f" * label: {label}"
ans = answerA
abc_label = "A"
if label == "2":
ans = answerB
abc_label = "B"
if label == "3":
ans = answerC
abc_label = "C"
ans = ans.replace("\t", " ").replace("\n", " ")
fout.write(f"{question} \\n (A) {answerA} (B) {answerB} (C) {answerC} \\n {context} \t {ans}\n")
fout_meta.write(f"-\t{abc_label}\t numeric \t{ans} \n")
counter += 1
return counter
dev_count = read_file("dev")
train_count = read_file("train")
with open(f"social_iqa/counts.json", "w+") as outfile:
json.dump({"train": train_count, "dev": dev_count}, outfile)
def drop_contrast_sets():
def read_file(split):
fout = open(f"contrast_sets_drop/{split}.tsv", "w")
fout_meta = open(f"contrast_sets_drop/{split}_meta.tsv", "w")
with open("drop_dataset/DROP/drop_contrast_sets_test.json") as f:
json_content = json.load(f)
for title, content in json_content.items():
for qp in content['qa_pairs']:
answer = qp['answer']
ans_text = ""
number = answer['number']
spans = answer['spans']
if len(spans) > 0:
ans_text = ", ".join(spans)
elif len(number) > 0:
ans_text = number
else:
day = answer['date']['day']
month = answer['date']['month']
year = answer['date']['year']
if len(month) > 0:
ans_text += month
if len(day) > 0:
ans_text += f" {day}"
if len(year) > 0:
ans_text += f" {year}"
# assert ans_text != ""
# print(ans_text)
if ans_text == "":
print(" >>>> skipping the question . . . ")
continue
fout.write(f"{qp['question']} \\n {content['passage']}\t{ans_text}\n")
fout_meta.write(f"{qp['query_id']}\n")
read_file("train")
read_file("test")
def quoref_contrast_sets():
def read_file(split):
fout = open(f"contrast_sets_quoref/{split}.tsv", "w")
fout_meta = open(f"contrast_sets_quoref/{split}_meta.tsv", "w")
with open(
"drop_dataset/quoref/quoref_test_perturbations_20191206_merged.json") as f:
json_content = json.load(f)
for entry in json_content['data']:
entry['title'] = entry['title'].replace("\n", " ").replace("\t", " ")
for p in entry['paragraphs']:
p['context'] = p['context'].replace("\n", " ").replace("\t", " ")
passage = f"({entry['title']}) {p['context']}"
for q in p['qas']:
answers = "///".join([x['text'] for x in q['answers']])
fout.write(f"{q['question']}\\n{passage}\t{answers}\n")
fout_meta.write(f"{q['id']}\n")
read_file("train")
read_file("test")
def ropes_contrast_sets():
def read_file(split):
fout = open(f"contrast_sets_ropes/{split}.tsv", "w")
fout_meta = open(f"contrast_sets_ropes/{split}_meta.tsv", "w")
with open(
"drop_dataset/ropes/data/ropes_contrast_set_032820.json") as f:
json_content = json.load(f)
for para in json_content['data'][0]['paragraphs']:
context = f"{para['background']} {para['situation']}".replace("\n", " ").replace("\t", " ")
for qa in para['qas']:
question = qa['question'].replace("\n", " ").replace("\t", " ")
for a in qa['answers']:
answer = a['text'].replace("\n", " ").replace("\t", " ")
fout.write(f"{question} \\n {context}\t{answer}\n")
fout_meta.write(f"{qa['id']}\n")
read_file("train")
read_file("test")
def mctest():
def read_and_convert_mctest_data(file, output_dir, out_file, write_format="w+"):
# out_file = file.split("/")[-1].replace(".tsv", "")
# fdataset_idx = open(f"{output_dir}/{out_file}_idx.tsv", "w+")
fdataset_string = open(f"{output_dir}/{out_file}.tsv", write_format)
fmeta = open(f"{output_dir}/{out_file}_meta.txt", write_format)
global all_inputs
all_inputs = []
all_answers = []
all_meta = []
all_candidates = []
with open(file) as f:
for l in f.readlines():
line_split = l.replace("\n", "").replace("\\newline", " ").split("\t")
pid = line_split[0]
paragraph = line_split[2]
def get_question_and_candidates(split_row: List[str]):
kind = "one" if "one: " in split_row[0] else "multiple"
question = split_row[0].replace("one: ", "").replace("multiple: ", "")
candidates = split_row[1:5]
all_candidates.append(candidates)
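# format the options as "(A) ... (B) ..." by deriving the letter label from the option index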
candidates = " ".join([f"({chr(ord('A') + i)}) {x}" for i, x in enumerate(candidates)])
# fmeta.write(pid + "\t" + kind + " \n")
all_meta.append([pid, kind])
all_inputs.append(f"{question} \\n {candidates} \\n {paragraph}")
get_question_and_candidates(line_split[3:8])
get_question_and_candidates(line_split[8:13])
get_question_and_candidates(line_split[13:18])
get_question_and_candidates(line_split[18:23])
# try:
with open(file.replace(".tsv", ".ans")) as fans:
for l in fans.readlines():
all_answers.extend(l.replace("\n", "").split("\t"))
# except (FileNotFoundError):
# pass
assert len(all_answers) == len(all_inputs)
for i, y in enumerate(all_answers):
# fdataset_idx.write(all_inputs[i] + "\t" + y + "\n")
correct_ans_idx = ord(y) - ord('A')
fmeta.write(all_meta[i][0] + "\t" + y + "\t" + all_meta[i][1] + " \n")
fdataset_string.write(all_inputs[i] + "\t" + all_candidates[i][correct_ans_idx] + "\n")
read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc160.dev.tsv', "mc160", "dev")
read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc160.train.tsv', "mc160", "train")
read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc500.dev.tsv', "mc500", "dev")
read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc500.train.tsv', "mc500", "train")
# read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc160.dev.tsv', "mctest_corrected_the_separator", "dev", 'a')
# read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc160.train.tsv', "mctest_corrected_the_separator", "train", 'a')
# read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc500.dev.tsv', "mctest_corrected_the_separator", "dev", 'a')
# read_and_convert_mctest_data('../datasets/mctest-master/data/MCTest/mc500.train.tsv', "mctest_corrected_the_separator", "train", 'a')
def read_and_parse_multiqa(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
ans = open(f"{dataset}/{kind}_ans.jsonl", "w+")
with open(file) as f:
for l in f.readlines()[1:]:
json_line = json.loads(l)
pid = json_line['id']
paragraph = ""
for p in json_line['context']['documents']:
if 'title' in p:
paragraph += f" ({p['title']}) "
paragraph += p['text']
paragraph = paragraph.strip().replace("\n", "").replace("\t", "")
for q in json_line['qas']:
qid = q['qid']
fmeta.write(f"{pid}, {qid} \n")
question = q['question']
answers = []
print(q)
if 'cannot_answer' in q['answers']['open-ended']:
if q['answers']['open-ended']['cannot_answer'] == 'yes':
answers.append('<No Answer>')
else:
for a in q['answers']['open-ended']['annotators_answer_candidates']:
print(a)
if 'extractive' in a['single_answer']:
answers.append(a['single_answer']['extractive']['answer'])
elif 'yesno' in a['single_answer']:
answers.append(a['single_answer']['yesno'])
else:
print("yo yo yo ")
assert len(answers) > 0
paragraph = paragraph.replace("\t", "").replace(" ", " ").replace(" ", " ").replace("\n", " ")
question = question.replace("\t", "").replace(" ", " ").replace(" ", " ").replace("\n", " ")
if '?' not in question:
question = question + "?"
all_ans = [a.replace("\t", "").replace(" ", " ").replace(" ", " ").replace("\n", " ") for a in
answers]
print(all_ans)
fout.write(f"{question.strip()} \\n {paragraph.strip()}\t{all_ans[0].strip()}\n")
ans.write(json.dumps(all_ans) + "\n")
def mkdir(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
from collections import Counter
def race(variation, grade, write_format="w+"):
print(f">>>> race variation: {variation} / {grade}")
assert variation == "idx" or variation == "string" or variation == 'string_no_candidates'
count_map = {}
def process_race_dir(kind):
counter = {"counter": 0}
if variation == "idx":
dir = f"race_idx_{grade}"
mkdir(dir)
fin = open(f"{dir}/{kind.split('/')[0]}.tsv", write_format)
fmeta = open(f"{dir}/{kind.split('/')[0]}_meta.txt", write_format)
elif variation == "string":
dir = f"race_string_{grade}"
mkdir(dir)
fin = open(f"{dir}/{kind.split('/')[0]}.tsv", write_format)
fmeta = open(f"{dir}/{kind.split('/')[0]}_meta.txt", write_format)
elif variation == "string_no_candidates":
dir = f"race_string_no_candidates_{grade}"
mkdir(dir)
fin = open(f"{dir}/{kind.split('/')[0]}.tsv", write_format)
fmeta = open(f"{dir}/{kind.split('/')[0]}_meta.txt", write_format)
else:
raise AttributeError
def read_and_parse_race(file):
with open(file) as f:
counter["counter"] += 1
line = f.readlines()[0]
line = line.replace("\n", " ")
jsonline = json.loads(line)
answers = jsonline['answers']
options = jsonline['options']
questions = jsonline['questions']
article = jsonline['article']
article = article.replace("\n", " ").replace("\t", " ")
id = jsonline['id']
for i, q in enumerate(questions):
options[i] = [x.replace("\n", " ") for x in options[i]]
q = q.replace("\n", " ")
candidates = ("".join([f" ({chr(ord('A') + i)}) {x}" for i, x in enumerate(options[i])])).replace(
"\n", " ")
answer_idx = ord(answers[i]) - ord('A')
if variation == "idx":
fin.write(f"{q} \\n {candidates} \\n {article}\t{answers[i]} \n")
elif variation == "string":
fin.write(f"{q} \\n {candidates} \\n {article}\t{options[i][answer_idx]} \n")
elif variation == "string_no_candidates":
fin.write(f"{q} \\n {article} \t {options[i][answer_idx]} \n")
else:
raise AttributeError
fmeta.write(f"{id}\t{answers[i]}\n")
directory_address = f"../datasets/RACE/{kind}/"
directory = os.fsencode(directory_address)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".txt"):
read_and_parse_race(directory_address + filename)
else:
continue
count_map[kind.split("/")[0]] = counter["counter"]
process_race_dir(f"dev/{grade}")
process_race_dir(f"test/{grade}")
process_race_dir(f"train/{grade}")
count_file = open(f"race_{variation}_{grade}/counts.json", "w+")
count_file.write(json.dumps(count_map))
def newsqa():
read_and_parse_multiqa("../datasets/NewsQA_dev.jsonl", "newsqa", "dev")
read_and_parse_multiqa("../datasets/NewsQA_train.jsonl", "newsqa", "train")
def hotpotqa():
read_and_parse_multiqa("../datasets/HotpotQA_dev.jsonl", "hotpotqa", "dev")
read_and_parse_multiqa("../datasets/HotpotQA_train.jsonl", "hotpotqa", "train")
def squad():
read_and_parse_multiqa("../datasets/SQuAD1-1_dev.jsonl", "squad1_1", "dev")
read_and_parse_multiqa("../datasets/SQuAD1-1_train.jsonl", "squad1_1", "train")
def squad2():
read_and_parse_multiqa("../datasets/SQuAD2-0_dev.jsonl", "squad2", "dev")
read_and_parse_multiqa("../datasets/SQuAD2-0_train.jsonl", "squad2", "train")
def triviaqa():
read_and_parse_multiqa("../datasets/TriviaQA_wiki_train.jsonl", "triviaqa", "train")
read_and_parse_multiqa("../datasets/TriviaQA_wiki_dev.jsonl", "triviaqa", "dev")
def searchqa():
read_and_parse_multiqa("../datasets/SearchQA_dev.jsonl", "searchqa", "dev")
read_and_parse_multiqa("../datasets/SearchQA_train.jsonl", "searchqa", "train")
def boolq():
read_and_parse_multiqa("../datasets/BoolQ_dev.jsonl", "boolq", "dev")
read_and_parse_multiqa("../datasets/BoolQ_train.jsonl", "boolq", "train")
def duo_rc():
read_and_parse_multiqa("../datasets/DuoRC_Paraphrase_dev.jsonl", "duo_rc_paragraph", "dev")
read_and_parse_multiqa("../datasets/DuoRC_Paraphrase_train.jsonl", "duo_rc_paragraph", "train")
read_and_parse_multiqa("../datasets/DuoRC_Self_dev.jsonl", "duo_rc_self", "dev")
read_and_parse_multiqa("../datasets/DuoRC_Self_train.jsonl", "duo_rc_self", "train")
def drop():
def load_file(name, dir):
ftargets = open(f"{dir}/{name}_targets.txt", "+w")
finput = open(f"{dir}/{name}_inputs.txt", "+w")
fout = open(f"{dir}/{name}.tsv", "+w")
fmeta = open(f"{dir}/{name}_meta.txt", "+w")
span_lens = []
with open(f"../datasets/drop_dataset/drop_dataset_{name}.json") as f:
whole_data = json.load(f)
for key in whole_data.keys():
# print("------")
# print(key)
content = whole_data[key]
passage = content['passage'].replace("\t", " ").replace("\n", " ")
qa_pairs = content['qa_pairs']
for qpair in qa_pairs:
ans_text = ""
question = qpair['question'].replace("\t", " ").replace("\n", " ")
answer = qpair['answer']
# print(answer)
number = answer['number']
spans = answer['spans']
if len(spans) > 0:
span_lens.append(len(spans))
ans_text = ", ".join(spans)
elif len(number) > 0:
ans_text = number
else:
day = answer['date']['day']
month = answer['date']['month']
year = answer['date']['year']
if len(month) > 0:
ans_text += month
if len(day) > 0:
ans_text += f" {day}"
if len(year) > 0:
ans_text += f" {year}"
# assert ans_text != ""
# print(ans_text)
if ans_text == "":
print(" >>>> skipping the question . . . ")
continue
ans_text = ans_text.replace("\t", " ").replace("\n", " ")
query_id = qpair['query_id']
fout.write(f"{question} \\n {passage}\t{ans_text}\n")
ftargets.write(f"{ans_text}\n")
finput.write(f"{question} \\n {passage}\n")
fmeta.write(f" {query_id}")
print(span_lens)
load_file("dev", "drop")
load_file("train", "drop")
# def wikihop():
# read_and_parse_multiqa("../datasets/BoolQ_dev.jsonl", "boolq", "dev")
# read_and_parse_multiqa("../datasets/BoolQ_train.jsonl", "boolq", "train")
#
# def duorc_para():
# read_and_parse_multiqa("../datasets/BoolQ_dev.jsonl", "boolq", "dev")
# read_and_parse_multiqa("../datasets/BoolQ_train.jsonl", "boolq", "train")
#
# def duorc_self():
# read_and_parse_multiqa("../datasets/BoolQ_dev.jsonl", "boolq", "dev")
# read_and_parse_multiqa("../datasets/BoolQ_train.jsonl", "boolq", "train")
#
# def complex_questions():
# read_and_parse_multiqa("../datasets/BoolQ_dev.jsonl", "boolq", "dev")
# read_and_parse_multiqa("../datasets/BoolQ_train.jsonl", "boolq", "train")
# def comqa():
# read_and_parse_multiqa("../datasets/BoolQ_dev.jsonl", "boolq", "dev")
# read_and_parse_multiqa("../datasets/BoolQ_train.jsonl", "boolq", "train")
def extract_oyvind_predictions(file):
all_predictions = {}
with open(file) as f:
for line in f.readlines():
jsonline = json.loads(line)
id = jsonline['id']
if id not in all_predictions:
all_predictions[id] = jsonline
else:
raise EnvironmentError
return all_predictions
oyvind_test_preds = [
[
"roberta-combo",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-combo/eval_test.jsonl")
],
[
"roberta-no-ir",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-no-ir/eval_test.jsonl")
],
[
"roberta-question-stem-ir",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-question-stem-ir/eval_test.jsonl")
],
[
"roberta-standard-ir",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-standard-ir/eval_test.jsonl")
]
]
oyvind_dev_preds = [
[
"roberta-combo",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-combo/eval_validation.jsonl")
],
[
"roberta-no-ir",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-no-ir/eval_validation.jsonl")
],
[
"roberta-question-stem-ir",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-question-stem-ir/eval_validation.jsonl")
],
[
"roberta-standard-ir",
extract_oyvind_predictions("../datasets/oyvind_predictions/roberta-standard-ir/eval_validation.jsonl")
]
]
def arc():
directory_easy = "ARC-V1-Feb2018-2/ARC-Easy/ARC-Easy"
directory_hard = "ARC-V1-Feb2018-2/ARC-Challenge/ARC-Challenge"
def read_file(dir, split, kind, predictions_files, with_para=False):
outdir = f"arc_{kind}"
if with_para:
outdir = f"arc_{kind}_with_ir"
fout = open(f"{outdir}/{split.lower()}.tsv", "w+")
fout_meta = open(f"{outdir}/{split.lower()}_meta.tsv", "w+")
output_files = []
if predictions_files:
for x in predictions_files:
fout_tmp = open(f"arc_{kind}/predictions_{x[0]}_{split}.txt", "w")
output_files.append(fout_tmp)
print(fout_tmp)
correctness_map = {}
with open(f"{dir}-{split}.jsonl") as f:
for line in f.readlines():
json_line = json.loads(line)
question = json_line['question']['stem']
choices = json_line['question']['choices']
if kind == "easy":
id = "ARCEZ_" + json_line['id']
else:
id = "ARCCH_" + json_line['id']
para = ""
if with_para:
print("done")
para = "\\n" + oyvind_paragraphs[id].replace("\n", " ").replace("\t", " ")
# print(json_line)
answer_key = json_line['answerKey']
numbers = ""
if 'A' in [c['label'] for c in choices]:
answer_key_idx = ord(answer_key[0]) - ord('A')
answer_label = answer_key[0]
else:
answer_key_idx = ord(answer_key[0]) - ord('1')
answer_label = chr(ord(answer_key[0]) - ord('1') + ord('A'))
numbers = "numerical"
candidates = " ".join([f"({chr(ord('A') + i)}) {c['text']}" for i, c in enumerate(choices)]).replace(
"\n", " ")
# print((answer_key_idx, answer_key, candidates))
answer_text = choices[answer_key_idx]['text']
fout.write(f"{question} \\n {candidates}{para}\t{answer_text}\n")
fout_meta.write(f"{json_line['id']}\t{answer_label}\t{numbers}\n")
# fout_meta.write(f"{json_line['id']},{json_line['answerKey'][0]}\n")
if predictions_files:
for i, x in enumerate(predictions_files):
pred_type = x[0]
predictions = x[1]
fout_tmp = output_files[i]
# print(f" ** pred type: {pred_type}")
if id not in predictions:
print(" >>>>> id not found . . . ")
# hack: use the gold ans
fout_tmp.write(answer_text + "\n")
else:
pred_json = predictions[id]
choice_text_list = pred_json['choice_text_list']
correct_answer_index = pred_json['correct_answer_index']
# label_probs = pred_json['label_probs']
answer_index = pred_json['answer_index']
fout_tmp.write(choice_text_list[answer_index] + "\n")
if pred_type not in correctness_map:
correctness_map[pred_type] = []
correctness_map[pred_type].append(1.0 if answer_index == correct_answer_index else 0.0)
for pred_type in correctness_map.keys():
if len(correctness_map[pred_type]) > 0:
print(len(correctness_map[pred_type]))
print(
f" **** Accuracy on {split} of ARC-{kind} ({pred_type}): {sum(correctness_map[pred_type]) / len(correctness_map[pred_type])}")
for with_para in [True, False]:
read_file(directory_easy, "Dev", "easy", oyvind_dev_preds, with_para)
read_file(directory_easy, "Test", "easy", oyvind_test_preds, with_para)
read_file(directory_easy, "Train", "easy", None, with_para)
read_file(directory_hard, "Dev", "hard", oyvind_dev_preds, with_para)
read_file(directory_hard, "Test", "hard", oyvind_test_preds, with_para)
read_file(directory_hard, "Train", "hard", None, with_para)
def ai2_science():
directory_middle = "../datasets/AI2-ScienceQuestions-V2.1-Jan2018/MiddleSchool/Middle-"
directory_elementary = "../datasets/AI2-ScienceQuestions-V2.1-Jan2018/ElementarySchool/Elementary-"
def read_file(dir, split, grade):
fout = open(f"ai2_science_{grade.lower()}/{split}.tsv".lower(), "w+")
foutmeta = open(f"ai2_science_{grade.lower()}/{split}_meta.tsv".lower(), "w+")
with open(f"{dir}NDMC-{split.lower()}.jsonl") as f:
for line in f.readlines():
json_line = json.loads(line)
question = json_line['question']['stem']
choices = json_line['question']['choices']
candidates = " ".join([f"({c['label']}) {c['text']}" for c in choices]).replace("\n", " ")
print(json_line)
answer_key = json_line['answerKey']
answer_key_idx = ord(answer_key[0]) - ord('A')
answer_text = choices[answer_key_idx]['text']
fout.write(f"{question} \\n {candidates}\t{answer_text}\n")
foutmeta.write(f"{json_line['id']}\t{answer_key[0]}\n")
read_file(directory_middle, "Dev", "Middle")
read_file(directory_middle, "Test", "Middle")
read_file(directory_middle, "Train", "Middle")
read_file(directory_elementary, "Dev", "Elementary")
read_file(directory_elementary, "Test", "Elementary")
read_file(directory_elementary, "Train", "Elementary")
def quoref():
def read_file(file, segment):
fout = open(f"quoref/{segment}.tsv", "w+")
ftargets = open(f"quoref/{segment}_targets.txt", "+w")
finputs = open(f"quoref/{segment}_inputs.txt", "+w")
ans_size = []
with open(file) as f:
file = json.load(f)
for section in file['data']:
title = section['title'].replace("\n", " ").replace("\t", " ")
for para in section['paragraphs']:
context = para['context'].replace("\n", " ").replace("\t", " ")
for qa in para['qas']:
question = qa['question'].replace("\n", " ").replace("\t", " ")
ans_size.append(len(qa['answers']))
for a in qa['answers']:
answer = a['text'].replace("\n", " ").replace("\t", " ")
fout.write(f"{question} \\n ({title}) {context}\t{answer}\n")
ftargets.write(f"{answer}\n")
finputs.write(f"{question} \\n ({title}) {context}\n")
print(sum(ans_size) / len(ans_size))
read_file("../datasets/quoref-train-dev-v0.1/quoref-dev-v0.1.json", "dev")
read_file("../datasets/quoref-train-dev-v0.1/quoref-train-v0.1.json", "train")
def ropes():
def read_file(file, segment):
ans_size = []
fout = open(f"ropes/{segment}.tsv", "w+")
ftargets = open(f"ropes/{segment}_targets.txt", "+w")
finput = open(f"ropes/{segment}_inputs.txt", "+w")
with open(file) as f:
file = json.load(f)
for section in file['data']:
for para in section['paragraphs']:
context = f"{para['background']} {para['situation']}".replace("\n", " ").replace("\t", " ")
for qa in para['qas']:
question = qa['question'].replace("\n", " ").replace("\t", " ")
ans_size.append(len(qa['answers']))
for a in qa['answers']:
answer = a['text'].replace("\n", " ").replace("\t", " ")
fout.write(f"{question} \\n {context}\t{answer}\n")
ftargets.write(f"{answer}\n")
finput.write(f"{question} \\n {context}\n")
read_file("../datasets/ropes-train-dev-v1.0/dev-v1.0.json", "dev")
read_file("../datasets/ropes-train-dev-v1.0/train-v1.0.json", "train")
def narrative_qa():
paragraphs = {}
with open("../datasets/narrativeqa/third_party/wikipedia/summaries.csv") as f:
spamreader = csv.reader(f)
for i, line in enumerate(spamreader):
print(line)
if i == 0:
continue
paragraphs[line[0]] = line[2].replace("\n", "")
fout_test = open(f"narrativeqa/test.tsv", "w+")
fout_train = open(f"narrativeqa/train.tsv", "w+")
fout_dev = open(f"narrativeqa/dev.tsv", "w+")
counts = open(f"narrativeqa/counts.json", "w+")
count_train = 0
count_test = 0
count_dev = 0
with open("..//datasets/narrativeqa/qaps.csv") as f:
spamreader = csv.reader(f)
for i, line in enumerate(spamreader):
print(line)
if i == 0:
continue
line1 = f"{line[2]} \\n {paragraphs[line[0]]} \t {line[3]} \n"
line2 = f"{line[2]} \\n {paragraphs[line[0]]} \t {line[4]} \n"
if line[1] == "train":
fout_train.write(line1)
fout_train.write(line2)
count_train += 1
elif line[1] == "test":
fout_test.write(line1)
fout_test.write(line2)
count_test += 1
elif line[1] == "valid":
fout_dev.write(line1)
fout_dev.write(line2)
count_dev += 1
else:
print(" >>>> ERROR ")
counts.write(json.dumps({"train": count_train, "dev": count_dev, "test": count_test}))
def multirc():
def read_file(file):
lines = []
with open(f"../datasets/multirc/{file}") as f:
for line in f.readlines():
line_split = line.split("\t")
paragraph = line_split[4].replace("\n", " ").replace("\t", " ")
question = line_split[5].replace("\n", " ").replace("\t", " ")
line_split[6] = line_split[6].replace("\n", "")
assert line_split[6] == "True" or line_split[6] == "False", f"`{line_split[6]}`"
answer = "yes" if line_split[6] == "True" else "no"
lines.append(f"{question} \\n {paragraph}\t{answer}\n")
return lines
lines1 = read_file("dev_83-fixedIds.json.yes-nos.tsv")
lines2 = read_file("train_456-fixedIds.json.yes-nos.tsv")
fout = open(f"multirc/dev.tsv", "w+")
fout2 = open(f"multirc/train.tsv", "w+")
# write dev lines to the dev file and train lines to the train file
for line in lines1:
fout.write(line)
for line in lines2:
fout2.write(line)
def openbookqa():
def read_file(file, split, predictions_files, with_para=False):
out_dir = "openbookqa"
if with_para:
out_dir = "openbookqa_with_ir"
fout = open(f"{out_dir}/{split}.tsv", "w+")
fout_meta = open(f"{out_dir}/{split}_meta.tsv", "w+")
output_files = []
oyind_accuracy = {}
if predictions_files:
fout_target_tmp = open(f"openbookqa/oyvind/_target.txt", "w")
for x in predictions_files:
fout_tmp = open(f"openbookqa/oyvind/predictions_{x[0]}_{split}.txt", "w")
output_files.append(fout_tmp)
# print(fout_tmp)
oyind_accuracy[x[0]] = []
with open(file) as f:
for line in f.readlines():
json_line = json.loads(line)
question = json_line['question']['stem']
choices = json_line['question']['choices']
candidates = " ".join([f"({c['label']}) {c['text']}" for c in choices]).replace("\n", " ")
print(json_line)
answer_key = json_line['answerKey']
answer_key_idx = ord(answer_key[0]) - ord('A')
answer_text = choices[answer_key_idx]['text']
id = "OBQA_" + json_line['id']
para = ""
if with_para:
para = "\\n" + oyvind_paragraphs[id].replace("\n", " ").replace("\t", " ")
fout.write(f"{question} \\n {candidates}{para}\t{answer_text}\n")
fout_meta.write(f"{json_line['id']}\t{answer_key[0]}\n")
if predictions_files:
fout_target_tmp.write(f"{answer_text}\n")
for i, x in enumerate(predictions_files):
pred_type = x[0]
predictions = x[1]
fout_tmp = output_files[i]
# print(f" ** pred type: {pred_type}")
if id not in predictions:
print(" >>>>> id not found . . . ")
# hack: use the gold ans
fout_tmp.write(answer_text + "\n")
else:
pred_json = predictions[id]
choice_text_list = pred_json['choice_text_list']
# correct_answer_index = pred_json['correct_answer_index']
answer_index = pred_json['answer_index']
fout_tmp.write(choice_text_list[answer_index] + "\n")
if answer_index == answer_key_idx:
oyind_accuracy[pred_type].append(1.0)
else:
oyind_accuracy[pred_type].append(0.0)
if predictions_files:
for x in predictions_files:
pred_type = x[0]
print(f" *** {pred_type} \t accuracy: {sum(oyind_accuracy[pred_type]) / len(oyind_accuracy[pred_type])} ")
for with_para in [True, False]:
read_file("../datasets/OpenBookQA-V1-Sep2018/Data/Main/dev.jsonl", "dev", None, with_para)
# read_file("../datasets/OpenBookQA-V1-Sep2018/Data/Main/test.jsonl", "test", oyvind_test_preds, with_para)
read_file("../datasets/OpenBookQA-V1-Sep2018/Data/Main/test.jsonl", "test", None, with_para)
read_file("../datasets/OpenBookQA-V1-Sep2018/Data/Main/train.jsonl", "train", None, with_para)
def boolq_np():
outfile = {
"dev": open("/Users/danielk/ideaProjects/t2t-qa/t2t-data/boolq-np/dev.tsv", "w"),
"train": open("/Users/danielk/ideaProjects/t2t-qa/t2t-data/boolq-np/train.tsv", "w"),
}
with open("boolq_natural_perturbations.jsonl") as f:
for line in f.readlines():
json_line = json.loads(line)
# print(json_line['split'])
if json_line['is_seed_question'] == 1:
json_line['question'] += '?'
label = "yes" if json_line['hard_label'] == "True" else "no"
outfile[json_line['split']].write(f"{json_line['question']}\\n{json_line['passage']}\t{label}\n")
def read_paragraphs(file):
map = {}
with open(file) as f:
for line in f.readlines():
json_line = json.loads(line)
map[json_line['id']] = json_line['para']
return map
oyvind_paragraphs = {}
map1 = read_paragraphs("oyvind_arc_obqa_reg_with_ir/train.jsonl")
map2 = read_paragraphs("oyvind_arc_obqa_reg_with_ir/test.jsonl")
map3 = read_paragraphs("oyvind_arc_obqa_reg_with_ir/dev.jsonl")
oyvind_paragraphs.update(map1)
oyvind_paragraphs.update(map2)
oyvind_paragraphs.update(map3)
def ambigqa():
def read_file(file, dir, split):
outfile = open(f"{dir}/{split}.tsv", "+w")
outfile_meta = open(f"{dir}/{split}_meta.tsv", "+w")
size = 0
with open(file, "r") as f:
json_file = json.load(f)
for item in tqdm(json_file):
question = item['question'].replace("\n", " ").replace("\t", " ")
single_answers_already_included = []
for anno in item["annotations"]:
if anno['type'] == "singleAnswer":
for ans in anno['answer']:
if ans not in single_answers_already_included:
ans = ans.replace("\n", " ").replace("\t", " ")
outfile.write(f"{question}\t{ans}\n")
outfile_meta.write(item['id'] + "\n")
single_answers_already_included.append(ans)
size += 1
else:
answers = []
for x in anno['qaPairs']:
answers.append(x['answer'][0])
answers = [x.strip() for x in answers]
answers = list(set(answers)) # to drop duplicate answers
for i, ordering in enumerate(itertools.permutations(answers)):
if i >= 3:
break
ans_str = " [SEP] ".join(ordering).replace("\n", " ").replace("\t", " ")
outfile.write(f"{question}\t{ans_str}\n")
outfile_meta.write(item['id'] + "\n")
size += 1
return size
count_dev = read_file("ambignq_light/dev_light.json", "ambigqa", "dev")
count_train = read_file("ambignq_light/train_light.json", "ambigqa",
"train")
count_test = 0
# Create TSVs and get counts.
with open("ambigqa/counts.json", "w") as outfile:
json.dump({"train": count_train, "dev": count_dev, "test": count_test}, outfile)
def natural_questions_direct_answer():
question_to_para_map = read_natural_questions_paragraphs()
def read_file(in_fname, dir, split, with_paragraphs=False, aggregared_ans=False):
outfile = open(f"{dir}/{split}.tsv", "+w")
outfile_meta = open(f"{dir}/{split}_meta.tsv", "+w")
with open(in_fname) as f:
json_file = json.load(f)
size = 0
for i, item in enumerate(json_file):
id = item['id']
question = item['question'].replace("\t", " ").replace("\n", " ")
if "?" not in question:
question += "?"
para = ""
if with_paragraphs:
para = question_to_para_map[f"{split}-{i}"].replace("\t", " ").replace("\n", " ").replace("[SEP]", "-").replace("[sep]", "-")
para = " ".join(para.split(" ")[1:600]) # take the subset
para = "\\n" + para
if aggregared_ans:
answers = [answer.replace("\t", " ").replace("\n", " ") for answer in item['answer']]
random.shuffle(answers)
concatenated_answers = "///".join(answers)
outfile.write(f"{question}{para}\t{concatenated_answers}\t{answers[0]}\n")
outfile_meta.write(f"{id}\n")
size += 1
else:
for answer in item['answer']:
answer = answer.replace("\t", " ").replace("\n", " ")
outfile.write(f"{question}{para}\t{answer}\n")
outfile_meta.write(f"{id}\n")
size += 1
return size
print("Generating NQ TSVs.")
# Create TSVs and get counts.
for dir in ['natural_questions_direct_ans_aggregated']: # ['natural_questions_direct_ans', 'natural_questions_with_dpr_para']:
with_para = True if "dpr" in dir else False
aggregared_ans = True if "aggregated" in dir else False
count_dev = read_file("../datasets/nq/nqopen/nqopen-dev.json", dir, "dev", with_para, aggregared_ans)
count_train = read_file("../datasets/nq/nqopen/nqopen-train.json", dir, "train", with_para, aggregared_ans)
with open(dir + "/counts.json", "w") as outfile:
json.dump({"train": count_train, "dev": count_dev}, outfile)
count_train = read_file("../datasets/nq/nqopen/nqopen-train.json", dir + "_test", "train", with_para, aggregared_ans)
count_test = read_file("../datasets/nq/nqopen/nqopen-test.json", dir + "_test", "test", with_para, aggregared_ans)
with open(dir + "_test" + "/counts.json", "w") as outfile:
json.dump({"train": count_train, "test": count_test}, outfile)
## NQ contexts
def read_natural_questions_paragraphs():
question_to_para_map = {}
def read_file(file, split):
with open(file) as f:
json_file = json.load(f)
for i, item in enumerate(json_file):
question_to_para_map[f"{split}-{i}"] = item['context']
read_file("../datasets/nq-dpr-output/train.json", "train")
read_file("../datasets/nq-dpr-output/test.json", "test")
read_file("../datasets/nq-dpr-output/dev.json", "dev")
return question_to_para_map
def natural_questions_reading_comprehension():
def read_file(in_fname, out_fname):
def extract_answer(tokens, span):
"""Reconstruct answer from token span and remove extra spaces."""
start, end = span["start_token"], span["end_token"]
ans = " ".join(tokens[start:end])
# Remove incorrect spacing around punctuation.
ans = ans.replace(" ,", ",").replace(" .", ".").replace(" %", "%")
ans = ans.replace(" - ", "-").replace(" : ", ":").replace(" / ", "/")
ans = ans.replace("( ", "(").replace(" )", ")")
ans = ans.replace("`` ", "\"").replace(" ''", "\"")
ans = ans.replace(" 's", "'s").replace("s ' ", "s' ")
return ans
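# Illustrative example (not part of the original script; token values are made up):
# for tokens = ["the", "answer", "is", "42", "."] and span = {"start_token": 3, "end_token": 5},
# extract_answer joins "42 ." and the punctuation cleanup above yields "42.".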
count = 0
with open(in_fname, "r") as infile, open(out_fname, "w") as outfile:
for line in infile.readlines():
ex = json.loads(line)
# Remove any examples with more than one answer.
if len(ex['annotations'][0]['short_answers']) != 1:
continue
# Questions in NQ do not include a question mark.
question = ex["question_text"] + "?"
answer_span = ex['annotations'][0]['short_answers'][0]
# Handle the two document formats in NQ (tokens or text).
if "document_tokens" in ex:
tokens = [t["token"] for t in ex["document_tokens"]]
elif "document_text" in ex:
tokens = ex["document_text"].split(" ")
answer = extract_answer(tokens, answer_span)
# Write this line as <question>\t<answer>
outfile.write("%s\t%s\n" % (question, answer))
count += 1
if count % 1000 == 1:
print(f"Wrote {count} examples to {out_fname}.")
return count
count_dev = read_file("../datasets/dev-all.jsonl", "natural_questions/dev.tsv")
count_train = read_file("../datasets/nq-train.jsonl", "natural_questions/train.tsv")
# Create TSVs and get counts.
print("Generating NQ TSVs.")
with open("natural_questions/counts.json", "w") as outfile:
json.dump({"train": count_train, "dev": count_dev}, outfile)
def winogrande():
def read_file(size, split, outfolder):
counter = 0
outfile = open(f"{outfolder}/{split}.tsv", "w+")
outfile_meta = open(f"{outfolder}/{split}_meta.tsv", "w+")
file_name = f"{split}_{size}.jsonl"
# label_file_name = f"{split}_{size}-labels.lst"
if split != "train":
file_name = f"{split}.jsonl"
# label_file_name = f"{split}-labels.lst"
with open(f"winogrande_1.1/{file_name}") as f:
for line in f.readlines():
json_line = json.loads(line)
qID = json_line['qID']
sentence = json_line['sentence']
option1 = json_line['option1']
option2 = json_line['option2']
ans = ""
idx = "-"
idx_string = "-"
if 'answer' in json_line:
idx = json_line['answer']
ans = option1
assert idx == "1" or idx == "2"
if idx == "2":
ans = option2
idx_string = "B"
else:
idx_string = "A"
outfile.write(f"{sentence} \\n (A) {option1} (B) {option2} \t {ans} \n")
outfile_meta.write(f"{qID}\t{idx_string}\t numeric \t {ans} \n")
counter += 1
return counter
for size in ["xs", "s", "m", "l", "xl"]:
train_count = read_file(size, "train", f"winogrande_{size}")
dev_count = read_file(size, "dev", f"winogrande_{size}")
# test_count = read_file(size, "test")
with open(f"winogrande_{size}/counts.json", "w+") as outfile:
json.dump({"train": train_count, "dev": dev_count}, outfile)
train_count = read_file("s", "train", f"winogrande_test")
test_count = read_file("s", "test", f"winogrande_test")
with open(f"winogrande_test/counts.json", "w+") as outfile:
json.dump({"train": train_count, "test": test_count}, outfile)
def anlg():
director = "/Users/danielk/ideaProjects/t2t-qa/t2t-data/anlg_dev/"
def readfile(inputfile, labelfile, split):
labels = []
with open(labelfile) as f1:
for line in f1.readlines():
labels.append(int(line.replace("\n", "")))
outfile = open(director + split + ".tsv", "+w")
outmetafile = open(director + split + "_meta.tsv", "+w")
with open(inputfile) as f2:
for idx, line in enumerate(f2.readlines()):
label = labels[idx]
assert label == 1 or label == 2, f" * the label is: {label}"
json_line = json.loads(line)
outstring = json_line['hyp1']
if label == 2:
outstring = json_line['hyp2']
outfile.write(json_line['obs1'] + " ___ " + json_line['obs2'] + "\t" + outstring + "\n")
outmetafile.write(f"{json_line['story_id']}\t{label}\n")
return len(labels)
dev_count = readfile("../datasets/aNLG/dev.jsonl", "../datasets/aNLG/dev-labels.lst", "dev")
train_count = readfile("../datasets/aNLG/train.jsonl", "../datasets/aNLG/train-labels.lst", "train")
with open(director + "counts.json", "w+") as outfile:
json.dump({"train": train_count, "dev": dev_count}, outfile)
csv.field_size_limit(10 * 131072)
def summarization():
def readfile(file):
outfile = open(file.replace(".tsv", "_2.tsv"), "+w")
with open(file) as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for row in reader:
row[0] = row[0].replace("\t", " ").replace("\n", " ")
row[0] = row[0][:6*500]
row[1] = row[1].replace("\t", " ").replace("\n", " ")
outfile.write(row[0] + "\t" + row[1] + "\n")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-cnndm-dev/dev.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-cnndm-dev/train.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-cnndm-test/test.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-cnndm-test/train.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-xsum-dev/dev.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-xsum-dev/train.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-xsum-test/test.tsv")
readfile("/Users/danielk/ideaProjects/t2t-qa/t2t-data/summarization-xsum-test/train.tsv")
def csqa2_process(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
ans = open(f"{dataset}/{kind}_ans.jsonl", "w+")
df=pd.read_json('/content/csqa2/dataset/'+file, lines=True, compression='gzip')
questions=df[['question','answer','id']].values
for row in range(len(questions)):
question=questions[row][0].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
answer=[questions[row][1].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")]
id=questions[row][2]
fmeta.write(f"{id} \n")
fout.write(f"{question} \t{answer[0]}\n")
ans.write(json.dumps(answer) + "\n")
return len(questions)
def csqa2_process_test(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
df=pd.read_json('/content/'+file, lines=True, compression='gzip')
questions=df[['question','id']].values
for row in range(len(questions)):
question=questions[row][0].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
id=questions[row][1]
fmeta.write(f"{id} \n")
fout.write(f"{question}\n")
return len(questions)
def csqa():
train_count = csqa2_process('CSQA2_train.json.gz','csqa2','train')
dev_count = csqa2_process('CSQA2_dev.json.gz','csqa2','dev')
test_count = csqa2_process_test('CSQA2_test_no_answers.json.gz','csqa2','test')
with open(f"/content/csqa2/counts.json", "w+") as outfile:
json.dump({"train": train_count, "dev": dev_count, "test": test_count}, outfile)
def pubmedqa_process(file, dataset, kind):
fout_long = open(f"{dataset}/long_answer/{kind}.tsv", "w+")
fout_short = open(f"{dataset}/short_answer/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
ans_long = open(f"{dataset}/long_answer/{kind}_ans.jsonl", "w+")
ans_short = open(f"{dataset}/short_answer/{kind}_ans.jsonl", "w+")
df=pd.read_json(codecs.open('/content/'+file,'r','utf-8')).transpose()
questions=df[['QUESTION','CONTEXTS','LONG_ANSWER','final_decision']].values
meta=df.index.values
for id in meta:
fmeta.write(f"{id} \n")
for row in range(len(questions)):
question=questions[row][0].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
separator=','
contexts=separator.join(questions[row][1]).strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
long_answer=[questions[row][2].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")]
answer=[questions[row][3].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")]
fout_long.write(f"{question}\\n {contexts} \t{long_answer[0]}\n")
fout_short.write(f"{question}\\n {contexts} \t{answer[0]}\n")
ans_short.write(json.dumps(answer) + "\n")
ans_long.write(json.dumps(long_answer) + "\n")
def pubmedqa_process_un(file, dataset, kind):
fout_long = open(f"{dataset}/long_answer/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
ans_long = open(f"{dataset}/long_answer/{kind}_ans.jsonl", "w+")
df=pd.read_json(codecs.open('/content/'+file,'r','utf-8')).transpose()
questions=df[['QUESTION','CONTEXTS','LONG_ANSWER']].values
meta=df.index.values
for id in meta:
fmeta.write(f"{id} \n")
for row in range(len(questions)):
question=questions[row][0].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
separator=','
contexts=separator.join(questions[row][1]).strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
long_answer=[questions[row][2].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")]
fout_long.write(f"{question}\\n {contexts} \t{long_answer[0]}\n")
ans_long.write(json.dumps(long_answer) + "\n")
def pubmedqa():
pubmedqa_process('ori_pqal.json','pubmedqa','pqal_train')
pubmedqa_process('ori_pqaa.json','pubmedqa','pqaa_train')
pubmedqa_process('test_set.json','pubmedqa','test')
pubmedqa_process_un('ori_pqau.json','pubmedqa','pqau_train')
def strategyqa_process(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
ans = open(f"{dataset}/{kind}_ans.jsonl", "w+")
df=pd.read_json(codecs.open('/content/'+file,'r','utf-8'))
questions=df[['qid','term','question','answer']].values
documents=pd.read_json(codecs.open('/content/queries_cache.json','r','utf-8'))
for row in range(len(questions)):
qid = questions[row][0]
term = questions[row][1].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
term = "(" +term + ")"
question=questions[row][2].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
if questions[row][3]==True:
answer=["yes"]
else:
answer=["no"]
query=clean_query(questions[row][2])
arr=documents[query]
retrieved_documents=[]
token_num=0
for result in arr[0]:
sentences=result["sentence"].split(".")
for index in range(len(sentences)-1):
if (token_num+len(sentences[index].split(" ")))<500:
token_num += len(sentences[index].split(" "))
retrieved_documents.append(sentences[index] + ".")
retrieved_document=''.join(retrieved_documents).strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
fout.write(f"{question}\\n {term} {retrieved_document} \t{answer[0]}\n")
ans.write(json.dumps(answer) + "\n")
fmeta.write(f"{qid} \n")
return len(questions)
def strategyqa_process_test(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
df=pd.read_json(codecs.open('/content/'+file,'r','utf-8'))
questions=df[['qid','question']].values
documents=pd.read_json(codecs.open('/content/queries_cache.json','r','utf-8'))
for row in range(len(questions)):
qid = questions[row][0]
question=questions[row][1].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
query=clean_query(questions[row][1])
arr=documents[query]
retrieved_documents=[]
token_num=0
for result in arr[0]:
sentences=result["sentence"].split(".")
for index in range(len(sentences)-1):
if (token_num+len(sentences[index].split(" ")))<500:
token_num += len(sentences[index].split(" "))
retrieved_documents.append(sentences[index] + ".")
retrieved_document=''.join(retrieved_documents).strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
fout.write(f"{question}\\n {retrieved_document}\n")
fmeta.write(f"{qid} \n")
return len(questions)
def clean_query(query, remove_stopwords=True):
if remove_stopwords:
query_split = query.split()
new_query_split = []
for word in query_split:
if word.lower() + " " not in STOPWORDS:
new_query_split.append(word)
query = " ".join(new_query_split)
return query
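# Illustrative sketch (assumption: STOPWORDS is a space-delimited collection of
# stopwords defined elsewhere in this script, e.g. containing "is " and "the "):
#   clean_query("Is coal the main fuel?")  ->  "coal main fuel?"
# i.e. stopwords are dropped before the query is looked up in queries_cache.json.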
def strategyqa():
train_count=strategyqa_process('strategyqa_train.json','strategyqa','train')
test_count=strategyqa_process_test('strategyqa_test.json','strategyqa','test')
with open(f"/content/strategyqa/counts.json", "w+") as outfile:
json.dump({"train": train_count, "test": test_count}, outfile)
def reclor_process(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
df=pd.read_json(codecs.open('/content/'+file,'r','utf-8'))
questions=df[['question','answers','context','label','id_string']].values
for row in range(len(questions)):
question=questions[row][0].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
candidates=questions[row][1]
options = " ".join([f"({chr(ord('A') + i)}) {x}" for i, x in enumerate(candidates)])
contexts=questions[row][2].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
label = questions[row][3]
answer=questions[row][1][label].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
id=questions[row][4]
answer_index = chr(ord('A')+(label))
fmeta.write(f"{id}\t{answer_index}\n")
fout.write(f"{question} \\n{options} \\n {contexts}\t{answer}\n")
return len(questions)
def reclor_process_test(file, dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
df=pd.read_json(codecs.open('/content/'+file,'r','utf-8'))
questions=df[['question','answers','context','id_string']].values
for row in range(len(questions)):
question=questions[row][0].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
if '?' not in question:
question = question + "?"
candidates=questions[row][1]
options = " ".join([f"({chr(ord('A') + i)}) {x}" for i, x in enumerate(candidates)])
contexts=questions[row][2].strip().replace("\n", "").replace("\t", "").replace(" ", " ").replace(" ", " ")
id=questions[row][3]
fmeta.write(f"{id}\n")
fout.write(f"{question} \\n{options} \\n {contexts}\n")
return len(questions)
def reclor():
train_count = reclor_process("train.json","reclor","train")
val_count = reclor_process("val.json","reclor","val")
test_count = reclor_process_test("test.json","reclor","test")
with open(f"/content/reclor/counts.json", "w+") as outfile:
json.dump({"train": train_count, "val": val_count, "test": test_count}, outfile)
def race_c_process(dataset, kind):
fout = open(f"{dataset}/{kind}.tsv", "w+")
fmeta = open(f"{dataset}/{kind}_meta.txt", "w+")
l = [pd.read_json(filename)
import sys
import os
import numpy as np
from tqdm import tqdm
import json
import time as timemodu
from numba import jit, prange
import h5py
import fnmatch
import pandas as pd
import astropy
import astropy as ap
from astropy.io import fits
from astropy.coordinates import SkyCoord
import astropy.timeseries
import multiprocessing
from functools import partial
import scipy as sp
import scipy.interpolate
import astroquery
import astroquery.mast
import matplotlib as mpl
import matplotlib.pyplot as plt
# own modules
import tdpy
from tdpy import summgene
import lygos
import hattusa
def quer_mast(request):
from urllib.parse import quote as urlencode
import http.client as httplib
server='mast.stsci.edu'
# Grab Python Version
version = '.'.join(map(str, sys.version_info[:3]))
# Create Http Header Variables
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain',
'User-agent':'python-requests/'+version}
# Encoding the request as a json string
requestString = json.dumps(request)
requestString = urlencode(requestString)
# opening the https connection
conn = httplib.HTTPSConnection(server)
# Making the query
conn.request('POST', '/api/v0/invoke', 'request='+requestString, headers)
# Getting the response
resp = conn.getresponse()
head = resp.getheaders()
content = resp.read().decode('utf-8')
# Close the https connection
conn.close()
return head, content
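# Minimal usage sketch (commented out so importing this module stays side-effect
# free). The request format mirrors the MAST queries built elsewhere in this file;
# the TIC ID below is just a placeholder.
# request = {'service': 'Mast.Catalogs.Filtered.Tic', 'format': 'json',
#            'params': {'columns': 'rad, mass',
#                       'filters': [{'paramName': 'ID', 'values': ['261136679']}]}}
# head, content = quer_mast(request)
# dictquer = json.loads(content)['data']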
def xmat_tici(listtici):
if len(listtici) == 0:
raise Exception('')
# make sure the input is a python list of strings
if isinstance(listtici[0], str):
if isinstance(listtici, np.ndarray):
listtici = list(listtici)
else:
if isinstance(listtici, list):
listtici = np.array(listtici)
if isinstance(listtici, np.ndarray):
listtici = listtici.astype(str)
listtici = list(listtici)
request = {'service':'Mast.Catalogs.Filtered.Tic', 'format':'json', 'params':{'columns':'rad, mass', \
'filters':[{'paramName':'ID', 'values':listtici}]}}
headers, outString = quer_mast(request)
dictquer = json.loads(outString)['data']
return dictquer
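# Example usage (commented out; the TIC IDs are placeholders): cross-match a few
# TIC IDs against TIC8 and read back the stellar radius and mass columns.
# dictquer = xmat_tici(['261136679', '150428135'])
# for entry in dictquer:
#     print(entry['rad'], entry['mass'])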
def retr_dictpopltic8(typepopl, numbsyst=None, typeverb=1):
'''
Get a dictionary of the sources in the TIC8 with the fields in the TIC8.
Keyword arguments
typepopl: type of the population
'ticihcon': TESS targets with contamination ratio larger than 10
'ticim110': TESS targets brighter than mag 11
'ticim135': TESS targets brighter than mag 13.5
'tessnomi2min': 2-minute TESS targets obtained by merging the SPOC 2-min bulk downloads
Returns a dictionary with keys:
rasc: RA
decl: declination
tmag: TESS magnitude
radistar: radius of the star
massstar: mass of the star
'''
if typeverb > 0:
print('Retrieving a dictionary of TIC8 for population %s...' % typepopl)
if typepopl.startswith('tess'):
if typepopl[4:].startswith('nomi'):
listtsec = np.arange(1, 27)
elif typepopl[4:].endswith('extd'):
listtsec = np.arange(27, 39)
else:
listtsec = [int(typepopl[-2:])]
numbtsec = len(listtsec)
indxtsec = np.arange(numbtsec)
pathlistticidata = os.environ['EPHESUS_DATA_PATH'] + '/data/listticidata/'
os.system('mkdir -p %s' % pathlistticidata)
path = pathlistticidata + 'listticidata_%s.csv' % typepopl
if not os.path.exists(path):
# dictionary of strings that will be keys of the output dictionary
dictstrg = dict()
dictstrg['ID'] = 'tici'
dictstrg['ra'] = 'rasc'
dictstrg['dec'] = 'decl'
dictstrg['Tmag'] = 'tmag'
dictstrg['rad'] = 'radistar'
dictstrg['mass'] = 'massstar'
dictstrg['Teff'] = 'tmptstar'
dictstrg['logg'] = 'loggstar'
dictstrg['MH'] = 'metastar'
liststrg = list(dictstrg.keys())
print('typepopl')
print(typepopl)
if typepopl.startswith('tessnomi'):
if typepopl[8:12] == '20sc':
strgurll = '_20s_'
labltemp = '20-second'
if typepopl[8:12] == '2min':
strgurll = '_'
labltemp = '2-minute'
dictquer = dict()
listtici = []
for o in indxtsec:
if typepopl.endswith('bulk'):
pathtess = os.environ['TESS_DATA_PATH'] + '/data/lcur/sector-%02d' % listtsec[o]
listnamefile = fnmatch.filter(os.listdir(pathtess), '*.fits')
listticitsec = []
for namefile in listnamefile:
listticitsec.append(str(int(namefile.split('-')[2])))
listticitsec = np.array(listticitsec)
else:
url = 'https://tess.mit.edu/wp-content/uploads/all_targets%sS%03d_v1.csv' % (strgurll, listtsec[o])
c = pd.read_csv(url, header=5)
listticitsec = c['TICID'].values
listticitsec = listticitsec.astype(str)
numbtargtsec = listticitsec.size
if typeverb > 0:
print('%d observed %s targets in Sector %d...' % (numbtargtsec, labltemp, listtsec[o]))
if numbtargtsec > 0:
dictquertemp = xmat_tici(listticitsec)
if o == 0:
dictquerinte = dict()
for name in dictstrg.keys():
dictquerinte[dictstrg[name]] = [[] for o in indxtsec]
for name in dictstrg.keys():
for k in range(len(dictquertemp)):
dictquerinte[dictstrg[name]][o].append(dictquertemp[k][name])
print('Concatenating arrays from different sectors...')
for name in dictstrg.keys():
dictquer[dictstrg[name]] = np.concatenate(dictquerinte[dictstrg[name]])
u, indxuniq = np.unique(dictquer['tici'], return_index=True)
for name in dictstrg.keys():
dictquer[dictstrg[name]] = dictquer[dictstrg[name]][indxuniq]
numbtarg = dictquer['radistar'].size
if typeverb > 0:
print('%d observed 2-min targets...' % numbtarg)
elif typepopl.startswith('tici'):
if typepopl == 'ticihcon':
request = {'service':'Mast.Catalogs.Filtered.Tic.Rows', 'format':'json', 'params':{ \
'columns':'ID, Tmag, rad, mass, contratio', \
'filters':[{'paramName':'contratio', 'values':[{"min":10., "max":1e3}]}]}}
if typepopl == 'ticim110':
request = {'service':'Mast.Catalogs.Filtered.Tic.Rows', 'format':'json', 'params':{ \
'columns':'ID, Tmag, rad, mass', \
'filters':[{'paramName':'Tmag', 'values':[{"min":-100., "max":11}]}]}}
if typepopl == 'ticim135':
request = {'service':'Mast.Catalogs.Filtered.Tic.Rows', 'format':'json', 'params':{ \
'columns':'ID, Tmag, rad, mass', \
'filters':[{'paramName':'Tmag', 'values':[{"min":-100., "max":13.5}]}]}}
headers, outString = quer_mast(request)
listdictquer = json.loads(outString)['data']
if typeverb > 0:
print('%d matches...' % len(listdictquer))
dictquer = dict()
print('listdictquer[0].keys()')
print(listdictquer[0].keys())
for name in listdictquer[0].keys():
if name == 'ID':
namedict = 'tici'
if name == 'Tmag':
namedict = 'tmag'
if name == 'rad':
namedict = 'radi'
if name == 'mass':
namedict = 'mass'
dictquer[namedict] = np.empty(len(listdictquer))
for k in range(len(listdictquer)):
dictquer[namedict][k] = listdictquer[k][name]
else:
print('Unrecognized population name: %s' % typepopl)
raise Exception('')
if typeverb > 0:
#print('%d targets...' % numbtarg)
print('Writing to %s...' % path)
#columns = ['tici', 'radi', 'mass']
pd.DataFrame.from_dict(dictquer).to_csv(path, index=False)#, columns=columns)
else:
if typeverb > 0:
print('Reading from %s...' % path)
dictquer = pd.read_csv(path).to_dict(orient='list')
for name in dictquer.keys():
dictquer[name] = np.array(dictquer[name])
#del dictquer['Unnamed: 0']
return dictquer
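# Example usage (commented out): fetch the TIC8 fields for the 2-minute targets of
# the nominal mission; the result is cached as a CSV under
# $EPHESUS_DATA_PATH/data/listticidata/ on the first call.
# dictpopl = retr_dictpopltic8('tessnomi2min')
# print(dictpopl['radistar'].size, 'targets')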
def retr_objtlinefade(x, y, colr='black', initalph=1., alphfinl=0.):
colr = mpl.colors.to_rgb(colr)  # convert the color name/spec to an RGB tuple
cdict = {'red': ((0.,colr[0],colr[0]),(1.,colr[0],colr[0])),
'green': ((0.,colr[1],colr[1]),(1.,colr[1],colr[1])),
'blue': ((0.,colr[2],colr[2]),(1.,colr[2],colr[2])),
'alpha': ((0.,initalph, initalph), (1., alphfinl, alphfinl))}
Npts = len(x)
if len(y) != Npts:
raise AttributeError("x and y must have same dimension.")
segments = np.zeros((Npts-1,2,2))
segments[0][0] = [x[0], y[0]]
for i in range(1,Npts-1):
pt = [x[i], y[i]]
segments[i-1][1] = pt
segments[i][0] = pt
segments[-1][1] = [x[-1], y[-1]]
individual_cm = mpl.colors.LinearSegmentedColormap('indv1', cdict)
lc = mpl.collections.LineCollection(segments, cmap=individual_cm)
lc.set_array(np.linspace(0.,1.,len(segments)))
return lc
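# Example usage (commented out): the returned LineCollection fades from opaque to
# transparent along the trajectory and is added to an axis with add_collection,
# exactly as done in plot_orbt below. The sine trajectory is just a placeholder.
# figr, axis = plt.subplots()
# xpos = np.linspace(0., 1., 100)
# objt = retr_objtlinefade(xpos, np.sin(2. * np.pi * xpos), colr='magenta')
# axis.add_collection(objt)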
def retr_liststrgcomp(numbcomp):
liststrgcomp = np.array(['b', 'c', 'd', 'e', 'f', 'g'])[:numbcomp]
return liststrgcomp
def retr_listcolrcomp(numbcomp):
listcolrcomp = np.array(['magenta', 'orange', 'red', 'green', 'purple', 'cyan'])[:numbcomp]
return listcolrcomp
def plot_orbt( \
# path to write the plot
path, \
# radius of the planets [R_E]
radicomp, \
# sum of radius of planet and star divided by the semi-major axis
rsmacomp, \
# epoc of the planets [BJD]
epoc, \
# orbital periods of the planets [days]
peri, \
# cosine of the inclination
cosi, \
# type of visualization:
## 'realblac': dark background, black planets
## 'realblaclcur': dark backgound, luminous planets, with light curves
## 'realcolrlcur': dark background, colored planets, with light curves
## 'cartcolr': bright background, colored planets
typevisu, \
# radius of the star [R_S]
radistar=1., \
# mass of the star [M_S]
massstar=1., \
# Boolean flag to produce an animation
boolanim=False, \
# angle of view with respect to the orbital plane [deg]
anglpers=5., \
# size of the figure
sizefigr=(8, 8), \
listcolrcomp=None, \
liststrgcomp=None, \
boolsingside=True, \
## file type of the plot
typefileplot='pdf', \
# verbosity level
typeverb=1, \
):
dictfact = retr_factconv()
mpl.use('Agg')
numbcomp = len(radicomp)
if isinstance(radicomp, list):
radicomp = np.array(radicomp)
if isinstance(rsmacomp, list):
rsmacomp = np.array(rsmacomp)
if isinstance(epoc, list):
epoc = np.array(epoc)
if isinstance(peri, list):
peri = np.array(peri)
if isinstance(cosi, list):
cosi = np.array(cosi)
if listcolrcomp is None:
listcolrcomp = retr_listcolrcomp(numbcomp)
if liststrgcomp is None:
liststrgcomp = retr_liststrgcomp(numbcomp)
# semi-major axes of the planets [AU]
smax = (radicomp / dictfact['rsre'] + radistar) / dictfact['aurs'] / rsmacomp
indxcomp = np.arange(numbcomp)
# perspective factor
factpers = np.sin(anglpers * np.pi / 180.)
## scale factor for the star
factstar = 5.
## scale factor for the planets
factplan = 20.
# maximum y-axis value
maxmyaxi = 0.05
if typevisu == 'cartmerc':
# Mercury
smaxmerc = 0.387 # [AU]
radicompmerc = 0.3829 # [R_E]
# scaled radius of the star [AU]
radistarscal = radistar / dictfact['aurs'] * factstar
time = np.arange(0., 30., 2. / 60. / 24.)
numbtime = time.size
indxtime = np.arange(numbtime)
if boolanim:
numbiter = min(500, numbtime)
else:
numbiter = 1
indxiter = np.arange(numbiter)
xposmaxm = smax
yposmaxm = factpers * xposmaxm
numbtimequad = 10
if typevisu == 'realblaclcur':
numbtimespan = 100
# get transit model based on TESS ephemerides
rratcomp = radicomp / radistar
rflxtranmodl = retr_rflxtranmodl(time, pericomp=peri, epoccomp=epoc, rsmacomp=rsmacomp, cosicomp=cosi, rratcomp=rratcomp)['rflx'] - 1.
lcur = rflxtranmodl + np.random.randn(numbtime) * 1e-6
ylimrflx = [np.amin(lcur), np.amax(lcur)]
phas = np.random.rand(numbcomp)[None, :] * 2. * np.pi + 2. * np.pi * time[:, None] / peri[None, :]
yposelli = yposmaxm[None, :] * np.sin(phas)
xposelli = xposmaxm[None, :] * np.cos(phas)
# time indices for iterations
indxtimeiter = np.linspace(0., numbtime - numbtime / numbiter, numbiter).astype(int)
if typevisu.startswith('cart'):
colrstar = 'k'
colrface = 'w'
plt.style.use('default')
else:
colrface = 'k'
colrstar = 'w'
plt.style.use('dark_background')
if boolanim:
cmnd = 'convert -delay 5'
listpathtemp = []
for k in indxiter:
if typevisu == 'realblaclcur':
numbrows = 2
else:
numbrows = 1
figr, axis = plt.subplots(figsize=sizefigr)
### lower half of the star
w1 = mpl.patches.Wedge((0, 0), radistarscal, 180, 360, fc=colrstar, zorder=1, edgecolor=colrstar)
axis.add_artist(w1)
for jj, j in enumerate(indxcomp):
xposellishft = np.roll(xposelli[:, j], -indxtimeiter[k])[-numbtimequad:][::-1]
yposellishft = np.roll(yposelli[:, j], -indxtimeiter[k])[-numbtimequad:][::-1]
# trailing lines
if typevisu.startswith('cart'):
objt = retr_objtlinefade(xposellishft, yposellishft, colr=listcolrcomp[j], initalph=1., alphfinl=0.)
axis.add_collection(objt)
# add planets
if typevisu.startswith('cart'):
colrplan = listcolrcomp[j]
# add planet labels
axis.text(.6 + 0.03 * jj, 0.1, liststrgcomp[j], color=listcolrcomp[j], transform=axis.transAxes)
if typevisu.startswith('real'):
if typevisu == 'realillu':
colrplan = 'k'
else:
colrplan = 'black'
radi = radicomp[j] / dictfact['rsre'] / dictfact['aurs'] * factplan
w1 = mpl.patches.Circle((xposelli[indxtimeiter[k], j], yposelli[indxtimeiter[k], j]), radius=radi, color=colrplan, zorder=3)
axis.add_artist(w1)
## upper half of the star
w1 = mpl.patches.Wedge((0, 0), radistarscal, 0, 180, fc=colrstar, zorder=4, edgecolor=colrstar)
axis.add_artist(w1)
if typevisu == 'cartmerc':
## add Mercury
axis.text(.387, 0.01, 'Mercury', color='grey', ha='right')
radi = radicompmerc / dictfact['rsre'] / dictfact['aurs'] * factplan
w1 = mpl.patches.Circle((smaxmerc, 0), radius=radi, color='grey')
axis.add_artist(w1)
# temperature axis
#axistwin = axis.twiny()
##axistwin.set_xlim(axis.get_xlim())
#xpostemp = axistwin.get_xticks()
##axistwin.set_xticks(xpostemp[1:])
#axistwin.set_xticklabels(['%f' % tmpt for tmpt in listtmpt])
# temperature contours
#for tmpt in [500., 700,]:
# smaj = tmpt
# axis.axvline(smaj, ls='--')
axis.get_yaxis().set_visible(False)
axis.set_aspect('equal')
if typevisu == 'cartmerc':
maxmxaxi = max(1.2 * np.amax(smax), 0.4)
else:
maxmxaxi = 1.2 * np.amax(smax)
if boolsingside:
minmxaxi = 0.
else:
minmxaxi = -maxmxaxi
axis.set_xlim([minmxaxi, maxmxaxi])
axis.set_ylim([-maxmyaxi, maxmyaxi])
axis.set_xlabel('Distance from the star [AU]')
if typevisu == 'realblaclcur':
print('indxtimeiter[k]')
print(indxtimeiter[k])
minmindxtime = max(0, indxtimeiter[k]-numbtimespan)
print('minmindxtime')
print(minmindxtime)
xtmp = time[minmindxtime:indxtimeiter[k]]
if len(xtmp) == 0:
continue
print('xtmp')
print(xtmp)
timescal = 2 * maxmxaxi * (xtmp - np.amin(xtmp)) / (np.amax(xtmp) - np.amin(xtmp)) - maxmxaxi
print('timescal')
print(timescal)
axis.scatter(timescal, 10000. * lcur[minmindxtime:indxtimeiter[k]] + maxmyaxi * 0.8, rasterized=True, color='cyan', s=0.5)
print('time[minmindxtime:indxtimeiter[k]]')
summgene(time[minmindxtime:indxtimeiter[k]])
print('lcur[minmindxtime:indxtimeiter[k]]')
summgene(lcur[minmindxtime:indxtimeiter[k]])
print('')
#plt.subplots_adjust()
#axis.legend()
if boolanim:
pathtemp = '%s_%s_%04d.%s' % (path, typevisu, k, typefileplot)
else:
pathtemp = '%s_%s.%s' % (path, typevisu, typefileplot)
print('Writing to %s...' % pathtemp)
plt.savefig(pathtemp)
plt.close()
if boolanim:
listpathtemp.append(pathtemp)
cmnd += ' %s' % pathtemp
if boolanim:
cmnd += ' %s_%s.gif' % (path, typevisu)
os.system(cmnd)
for pathtemp in listpathtemp:
cmnd = 'rm %s' % pathtemp
os.system(cmnd)
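# Example usage (commented out; all values are made-up placeholders for a
# two-planet system): writes <path>_cartcolr.pdf with a top-down cartoon of the orbits.
# plot_orbt('/tmp/orbt', radicomp=[1.5, 2.9], rsmacomp=[0.08, 0.05],
#           epoc=[2458000., 2458001.], peri=[3.1, 7.4], cosi=[0., 0.],
#           typevisu='cartcolr')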
def retr_dictpoplrvel(typeverb=1):
if typeverb > 0:
print('Reading Sauls Gaia high RV catalog...')
path = os.environ['TROIA_DATA_PATH'] + '/data/Gaia_high_RV_errors.txt'
for line in open(path):
listnamesaul = line[:-1].split('\t')
break
if typeverb > 0:
print('Reading from %s...' % path)
data = np.loadtxt(path, skiprows=1)
dictcatl = dict()
dictcatl['rasc'] = data[:, 0]
dictcatl['decl'] = data[:, 1]
dictcatl['stdvrvel'] = data[:, -4]
return dictcatl
def retr_dicthostplan(namepopl, typeverb=1):
pathlygo = os.environ['EPHESUS_DATA_PATH'] + '/'
path = pathlygo + 'data/dicthost%s.csv' % namepopl
if os.path.exists(path):
if typeverb > 0:
print('Reading from %s...' % path)
dicthost = pd.read_csv(path)
import difflib
import glob
import json
import os
import numpy as np
import pandas as pd
from daps.utils.extra import levenshtein_distance
ACTIVITYNET_ANNOTATION_FILE = 'activity_net.v1-2.gt.json'
ANET_SIMILAR_CLASS_IDS_WITH_THUMOS14 = [159, 82, 233, 224, 195,
116, 80, 106, 169]
class Dataset(object):
"""Wrapper around classes packing dataset information
Attributes
----------
wrapped_dataset : DatasetBase
wrapped dataset
ToDo
-----
Create a super class for Thumos14
"""
def __init__(self, name, **kwargs):
"""Setup dataset object
Parameters
----------
name : str
Name of dataset to use
"""
if type(name) is not str:
raise ValueError('name must be of type str')
name = name.lower()
if name == 'thumos14' or name == 'thumos_14':
self.wrapped_dataset = Thumos14(**kwargs)
elif name == 'activitynet':
self.wrapped_dataset = ActivityNet(**kwargs)
else:
raise ValueError('Unknown dataset {}'.format(name))
def __getattr__(self, attr):
orig_attr = self.wrapped_dataset.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
# prevent wrapped_class from becoming unwrapped
if isinstance(result, type(self.wrapped_dataset)):
if result == self.wrapped_dataset:
return self
return result
return hooked
else:
return orig_attr
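# Example usage (commented out): the wrapper forwards attribute access to the
# wrapped dataset object, so the call below is dispatched to Thumos14.annotation_files.
# ds = Dataset('thumos14')
# files = ds.annotation_files('val')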
class DatasetBase(object):
"""Primitive class to pack information about dataset
"""
msg_overload = 'This method should be overloaded'
fields_video = ['video-name', 'duration', 'frame-rate', 'n-frames']
fields_segment = ['video-name', 't-init', 't-end', 'f-init', 'n-frames',
'video-duration', 'frame-rate', 'video-frames',
'label-idx']
def segments_info(self):
raise NotImplementedError(self.msg_overload)
def video_info(self):
raise NotImplementedError(self.msg_overload)
class ActivityNet(DatasetBase):
"""Pack data about ActivityNet
"""
def __init__(self, dirname='data/activitynet',
annotation_file=ACTIVITYNET_ANNOTATION_FILE,
overlapped_category_ids=ANET_SIMILAR_CLASS_IDS_WITH_THUMOS14):
"""Initialize ActivityNet dataset
Parameters
----------
dirname : str
Fullpath of folder with ActivityNet data
annotation_file : str
Filename with ground-truth annotation
overlapped_category_ids : list
class ids overlapping Thumos14 categories
"""
if not os.path.isdir(dirname):
raise IOError('Unexistent directory {}'.format(dirname))
self.root = dirname
self.info = os.path.join(dirname, 'info')
self.annotation_filename = os.path.join(self.info, annotation_file)
self.overlapped = overlapped_category_ids
# Read index used on ActivityNet
self.index_filename = os.path.join(self.info,
'class_index_detection.txt')
# Video CSV
self.files_video_list = [
os.path.join(self.root, 'metadata', 'train_list.txt'),
os.path.join(self.root, 'metadata', 'val_list.txt'),
os.path.join(self.root, 'metadata', 'test_list.txt')]
msg = 'Unexistent list of {} videos and its information'
if not os.path.isfile(self.files_video_list[0]):
try:
self._gen_video_list(self.files_video_list[0], 'train')
except:
raise IOError(msg.format('training'))
if not os.path.isfile(self.files_video_list[1]):
try:
self._gen_video_list(self.files_video_list[1], 'val')
except:
raise IOError(msg.format('validation'))
if not os.path.isfile(self.files_video_list[2]):
try:
self._gen_video_list(self.files_video_list[2], 'test')
except:
raise IOError(msg.format('testing'))
# Segments CSV
self.files_seg_list = [
os.path.join(self.root, 'metadata', 'train_segments_list.txt'),
os.path.join(self.root, 'metadata', 'val_segments_list.txt'),
os.path.join(self.root, 'metadata', 'test_segments_list.txt')]
if not os.path.isfile(self.files_seg_list[0]):
self._gen_segments_info(self.files_seg_list[0], 'train')
if not os.path.isfile(self.files_seg_list[1]):
self._gen_segments_info(self.files_seg_list[1], 'val')
if not os.path.isfile(self.files_seg_list[2]):
self._gen_segments_info(self.files_seg_list[2], 'test')
def _gen_video_list(self, filename, set_choice='train'):
"""Create CSV with information about ActivityNet videos
Parameters
----------
filename : str
Fullpath of CSV-file
set_choice : str
('train','val' or 'test') dump annotations of the corresponding set
"""
video_info_filename = os.path.join(self.info,
'{}.txt'.format(set_choice))
video_list = np.array(pd.read_csv(video_info_filename,
header=None)).flatten()
with open(self.annotation_filename, 'r') as fobj:
data = json.load(fobj)['database']
v_noex_lst, dur_lst, n_frames_lst, frame_rate_lst = [], [], [], []
for v in video_list:
# Get duration from raw annotations.
v_noex = os.path.splitext(v)[0]
dur = data[v_noex[-11:]]['duration'] # Excluding v_ chars.
# Get number of frames from extracted frames count.
frm_dir = os.path.join(self.root,
'frm/{}/{}'.format(set_choice, v_noex))
n_frames = len(glob.glob(os.path.join(frm_dir, '*.jpg')))
# Frame rate computed from dur and number of frames.
frame_rate = (n_frames * 1.0) / dur
dur_lst.append(dur)
n_frames_lst.append(n_frames)
frame_rate_lst.append(frame_rate)
v_noex_lst.append(v_noex)
df = pd.DataFrame({'video-name': v_noex_lst,
'duration': dur_lst,
'frame-rate': frame_rate_lst,
'n-frames': n_frames_lst})
if not os.path.isdir(os.path.join(self.root, 'metadata')):
os.makedirs(os.path.join(self.root, 'metadata'))
output_file = os.path.join(self.root, 'metadata',
'{}_list.txt'.format(set_choice))
df.to_csv(output_file, sep=' ', index=False,
header=True, columns=self.fields_video)
return df
def _gen_segments_info(self, filename, set_choice, id_prepend='v_'):
"""Create CSV with information about ActivityNet action segments
Parameters
----------
filename : str
Fullpath of CSV-file
set_choice : str
('train','val' or 'test') dump annotations of the corresponding set
"""
set_choice_helper = {'train': 'training', 'val': 'validation',
'test': 'testing'}
with open(self.annotation_filename, 'r') as fobj:
data = json.load(fobj)['database']
# DataFrame fields
video_name, video_duration, frame_rate, video_frames = [], [], [], []
t_init, t_end, f_init, n_frames, l_idx = [], [], [], [], []
# Looking for videos in set choice.
for v_id, v in data.items():
if v['subset'] != set_choice_helper[set_choice.lower()]:
continue
# Count frames.
frm_dir = os.path.join(
self.root, 'frm/{}/{}{}'.format(set_choice, id_prepend, v_id))
video_frames_i = len(glob.glob(os.path.join(frm_dir, '*.jpg')))
frame_rate_i = (video_frames_i * 1.0) / v['duration']
# Appending segment info.
for annotation in v['annotations']:
video_name.append(id_prepend + v_id)
video_duration.append(v['duration'])
frame_rate.append(frame_rate_i)
video_frames.append(video_frames_i)
t_init.append(annotation['segment'][0])
t_end.append(annotation['segment'][1])
f_i = np.floor(annotation['segment'][0] * frame_rate_i)
f_init.append(f_i)
f_e = np.floor(annotation['segment'][1] * frame_rate_i)
n_frames.append(f_e - f_i + 1.0)
l_idx.append(self.index_from_action_name(annotation['label']))
# Build DataFrame.
df = pd.DataFrame({'video-name': video_name, 't-init': t_init,
't-end': t_end, 'f-init': f_init,
'n-frames': n_frames,
'video-duration': video_duration,
'frame-rate': frame_rate,
'video-frames': video_frames,
'label-idx': l_idx})
if isinstance(filename, str):
df.to_csv(filename, sep=' ', index=False,
columns=self.fields_segment)
return df
def dir_videos(self, set_choice='train'):
"""Return string of folder of annotations
Parameters
----------
set_choice : string, optional
('train', 'val' or 'test') set of interest
"""
set_choice = set_choice.lower()
if set_choice == 'val' or set_choice == 'validation':
return os.path.join(self.root, 'val_videos')
elif (set_choice == 'test' or set_choice == 'testing' or
set_choice == 'tst'):
return os.path.join(self.root, 'test_videos')
elif (set_choice == 'train' or set_choice == 'training' or
set_choice == 'trng'):
return os.path.join(self.root, 'train_videos')
else:
raise ValueError('unrecognized choice')
def index_from_action_name(self, name):
df = pd.read_csv(self.index_filename)
idx = df['action-name'] == name
return int(df.loc[idx, 'index'])
def segments_info(self, set_choice='train', filename=None):
"""Return a DataFrame with information about action segments
Parameters
----------
set_choice : string, optional
('train','val' or 'test') dump annotations of the corresponding set
"""
set_choice = set_choice.lower()
if set_choice in ['train', 'training', 'trng']:
filename = self.files_seg_list[0]
elif set_choice in ['val', 'validation']:
filename = self.files_seg_list[1]
elif set_choice in ['test', 'testing', 'tst']:
filename = self.files_seg_list[2]
else:
raise ValueError('unrecognized choice')
df = pd.read_csv(filename, sep=' ')
if df.shape[1] != len(self.fields_segment):
raise ValueError('Inconsistent number of columns')
return df
def video_info(self, set_choice='train'):
"""Return DataFrame with info about videos on the corresponding set
Parameters
----------
set_choice : string
('train', 'val' or 'test') set of interest
"""
set_choice = set_choice.lower()
if set_choice in ['train', 'training', 'trng']:
filename = self.files_video_list[0]
elif set_choice in ['val', 'validation']:
filename = self.files_video_list[1]
elif set_choice in ['test', 'testing', 'tst']:
filename = self.files_video_list[2]
else:
raise ValueError('unrecognized choice')
df = pd.read_csv(filename, sep=' ')
if df.shape[1] != len(self.fields_video):
raise ValueError('Inconsistent number of columns')
return df
def get_segments_from_overlapped_categories(self, df):
return df[df['label-idx'].isin(self.overlapped)].copy()
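# Example usage (commented out): restrict ActivityNet validation segments to the
# categories that overlap THUMOS-14 (the class ids listed at the top of this file).
# anet = ActivityNet()
# df_val = anet.segments_info('val')
# df_overlap = anet.get_segments_from_overlapped_categories(df_val)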
class Thumos14(DatasetBase):
"""Pack data about Thumos14 dataset
"""
def __init__(self, dirname='data/thumos14'):
"""Initialize thumos14 class
Parameters
----------
dirname : string
Fullpath of folder with THUMOS-14 data
"""
if not os.path.isdir(dirname):
raise IOError('Unexistent directory {}'.format(dirname))
self.root = dirname
# Read index used on THUMOS-14
filename = os.path.join(self.root, 'class_index_detection.txt')
self.df_index_labels = pd.read_csv(filename, header=None, sep=' ')
# Video CSV
self.files_video_list = [
os.path.join(self.root, 'metadata', 'val_list.txt'),
os.path.join(self.root, 'metadata', 'test_list.txt')]
msg = 'Unexistent list of {} videos and its information'
# TODO: Generate list if not exist
if not os.path.isfile(self.files_video_list[0]):
raise IOError(msg.format('validation'))
if not os.path.isfile(self.files_video_list[1]):
raise IOError(msg.format('testing'))
# Segments CSV
self.files_seg_list = [
os.path.join(self.root, 'metadata', 'val_segments_list.txt'),
os.path.join(self.root, 'metadata', 'test_segments_list.txt')]
if not os.path.isfile(self.files_seg_list[0]):
self._gen_segments_info(self.files_seg_list[0], 'val')
if not os.path.isfile(self.files_seg_list[1]):
self._gen_segments_info(self.files_seg_list[1], 'test')
def annotation_files(self, set_choice='val'):
"""
Return a list with files of temporal annotations of THUMOS-14 actions
Parameters
----------
set_choice : string, optional
('val' or 'test') set of interest
"""
dirname = self.dir_annotations(set_choice)
return glob.glob(os.path.join(dirname, 'annotation', '*.txt'))
def dir_annotations(self, set_choice='val'):
"""Return string of folder of annotations
Parameters
----------
set_choice : string, optional
('val' or 'test') set of interest
"""
set_choice = set_choice.lower()
if set_choice == 'val' or set_choice == 'validation':
return os.path.join(self.root, 'th14_temporal_annotations_val')
elif (set_choice == 'test' or set_choice == 'testing' or
set_choice == 'tst'):
return os.path.join(self.root, 'th14_temporal_annotations_test')
else:
raise ValueError('unrecognized choice')
def dir_videos(self, set_choice='val'):
"""Return string of folder with videos
Parameters
----------
set_choice : string, optional
('val' or 'test') return folder of the corresponding set
"""
set_choice = set_choice.lower()
if set_choice == 'val' or set_choice == 'validation':
return os.path.join(self.root, 'val_mp4')
elif (set_choice == 'test' or set_choice == 'testing' or
set_choice == 'tst'):
return os.path.join(self.root, 'test_mp4')
else:
raise ValueError('unrecognized choice')
def _gen_segments_info(self, filename, set_choice):
"""Create CSV with information about THUMOS-14 action segments
Parameters
----------
filename : str
Fullpath of CSV-file
set_choice : str
('val' or 'test') dump annotations of the corresponding set
"""
# Read annotations and create labels (0-indexed)
files = self.annotation_files(set_choice)
list_df, list_arr = [], []
for i in files:
list_df.append(pd.read_csv(i, header=None, sep=' '))
import numpy as np
import pandas as pd
from collections import namedtuple
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
import config as cf
EpisodeStats = namedtuple("EpisodeStats",["episode_lengths", "episode_rewards", "episode_runtime"])
TimeStats = namedtuple("TimeStats",["ILASP_runtime"])
def store_stats(stats, base_dir, filename):
filename = "/" + filename + ".pkl"
picklepath = base_dir + filename
output = open(picklepath, "wb")
pickle.dump(stats, output)
output.close()
def load_stats(base_dir, filename):
filename = "/" + filename + ".pkl"
picklepath = base_dir + filename
stats = pickle.load(open(picklepath, "rb"))
return stats
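# Example usage (commented out; paths and sizes are placeholders): pickle the
# statistics of one run and read them back later for plotting.
# stats = EpisodeStats(episode_lengths=np.zeros(10), episode_rewards=np.zeros(10),
#                      episode_runtime=np.zeros(10))
# store_stats(stats, "results", "run_0")
# stats = load_stats("results", "run_0")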
# Average cumulative rewards and runtime of each episode
def average_score(base_dir, pkl_dir, prefix, num_episodes, num_pkl):
stats = EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes),
episode_runtime=np.zeros(num_episodes))
temp_runtime = np.zeros(num_episodes)
for pkl in range(num_pkl):
filename = prefix + str(pkl)
stats2 = load_stats(pkl_dir, filename)
for i_episode in range(num_episodes):
stats.episode_rewards[i_episode] += (stats2.episode_rewards[i_episode]/num_pkl)
temp_runtime[i_episode] += (stats2.episode_runtime[i_episode]/num_pkl)
# Cumulative runtime
# import ipdb; ipdb.set_trace()
for i_episode in range(num_episodes):
for i in range(i_episode+1):
stats.episode_runtime[i_episode] += temp_runtime[i]
# import ipdb; ipdb.set_trace()
stats.episode_rewards[16] = -8
stats.episode_rewards[42] = -8
store_stats(stats, pkl_dir, prefix+"_average")
def average_ILASP(base_dir, pkl_dir, prefix, num_episodes, time_range, num_pkl):
stats_out = TimeStats(
ILASP_runtime=np.zeros((num_episodes,cf.TIME_RANGE)))
ilasp_total = 0
# For loop per experiment
for pkl in range(num_pkl):
filename = prefix + str(pkl)
stats_in = load_stats(pkl_dir, filename)
# Get the total number of ILASP calls
ilasp_call = 0
# This is cumulative runtime over episode
cumulative_runtime = 0
for episode in range(num_episodes):
for t in range(time_range):
if stats_in.ILASP_runtime[episode][t] > 0:
ilasp_call += 1
cumulative_runtime += stats_in.ILASP_runtime[episode][t]
ilasp_total += ilasp_call
# Incremental normalisation
count = 0
for episode in range(num_episodes):
for t in range(time_range):
if(stats_in.ILASP_runtime[episode][t] > 0):
count += 1
avg = count/ilasp_call
stats_out.ILASP_runtime[episode][t] += avg/num_pkl
store_stats(stats_out, pkl_dir, prefix+"_average")
average_ilasp_call = ilasp_total/num_pkl
return cumulative_runtime/average_ilasp_call, ilasp_total/num_pkl
# ILASP normalisation curve
def plot_ILASP_progress(stats,smoothing_window=1, noshow=False):
# import ipdb; ipdb.set_trace()
fig2 = plt.figure(figsize=(7,5))
ilasp_smoothed = pd.Series(stats.ILASP_runtime[0]).rolling(smoothing_window, min_periods=smoothing_window).mean()
plot, = plt.plot(ilasp_smoothed, "k", label="ILP(RL)")
plt.plot(ilasp_smoothed, color="k")
plt.xlabel("Time steps at episode 0")
plt.ylabel("The number of ILASP calls (normalised)")
plt.title("Normalised learning conversion of inductive learning")
plt.legend(handles=[plot], loc=4)
if noshow:
plt.close(fig2)
else:
plt.show(fig2)
def plot_episode_stats_simple(stats, smoothing_window=1, noshow=False, color="green"):
# Plot the episode reward over time
fig2 = plt.figure(figsize=(7,5))
rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
plt.plot(rewards_smoothed, color=color)
plt.xlabel("Episode")
plt.ylabel("Episode Reward")
plt.title("Episode Reward over Time")
if noshow:
plt.close(fig2)
else:
plt.show(fig2)
return fig2
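# Hedged usage sketch (added for illustration): builds a tiny EpisodeStats
# with made-up rewards and renders it without opening a window. The values
# below are arbitrary assumptions, not outputs of any experiment.
def _demo_plot_episode_stats_simple():
    demo_stats = EpisodeStats(
        episode_lengths=np.zeros(5),
        episode_rewards=np.array([-10.0, -6.0, -3.0, -1.0, 0.0]),
        episode_runtime=np.zeros(5))
    return plot_episode_stats_simple(demo_stats, smoothing_window=2, noshow=True)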
def plot_episode_stats_runtime(stats, stats_q, smoothing_window=1, noshow=False, color="green"):
# Plot the episode reward over time
fig2 = plt.figure(figsize=(7,5))
runtime = pd.Series(stats.episode_runtime).rolling(smoothing_window, min_periods=smoothing_window).mean()
runtime_q = pd.Series(stats_q.episode_runtime).rolling(smoothing_window, min_periods=smoothing_window).mean()
plot, = plt.plot(runtime, "k", label="ILP(RL)")
plot_q, = plt.plot(runtime_q, c="gray", ls="--", label="Q-learning")
# plt.plot(runtime_smoothed, color=color)
plt.xlabel("Episode")
plt.ylabel("Episode Runtime (second)")
plt.title("Episode Runtime over Time")
plt.legend(handles=[plot, plot_q], loc=4)
# plt.legend(handles=[plot_ilasp], loc=1)
if noshow:
plt.close(fig2)
else:
plt.show(fig2)
return fig2
def plot_episode_stats_test(stats, stats_test, smoothing_window=1, noshow=False, color="green"):
# Plot the episode reward over time
fig2 = plt.figure(figsize=(7,5))
rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
rewards_smoothed_test = pd.Series(stats_test.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
# plt.plot(rewards_smoothed, "r--", , rewards_smoothed_test, "b")
plot_test, = plt.plot(rewards_smoothed_test, "b", label="test")
plot_training, = plt.plot(rewards_smoothed, "r--", label="training")
# plt.plot(rewards_smoothed, color=color)
plt.xlabel("Episode")
plt.ylabel("Episode Reward")
plt.title("Episode Reward over Time")
plt.legend(handles=[plot_training, plot_test], loc=4)
if noshow:
plt.close(fig2)
else:
plt.show(fig2)
return fig2
def plot_episode_stats_learning(stats, stats2, smoothing_window=1, noshow=False):
# Plot the episode reward over time
fig2 = plt.figure(figsize=(7,5))
rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
rewards_smoothed2 = | pd.Series(stats2.episode_rewards) | pandas.Series |
"""Unit tests for the :mod:`pudl.helpers` module."""
import pandas as pd
from pandas.testing import assert_frame_equal
from pudl.helpers import (convert_df_to_excel_file, convert_to_date,
fix_eia_na, fix_leading_zero_gen_ids)
def test_convert_to_date():
"""Test automated cleanup of EIA date columns."""
in_df = pd.DataFrame.from_records(
columns=["report_year", "report_month", "report_day"],
data=[
(2019, 3, 14),
("2019", "03", "14"),
],
)
expected_df = pd.DataFrame({
"report_date": pd.to_datetime([
"2019-03-14",
"2019-03-14",
]),
})
out_df = convert_to_date(in_df)
assert_frame_equal(out_df, expected_df)
def test_fix_eia_na():
"""Test cleanup of bad EIA spreadsheet NA values."""
in_df = pd.DataFrame({
"vals": [
0, # Don't touch integers, even if they're null-ish
0.0, # Don't touch floats, even if they're null-ish
"0.", # Should only replace naked decimals
".0", # Should only replace naked decimals
"..", # Only replace single naked decimals
"",
" ",
"\t",
".",
" ", # Multiple whitespace characters
"\t\t", # 2-tabs: another Multi-whitespace
]
})
expected_df = pd.DataFrame({
"vals": [
0,
0.0,
"0.",
".0",
"..",
pd.NA,
pd.NA,
pd.NA,
pd.NA,
pd.NA,
pd.NA,
]
})
out_df = fix_eia_na(in_df)
assert_frame_equal(out_df, expected_df)
def test_fix_leading_zero_gen_ids():
"""Test removal of leading zeroes from EIA generator IDs."""
in_df = pd.DataFrame({
"generator_id": [
"0001", # Leading zeroes, all numeric string.
"26", # An appropriate numeric string w/o leading zeroes.
100, # Integer, should get stringified.
100.0, # What happens if it's a float?
"01-A", # Leading zeroes, alphanumeric. Should not change.
"HRSG-01", # Alphanumeric, should be no change.
]
})
expected_df = pd.DataFrame({
"generator_id": [
"1",
"26",
"100",
"100.0",
"01-A",
"HRSG-01",
]
})
out_df = fix_leading_zero_gen_ids(in_df)
assert_frame_equal(out_df, expected_df)
def test_convert_df_to_excel_file():
"""Test converting a dataframe into a pandas ExcelFile."""
in_df = pd.DataFrame([[1, 2], [1, 2]])
expected_df = pd.DataFrame([[1, 2], [1, 2]])
out_excel_file = convert_df_to_excel_file(in_df, index=False)
out_df = pd.read_excel(out_excel_file)
| assert_frame_equal(out_df, expected_df) | pandas.testing.assert_frame_equal |
"""
Script to plot the time series data for solar data
starting with the data found in 1601_18.46_-66.11_2016.csv
hatfieldm
links:
- https://openei.org/datasets/dataset?sectors=buildings&tags=renewable+energy
- https://openei.org/datasets/dataset/rooftop-solar-challenge-rsc-database/resource/2a27dca6-5d04-48ba-b799-2a1c4c1cf3d8
- https://developer.nrel.gov/docs/solar/nsrdb/
- https://developer.nrel.gov/docs/solar/nsrdb/puerto_rico_data_download/
- https://medium.com/pursuitnotes/support-vector-regression-in-6-steps-with-python-c4569acd062d
run commands:
- python plot_solar_timeseries.py -f .\green-energy\1601_18.46_-66.11_2016.csv
18.362169, -67.270845
"""
import argparse
import os
import glob
import pandas as pd
import numpy as np
from datetime import datetime
from sklearn.svm import SVR
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from bokeh.plotting import figure, output_file, save
from bokeh.models import ColumnDataSource, Panel
from bokeh.layouts import column, row, gridplot, widgetbox
from bokeh.models.widgets import Tabs
def argparser():
parser = argparse.ArgumentParser()
req_args = parser.add_argument_group('required arguments')
req_args.add_argument('-f', dest='data_files', required=True, nargs='*', help='The path and filename for the data(s) to plot.')
opt_args = parser.add_argument_group('optional arguments')
opt_args.add_argument('-o', dest='output_dir', required=False, help='The path of the desired output location.')
args = parser.parse_args()
return args
def support_vector_regression(x, y):
"""
helper function to execute an SVR model against x/y data from the solar dataset
"""
## List of Kernel's we'll return regressors for
kernels = ['linear', 'poly', 'rbf', 'sigmoid']
regressors = []
## Fitting the SVR Model the the dataset
#regressor = SVR(kernel='rbf') #kernel type can be linear,poly, or gaussian. RBF is a type of guassian
#regressor.fit(x,y)
for k in kernels:
regressor = SVR(kernel=k)
regressor.fit(x,y)
regressors.append(regressor)
return kernels, regressors
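# Hedged usage sketch (illustrative only): fits the four SVR kernels on a tiny
# synthetic 1-D problem. The data below is an assumption made for the example;
# it is unrelated to the NSRDB solar files handled elsewhere in this script.
def _demo_support_vector_regression():
    x = np.linspace(0, 10, 30).reshape(-1, 1)
    y = np.sin(x).ravel()
    kernels, regressors = support_vector_regression(x, y)
    for kernel, regressor in zip(kernels, regressors):
        print(kernel, regressor.predict(x[:3]))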
def bokeh_lineplot(source, title='', x_name='x', y_name='y', is_datetime=False):
"""
Function to create a lineplot in bokeh, with options for the datetime x axis or not
"""
if is_datetime:
p = figure(title=title, plot_width=1500, plot_height=400, x_axis_type='datetime')
else:
p = figure(title=title, plot_width=1500, plot_height=400)
p.line(x=x_name, y=y_name, source=source)
return p
def bokeh_scatterplot(source, title='', x_name='x', y_name='y', is_datetime=False):
"""
Function to create a scatterplot in bokeh, with options for the datetime x axis or not
"""
if is_datetime:
p = figure(title=title, plot_width=1500, plot_height=400, x_axis_type='datetime')
else:
p = figure(title=title, plot_width=1500, plot_height=400)
p.circle(x=x_name, y=y_name, source=source)
return p
def bokeh_prediction_error_plot(source, title='', x_name='x', prediction_name='prediction', actual_name = 'actual'):
"""
Just want this to be a plot where the x axis is a notional index, and were plotting the error on the y axis
where each index has the real value (GHI) and the predictid value on the same index
"""
p = figure(title=title, plot_width=1500, plot_height=400)
p.circle(x=x_name, y=prediction_name, source=source, color='blue', legend_label=prediction_name)
p.circle(x=x_name, y=actual_name, source=source, color='orange', legend_label=actual_name)
return p
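# Hedged usage sketch (illustrative only): wires a small made-up frame into a
# ColumnDataSource and renders it with the helpers above. The column names
# 'x', 'GHI', 'prediction' and 'actual' are assumptions for this demo, not
# columns guaranteed to exist in the csv files handled by main().
def _demo_bokeh_helpers():
    demo_df = pd.DataFrame({
        'x': range(5),
        'GHI': [0, 120, 340, 510, 430],
        'prediction': [5, 110, 330, 520, 420],
        'actual': [0, 120, 340, 510, 430],
    })
    source = ColumnDataSource(demo_df)
    line = bokeh_lineplot(source, title='demo line', x_name='x', y_name='GHI')
    scatter = bokeh_scatterplot(source, title='demo scatter', x_name='x', y_name='GHI')
    errors = bokeh_prediction_error_plot(source, title='demo errors', x_name='x')
    return column(line, scatter, errors)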
def main(data_files, output_dir):
"""
Execution of data processing and bokeh html generation
"""
## Load the data into a pandas dataframe
dataframes = []
for data_file in data_files:
if data_file[-4:] == '.csv':
df = | pd.read_csv(data_file, header=2) | pandas.read_csv |
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities supporting experiments."""
import collections
import contextlib
import functools
import itertools
import multiprocessing
import os.path
import shutil
import subprocess
import tempfile
from typing import Any, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Union
from absl import flags
import numpy as np
import pandas as pd
import tensorflow as tf
def iter_grid(
grid_dict: Mapping[str, Sequence[Union[int, float, str]]]
) -> Iterator[Dict[str, Union[int, float, str]]]:
"""Iterates over all combinations of values in the provied dict-of-lists.
>>> list(iter_grid({'a': [1, 2], 'b': [4.0, 5.0, 6.0]))
[OrderedDict([('a', 1), ('b', 4.0)]),
OrderedDict([('a', 1), ('b', 5.0)]),
OrderedDict([('a', 1), ('b', 6.0)]),
OrderedDict([('a', 2), ('b', 4.0)]),
OrderedDict([('a', 2), ('b', 5.0)]),
OrderedDict([('a', 2), ('b', 6.0)])]
Args:
grid_dict: A dictionary of iterables.
Yields:
A sequence of dictionaries with keys from grid, and values corresponding
to all combinations of items in the corresponding iterables.
"""
names_to_lists = collections.OrderedDict(sorted(grid_dict.items()))
names = names_to_lists.keys()
for values in itertools.product(*names_to_lists.values()):
yield collections.OrderedDict(zip(names, values))
def atomic_write_to_csv(dataframe: pd.DataFrame,
output_file: str,
overwrite: bool = True) -> None:
"""Writes `dataframe` to `output_file` as a (possibly zipped) CSV file.
Args:
dataframe: A `pandas.Dataframe`.
output_file: The final output file to write. The output will be compressed
depending on the filename, see documentation for
`pandas.DateFrame.to_csv(compression='infer')`.
overwrite: Whether to overwrite `output_file` if it exists.
Raises:
ValueError: If `dataframe` is not an instance of `pandas.DataFrame`.
"""
if not isinstance(dataframe, pd.DataFrame):
raise ValueError(
'dataframe must be an instance of `pandas.DataFrame`, received a `{}`'
.format(type(dataframe)))
# Exporting via to_hdf() is an appealing option, because we could perhaps
# maintain more type information, and also write both hyperparameters and
# results to the same HDF5 file. However, to_hdf() call uses pickle under the
# hood, and there seems to be no way to tell it to use pickle protocol=2, it
# defaults to 4. This means the results cannot be read from Python 2. We
# currently still want Python 2 support, so sticking with CSVs for now.
# At least when writing a zip, .to_csv() is not happy taking a gfile,
# so we need a temp file on the local filesystem.
tmp_dir = tempfile.mkdtemp(prefix='atomic_write_to_csv_tmp')
# We put the output_file name last so we preserve the extension to allow
# inference of the desired compression format. Note that files with .zip
# extension (but not .bz2, .gzip, or .xv) have unexpected internal filenames
# due to https://github.com/pandas-dev/pandas/issues/26023, not
# because of something we are doing here.
tmp_name = os.path.join(tmp_dir, os.path.basename(output_file))
assert not tf.io.gfile.exists(tmp_name), 'file [{!s}] exists'.format(tmp_name)
dataframe.to_csv(tmp_name, header=True)
# Now, copy to a temp gfile next to the final target, allowing for
# an atomic move.
tmp_gfile_name = os.path.join(
os.path.dirname(output_file), '{}.tmp{}'.format(
os.path.basename(output_file),
np.random.randint(0, 2**63, dtype=np.int64)))
tf.io.gfile.copy(src=tmp_name, dst=tmp_gfile_name, overwrite=overwrite)
# Finally, do an atomic rename and clean up:
tf.io.gfile.rename(tmp_gfile_name, output_file, overwrite=overwrite)
shutil.rmtree(tmp_dir)
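# Hedged usage sketch (not from the original library): writes a small frame
# atomically to wherever `output_file` points. The /tmp path below is an
# assumption for illustration; any local or gfile-compatible path should work.
def _demo_atomic_write_to_csv():
    demo_df = pd.DataFrame({'round': [0, 1, 2], 'loss': [1.2, 0.9, 0.7]})
    atomic_write_to_csv(demo_df, '/tmp/demo_metrics.csv.bz2', overwrite=True)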
def atomic_write_series_to_csv(series_data: Any,
output_file: str,
overwrite: bool = True) -> None:
"""Writes series data to `output_file` as a (possibly zipped) CSV file.
The series data will be written to a CSV with two columns, an unlabeled
column with the indices of `series_data` (the keys if it is a `dict`), and a
column with label `0` containing the associated values in `series_data`. Note
that if `series_data` has non-scalar values, these will be written via their
string representation.
Args:
series_data: A structure that can be converted to a `pandas.Series`,
typically an array-like, iterable, dictionary, or scalar value. For more
details, see documentation for `pandas.Series`.
output_file: The final output file to write. The output will be compressed
depending on the filename, see documentation for
`pandas.DataFrame.to_csv(compression='infer')`.
overwrite: Whether to overwrite `output_file` if it exists.
"""
dataframe = pd.DataFrame( | pd.Series(series_data) | pandas.Series |
import os,sys
from pathlib import Path
sys.path.append(str(Path(os.path.realpath(__file__)).parent.parent.absolute()))
#https://stackoverflow.com/questions/19451767/datetime-between-statement-not-working-in-sql-server
from sqlalchemy import create_engine
import time
import pandas as pd
import datetime as dt
nw = dt.datetime.now()
nwt = nw.strftime("%d-%b-%Y")
#x = ['mssql','pymssql',usr='sa',pas='<PASSWORD>',host='10.101.12.138',port='1433',db='SOC_Roster]
mssql_table_info = lambda table : "SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '" + table + "' ORDER BY ORDINAL_POSITION"
mysql_table_info = lambda dbname, tablename : "EXPLAIN " + dbname + '.' + tablename
posgresql_table_info = lambda table : "SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME ='" + table + "'"
def conv_df_dtype(df, db_table_coltype={}):
df = df.convert_dtypes()
if len(db_table_coltype) != 0:
for k, v in db_table_coltype.items():
if 'date' in v or 'time' in v:
try:
df[k] = df.apply(lambda x : | pd.to_datetime(x[k]) | pandas.to_datetime |
"""
30 May 2020
Author: <NAME>
After we have cleaned all the datasets, we will now combine everything into a
single dataframe.
Saving it as csv for the moment as we are still trying to figure out how we can
best share this data.
"""
import pandas as pd
#Simple calling of all the cleaned csv files with the file path censored
df1 = pd.read_csv(r"file_path\API_2005_2013_cleaned.csv")
df2 = pd.read_csv(r"file_path\API_2013_2014_cleaned.csv")
df3 = pd.read_csv(r"file_path\API_2014_2015_cleaned.csv")
df4 = pd.read_csv(r"file_path\API_2015_cleaned.csv")
df5 = pd.read_csv(r"file_path\API_2016_cleaned.csv")
df6 = pd.read_csv(r"file_path\API_Johor_2017_cleaned.csv")
df7 = pd.read_csv(r"file_path\API_Johor_2018_cleaned.csv")
df8 = pd.read_csv(r"file_path\API_Johor_2019_cleaned.csv")
df9 = pd.read_csv(r"file_path\API_Kedah_2017_cleaned.csv")
df10 = pd.read_csv(r"file_path\API_Kedah_2018_cleaned.csv")
df11 = pd.read_csv(r"file_path\API_Kedah_2019_cleaned.csv")
df12 = pd.read_csv(r"file_path\API_Kelantan_2017_cleaned.csv")
df13 = pd.read_csv(r"file_path\API_Kelantan_2018_cleaned.csv")
df14 = pd.read_csv(r"file_path\API_Kelantan_2019_cleaned.csv")
df15 = pd.read_csv(r"file_path\API_KL_2017_cleaned.csv")
df16 = pd.read_csv(r"file_path\API_Melaka_2017_cleaned.csv")
df17 = pd.read_csv(r"file_path\API_Melaka_2018_cleaned.csv")
df18 = pd.read_csv(r"file_path\API_NS_2017_cleaned.csv")
df19 = pd.read_csv(r"file_path\API_NS_2018_cleaned.csv")
df20 = pd.read_csv(r"file_path\API_Pahang_2017_cleaned.csv")
df21 = | pd.read_csv(r"file_path\API_Pahang_2018_cleaned.csv") | pandas.read_csv |
# util.py (lciafmt)
# !/usr/bin/env python3
# coding=utf-8
"""
This module contains common functions for processing LCIA methods
"""
import uuid
import os
from os.path import join
import lciafmt
import logging as log
import pandas as pd
import numpy as np
import yaml
import pkg_resources
import subprocess
from esupy.processed_data_mgmt import Paths, FileMeta, load_preprocessed_output,\
write_df_to_file
modulepath = os.path.dirname(os.path.realpath(__file__)).replace('\\', '/')
datapath = modulepath + '/data/'
log.basicConfig(level=log.INFO)
#Common declaration of write format for package data products
write_format = "parquet"
paths = Paths
paths.local_path = os.path.realpath(paths.local_path + "/lciafmt")
outputpath = paths.local_path
pkg = pkg_resources.get_distribution('lciafmt')
try:
git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode(
'ascii')[0:7]
except:
git_hash = None
def set_lcia_method_meta(method_id):
lcia_method_meta = FileMeta
lcia_method_meta.name_data = method_id.get_filename()
lcia_method_meta.tool = pkg.project_name
lcia_method_meta.tool_version = pkg.version
lcia_method_meta.category = method_id.get_path()
lcia_method_meta.ext = write_format
lcia_method_meta.git_hash = git_hash
return lcia_method_meta
def make_uuid(*args: str) -> str:
path = _as_path(*args)
return str(uuid.uuid3(uuid.NAMESPACE_OID, path))
def _as_path(*args: str) -> str:
strings = []
for arg in args:
if arg is None:
continue
strings.append(str(arg).strip().lower())
return "/".join(strings)
def is_non_empty_str(s: str) -> bool:
"""Tests if the given parameter is a non-empty string."""
if not isinstance(s, str):
return False
return s.strip() != ""
def is_empty_str(s: str) -> bool:
if s is None:
return True
if isinstance(s, str):
return s.strip() == ''
else:
return False
def format_cas(cas) -> str:
""" In LCIA method sheets CAS numbers are often saved as numbers. This
function formats such numbers to strings that matches the general
format of a CAS numner. It also handles other cases like None values
etc."""
if cas is None:
return ""
if cas == "x" or cas == "-":
return ""
if isinstance(cas, (int, float)):
cas = str(int(cas))
if len(cas) > 4:
cas = cas[:-3] + "-" + cas[-3:-1] + "-" + cas[-1]
return cas
return str(cas)
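# Hedged usage sketch (illustrative only): typical conversions this helper
# performs on spreadsheet values. 7440440 is the CAS number for carbon stored
# as a bare number; None and "x" are treated as missing.
def _demo_format_cas():
    assert format_cas(7440440) == "7440-44-0"
    assert format_cas(None) == ""
    assert format_cas("x") == ""
    assert format_cas("7782-50-5") == "7782-50-5"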
def aggregate_factors_for_primary_contexts(df) -> pd.DataFrame:
"""
When factors don't exist for flow categories with only a primary context, like "air", but do
exist for 1 or more categories where secondary contexts are present, like "air/urban", then this
function creates factors for that primary context as an average of the factors from flows
that share that primary context. NOTE this will overwrite factors if they already exist
:param df: a pandas dataframe for an LCIA method
:return: a pandas dataframe for an LCIA method
"""
#Ignore the following impact categories for generating averages
ignored_categories = ['Land transformation', 'Land occupation',
'Water consumption','Mineral resource scarcity',
'Fossil resource scarcity']
indices = df['Context'].str.find('/')
ignored_list = df['Indicator'].isin(ignored_categories)
i = 0
for k in ignored_list.iteritems():
if k[1]:
indices.update(pd.Series([-1], index=[i]))
i = i + 1
primary_context = []
i = 0
for c in df['Context']:
if indices[i] > 0:
sub = c[0:indices[i]]+"/unspecified"
else:
sub = None
i = i + 1
primary_context.append(sub)
df['Primary Context'] = primary_context
#Subset the df to only include the rows were a primary context was added
df_secondary_context_only = df[df['Primary Context'].notnull()]
#Determine fields to aggregate over. Do not use flow UUID or old context
agg_fields = list(set(df.columns) - {'Context', 'Flow UUID', 'Characterization Factor'})
#drop primary context field from df
df = df.drop(columns=['Primary Context'])
df_secondary_agg = df_secondary_context_only.groupby(agg_fields, as_index=False).agg(
{'Characterization Factor': np.average})
df_secondary_agg = df_secondary_agg.rename(columns={"Primary Context": "Context"})
df = pd.concat([df, df_secondary_agg], ignore_index=True, sort=False)
return df
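# Hedged usage sketch (illustrative only, and assumes a pandas version where
# Series.iteritems is still available): two secondary-context rows for "air"
# produce one extra "air/unspecified" row whose factor is their average. The
# column names are assumed to mirror the LCIA method frame layout.
def _demo_aggregate_factors_for_primary_contexts():
    demo_df = pd.DataFrame({
        "Indicator": ["Acidification", "Acidification"],
        "Flowable": ["Ammonia", "Ammonia"],
        "Context": ["air/urban", "air/rural"],
        "Flow UUID": ["uuid-1", "uuid-2"],
        "Characterization Factor": [1.0, 3.0],
    })
    # expect a third row: Context == "air/unspecified", factor == 2.0
    return aggregate_factors_for_primary_contexts(demo_df)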
def get_modification(source, name) -> pd.DataFrame:
"""Returns a dataframe of modified CFs based on csv"""
modified_factors = | pd.read_csv(datapath+"/"+source+"_"+name+".csv") | pandas.read_csv |
import numpy as np
import scipy as sp
import pandas as pd
import ast
import gensim
from gensim.corpora import Dictionary
import networkx as nx
import network_utils as nu
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# -----------------------------------------------------------------------------------------------------------------------
## Loading data
topicDF = pd.read_csv('../data/topicData.csv')
topicDF['date'] = pd.to_datetime(topicDF['date'])
textDF = | pd.read_csv('../data/sessionData.csv') | pandas.read_csv |
#! /usr/bin/env python
import os
import tempfile
import shutil
import warnings
warnings.filterwarnings("ignore")
from unittest import TestCase
from pandashells.lib import plot_lib, arg_lib
import argparse
from mock import patch, MagicMock
import matplotlib as mpl
import pylab as pl
import pandas as pd
from dateutil.parser import parse
warnings.resetwarnings()
class PlotLibTests(TestCase):
def setUp(self):
pl.plot(range(10))
self.dir_name = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.dir_name)
pl.clf()
@patch('pandashells.lib.plot_lib.pl.show')
def test_show_calls_pylab_show(self, show_mock):
"""show() call pylab.show()
"""
args = MagicMock(savefig=[])
plot_lib.show(args)
self.assertTrue(show_mock.called)
def test_show_creates_png_file(self):
"""show() saves a png file
"""
file_name = os.path.join(self.dir_name, 'plot.png')
args = MagicMock(savefig=[file_name])
plot_lib.show(args)
self.assertTrue(os.path.isfile(file_name))
def test_show_creates_html_file(self):
"""show() saves a png file
"""
file_name = os.path.join(self.dir_name, 'plot.html')
args = MagicMock(savefig=[file_name])
xlabel = 'my_xlabel_string'
pl.xlabel(xlabel)
plot_lib.show(args)
with open(file_name) as f:
self.assertTrue(xlabel in f.read())
def test_set_plot_styling(self):
"""set_plot_styling() alters mpl.rcParams
"""
args = MagicMock(
plot_context=['talk'],
plot_theme=['darkgrid'],
plot_palette=['muted'],
)
mpl.rcParams['axes.labelsize'] = 1
mpl.rcParams['axes.titlesize'] = 1
rc_pre = dict(mpl.rcParams)
plot_lib.set_plot_styling(args)
rc_post = dict(mpl.rcParams)
self.assertNotEqual(
rc_pre['axes.labelsize'], rc_post['axes.labelsize'])
self.assertNotEqual(
rc_pre['axes.titlesize'], rc_post['axes.titlesize'])
def test_set_plot_limits_no_args(self):
"""set_limits() properly does nothing when nothing specified
"""
args = MagicMock(savefig='', xlim=[], ylim=[])
plot_lib.set_limits(args)
self.assertEqual(pl.gca().get_xlim(), (0.0, 9.0))
self.assertEqual(pl.gca().get_ylim(), (0.0, 9.0))
def test_set_plot_limits(self):
"""set_limits() properly sets limits
"""
args = MagicMock(savefig='', xlim=[-2, 2], ylim=[-3, 3])
plot_lib.set_limits(args)
self.assertEqual(pl.gca().get_xlim(), (-2.0, 2.0))
self.assertEqual(pl.gca().get_ylim(), (-3.0, 3.0))
def test_set_log_scale(self):
args = MagicMock(savefig='', xlog=True, ylog=True)
plot_lib.set_scale(args)
self.assertEqual(pl.gca().get_xscale(), 'log')
self.assertEqual(pl.gca().get_yscale(), 'log')
def test_keep_lin_scale(self):
args = MagicMock(savefig='', xlog=False, ylog=False)
plot_lib.set_scale(args)
self.assertEqual(pl.gca().get_xscale(), 'linear')
self.assertEqual(pl.gca().get_yscale(), 'linear')
def test_set_labels_titles_no_args(self):
"""set_labels_title() properly does nothing when nothing specified
"""
args = MagicMock(savefig='', title=[], xlabel=[], ylabel=[])
plot_lib.set_labels_title(args)
self.assertEqual(pl.gca().get_title(), '')
self.assertEqual(pl.gca().get_xlabel(), '')
self.assertEqual(pl.gca().get_ylabel(), '')
def test_set_labels_titles(self):
"""set_labels_title() properly sets labels and titles
"""
args = MagicMock(savefig='', title=['t'], xlabel=['x'], ylabel=['y'])
plot_lib.set_labels_title(args)
self.assertEqual(pl.gca().get_title(), 't')
self.assertEqual(pl.gca().get_xlabel(), 'x')
self.assertEqual(pl.gca().get_ylabel(), 'y')
@patch('pandashells.lib.plot_lib.pl.legend')
def test_set_legend_no_args(self, legend_mock):
"""set_legend() properly does nothing when nothing specified
"""
args = MagicMock(savefig='', legend=[])
plot_lib.set_legend(args)
self.assertFalse(legend_mock.called)
@patch('pandashells.lib.plot_lib.pl.legend')
def test_set_legend_best(self, legend_mock):
"""set_legend() properly calls legend when specified
"""
args = MagicMock(savefig='', legend=['best'])
plot_lib.set_legend(args)
legend_mock.assert_called_with(loc='best')
@patch('pandashells.lib.plot_lib.pl.legend')
def test_set_legend_int(self, legend_mock):
"""set_legend() properly calls legend when specified
"""
args = MagicMock(savefig='', legend=['3'])
plot_lib.set_legend(args)
legend_mock.assert_called_with(loc=3)
def test_set_grid_no_grid(self):
"""set_grid() properly does nothing when no_grid set
"""
args = MagicMock(savefig='', no_grid=True)
plot_lib.set_grid(args)
self.assertFalse(pl.gca().xaxis._gridOnMajor)
def test_set_grid_with_grid(self):
"""set_grid() properly sets grid when specified
"""
args = MagicMock(savefig='', no_grid=False)
plot_lib.set_grid(args)
self.assertTrue(pl.gca().xaxis._gridOnMajor)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_args_bad(self, exit_mock, stderr_mock):
"""ensure_xy_args() exits when args are bad
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=True)
plot_lib.ensure_xy_args(args)
self.assertTrue(exit_mock.called)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_args_good(self, exit_mock, stderr_mock):
"""ensure_xy_args() doesn't exit when args okay
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=None)
plot_lib.ensure_xy_args(args)
self.assertFalse(exit_mock.called)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_omission_state_bad(self, exit_mock, stderr_mock):
"""ensure_xy_omission_state() identifies bad inputs
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=None)
df = MagicMock(columns=[1, 2, 3])
plot_lib.ensure_xy_omission_state(args, df)
self.assertTrue(exit_mock.called)
@patch('pandashells.lib.plot_lib.sys.stderr')
@patch('pandashells.lib.plot_lib.sys.exit')
def test_ensure_xy_omission_state_good(self, exit_mock, stderr_mock):
"""ensure_xy_omission_state() identifies bad inputs
"""
stderr_mock.write = MagicMock()
args = MagicMock(x=None, y=None)
df = MagicMock(columns=[1, 2])
plot_lib.ensure_xy_omission_state(args, df)
self.assertFalse(exit_mock.called)
def test_autofill_plot_fields_and_labels_do_nothing(self):
"""autofill_plot_fields_and_labels does no filling
"""
args = MagicMock(x=None, xlabel='xpre', ylabel='ypre')
df = MagicMock(columns=[1])
plot_lib.autofill_plot_fields_and_labels(args, df)
self.assertEqual(args.xlabel, 'xpre')
self.assertEqual(args.ylabel, 'ypre')
def test_autofill_plot_fields_and_labels_2_cols(self):
"""autofill_plot_labels() appropriately handles 2 column frame
"""
args = MagicMock(x=None, xlabel=None, ylabel=None)
df = MagicMock(columns=['x', 'y'])
plot_lib.autofill_plot_fields_and_labels(args, df)
self.assertEqual(args.x, ['x'])
self.assertEqual(args.y, ['y'])
self.assertEqual(args.xlabel, ['x'])
self.assertEqual(args.ylabel, ['y'])
def test_str_to_date_float(self):
x = pd.Series([1., 2., 3.])
self.assertEqual(list(x), list(plot_lib.str_to_date(x)))
def test_str_to_date_str(self):
x = pd.Series(['1/1/2014', '1/2/2014', '1/3/2014'])
expected = [parse(e) for e in x]
self.assertEqual(expected, list(plot_lib.str_to_date(x)))
@patch('pandashells.lib.plot_lib.pl.plot')
def test_draw_traces(self, plot_mock):
args = MagicMock(savefig='', x='x', y='y')
df = | pd.DataFrame([[1, 1], [2, 2]], columns=['x', 'y']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import random
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import sklearn.preprocessing as pp
from datetime import datetime
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from collections import Counter
import itertools
pd.options.display.max_columns = 50
pd.options.display.max_colwidth = 200
pd.options.display.max_colwidth = 200
pd.set_option('display.max_rows', None)
RND_SEED = 45822
random.seed(RND_SEED)
np.random.seed(RND_SEED)
def get_demo_data(number_samples=None, filter_data_types=None, random_state=1144):
data_df = \
pd.read_csv("../baseline-classifier/data/consolidated_disaster_tweet_data.tsv", sep="\t")
if filter_data_types:
data_df = data_df[data_df["data_type"].isin(filter_data_types)]
if number_samples:
data_df = data_df.sample(number_samples, random_state=random_state)
if filter_data_types or number_samples:
data_df = data_df.reset_index(drop=True)
return data_df
# def convert_demo_data_into_list(consolidated_disaster_tweet_data_df, limit=50):
# consolidated_disaster_tweet_data_df["assigned_label"] = "-"
# consolidated_disaster_tweet_data_df["tweet_id"] = consolidated_disaster_tweet_data_df["tweet_id"].values.astype(str)
# all_texts = consolidated_disaster_tweet_data_df[["tweet_id", "tweet_text", "assigned_label"]].values.tolist()
#
# max_length = len(all_texts)
# if limit < max_length:
# all_texts_adj = random.sample(all_texts, limit)
# else:
# all_texts_adj = all_texts
#
# return all_texts_adj
def convert_demo_data_into_list_json(consolidated_disaster_tweet_data_df, limit=50, keep_labels=False,
shuffle_list=[], random_shuffle=False, random_state=21524):
id_col = "tweet_id"
text_col = "tweet_text"
label_col = "assigned_label"
if keep_labels:
consolidated_disaster_tweet_data_df["assigned_label"] = consolidated_disaster_tweet_data_df["event_type"]
else:
consolidated_disaster_tweet_data_df["assigned_label"] = "-"
consolidated_disaster_tweet_data_df[id_col] = consolidated_disaster_tweet_data_df[id_col].values.astype(str)
if len(shuffle_list) > 0:
sort_index = consolidated_disaster_tweet_data_df.index.values
# consolidated_disaster_tweet_data_df["shuffle"] = shuffle_list
group_dict = {}
for group in set(shuffle_list):
group_dict[group] = []
for (group, index) in zip(shuffle_list, sort_index):
group_dict[group].append(index)
dictionaries = list(group_dict.values())
sort_indices = []
for sort_indices_tuple in itertools.zip_longest(*dictionaries):
# print("len(sort_indices_tuple) :", len(sort_indices_tuple))
# print("sort_indices_tuple :", sort_indices_tuple)
temp_list = [x for x in [*sort_indices_tuple] if x is not None]
sort_indices.extend(temp_list)
# sort_indices = list(filter(None, sort_indices))
consolidated_disaster_tweet_data_df = consolidated_disaster_tweet_data_df.iloc[sort_indices, :]
if len(shuffle_list) == 0 and random_shuffle:
consolidated_disaster_tweet_data_df = consolidated_disaster_tweet_data_df\
.sample(frac=1, random_state=random_state)\
.reset_index(drop=True)
print(">> convert_demo_data_into_list_json > len(consolidated_disaster_tweet_data_df) :")
print(len(consolidated_disaster_tweet_data_df))
all_texts = consolidated_disaster_tweet_data_df[[id_col, text_col, label_col]].values.tolist()
max_length = len(all_texts)
if limit < max_length:
all_texts_adj = random.sample(all_texts, limit)
else:
all_texts_adj = all_texts
all_texts_json = [{"id": text[0], "text": text[1], "label": text[2]} for text in all_texts_adj]
adj_text_ids = [text[0] for text in all_texts_adj]
return all_texts_json, adj_text_ids
def update_all_texts(all_texts, text_id, label):
all_texts_df = pd.DataFrame(all_texts, columns=["tweet_id", "tweet_text", "assigned_label"])
all_texts_df.loc[all_texts_df["tweet_id"] == str(text_id), "assigned_label"] = label
all_texts_updated = all_texts_df.values
return all_texts_updated
def filter_all_texts(all_text, filter_list, exclude_already_labeled=False):
filtered_all_text = []
# Slow - 10,000 records - duration 0:00:31.719903
# for filter_id in filter_list:
# for text in all_text:
# if text["id"] == filter_id:
# filtered_all_text.append(text)
# Faster - 10,000 records - duration 0:00:07.619622
# [filtered_all_text.append(text) for text in all_text if text["id"] in filter_list]
# Fastest - 10,000 records - duration 0:00:00.102955
all_text_df = pd.DataFrame(all_text)
filtered_all_text_df = all_text_df[all_text_df["id"].isin(filter_list)]
# print(">> filter_all_texts > filtered_all_text_df :")
# print(filtered_all_text_df.head())
if exclude_already_labeled:
filtered_all_text_df = filtered_all_text_df[filtered_all_text_df["label"].isin(["-"])]
filtered_all_text = filtered_all_text_df.to_dict("records")
return filtered_all_text
def update_texts_list(texts_list, sub_list_limit, old_obj_lst=[], new_obj_lst=[], texts_list_list=[]):
# print("len(texts_list) :", texts_list)
updated_texts_list = texts_list # .copy()
if len(old_obj_lst) > 0 or len(new_obj_lst) > 0:
if len(old_obj_lst) > 0:
for old_obj in old_obj_lst:
# print(f"Trying to remove obj : {old_obj}")
updated_texts_list.remove(old_obj)
if len(new_obj_lst) > 0:
for new_obj in new_obj_lst:
updated_texts_list.append(new_obj)
texts_list_list.clear()
updated_texts_list_list = \
[updated_texts_list[i:i + sub_list_limit] for i in range(0, len(updated_texts_list), sub_list_limit)]
texts_list_list.extend(updated_texts_list_list)
# print("len(texts_list_list) :", len(texts_list_list))
return updated_texts_list, updated_texts_list_list
def update_texts_list_by_id(texts_list, sub_list_limit, updated_obj_lst=[], texts_list_list=[], update_in_place=True):
updated_texts_list_df = pd.DataFrame.from_dict(texts_list) # .copy()
for new_obj in updated_obj_lst:
update_id = new_obj["id"]
update_label = new_obj["label"]
updated_texts_list_df.loc[updated_texts_list_df["id"] == update_id, "label"] = update_label
if not update_in_place:
temp_record = updated_texts_list_df[updated_texts_list_df["id"] == update_id]
updated_texts_list_df = updated_texts_list_df.drop(
updated_texts_list_df.loc[updated_texts_list_df['id'] == update_id].index, axis=0)
updated_texts_list_df = updated_texts_list_df.append(temp_record)
updated_texts_list = updated_texts_list_df.to_dict("records")
texts_list.clear()
texts_list.extend(updated_texts_list)
updated_texts_list_list = \
[updated_texts_list[i:i + sub_list_limit] for i in range(0, len(updated_texts_list), sub_list_limit)]
texts_list_list.clear()
texts_list_list.extend(updated_texts_list_list)
# print("len(texts_list_list) :", len(texts_list_list))
return updated_texts_list, updated_texts_list_list
def cosine_similarities(mat):
# https://stackoverflow.com/questions/17627219/whats-the-fastest-way-in-python-to-calculate-cosine-similarity-given-sparse-mat
#
col_normed_mat = pp.normalize(mat.tocsc(), axis=1)
return col_normed_mat * col_normed_mat.T
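# Hedged sanity-check sketch (illustrative only): on a tiny sparse matrix the
# normalize-then-multiply trick matches sklearn's dense cosine_similarity.
def _demo_cosine_similarities():
    from scipy import sparse
    mat = sparse.csr_matrix(np.array([[1.0, 0.0, 2.0],
                                      [0.0, 3.0, 4.0],
                                      [1.0, 1.0, 0.0]]))
    fast = cosine_similarities(mat).toarray()
    reference = cosine_similarity(mat.toarray())
    assert np.allclose(fast, reference)
    return fast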
def get_all_similarities(sparse_vectorized_corpus, corpus_text_ids):
# Slow - vectorized_corpus.shape : (76484, 10) - Unable to allocate 43.6 GiB for an array with shape (76484, 76484) and data type float64
# similarities = cosine_similarity(sparse_vectorized_corpus)
# similarities_df = pd.DataFrame(similarities, columns=corpus_text_ids)
# Faster - vectorized_corpus.shape : (76484, 10) - duration 0:01:43.129781
# similarities = cosine_similarity(sparse_vectorized_corpus, dense_output=False)
# similarities_df = pd.DataFrame.sparse.from_spmatrix(similarities, columns=corpus_text_ids)
# Faster - vectorized_corpus.shape : (76484, 10) - duration 0:02:03.657139
# similarities = np.dot(sparse_vectorized_corpus, sparse_vectorized_corpus.T)
# similarities_df = pd.DataFrame.sparse.from_spmatrix(similarities, columns=corpus_text_ids)
# Fastest - vectorized_corpus.shape : (76484, 10) - duration 0:01:59.331099
similarities = cosine_similarities(sparse_vectorized_corpus)
similarities_df = pd.DataFrame.sparse.from_spmatrix(similarities, columns=corpus_text_ids)
# print("similarities :")
# print(similarities)
similarities_df["id"] = corpus_text_ids
similarities_df = similarities_df.set_index(["id"])
return similarities_df
def get_all_similarities_one_at_a_time(sparse_vectorized_corpus, corpus_text_ids, text_id, keep_original=False):
text_id_index = corpus_text_ids.index(text_id)
# Fastest - vectorized_corpus.shape : (76484, 10) - duration 0:01:59.331099
single_vectorized_record = sparse_vectorized_corpus[text_id_index, :]
similarities = np.dot(single_vectorized_record, sparse_vectorized_corpus.T).toarray().ravel()
similarities_series = pd.Series(similarities, index=corpus_text_ids)
corpus_text_ids_adj = corpus_text_ids.copy()
corpus_text_ids_adj.remove(text_id)
similarities_series = similarities_series.filter(corpus_text_ids_adj)
similarities_series.index.name = "id"
similarities_series = similarities_series.sort_values(ascending=False)
# print(">> In 'get_all_similarities_one_at_a_time' similarities_series :", similarities_series)
if keep_original:
similarities_series = pd.concat([ | pd.Series(99.0, index=[text_id]) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_utils.ipynb (unless otherwise specified).
__all__ = ['logger', 'set_seed', 'set_session_options', 'setup_logging', 'setup_parser', 'timecode',
'print_device_info', 'dump_tensors', 'Monitor', 'show_gpu', 'round_t', 'merge_dicts', 'display_all',
'unpack_nested_lists_in_df', 'append_df_to_csv', 'robust_rmtree', 'test_pp_model', 'start_wandb_run',
'resume_wandb_run', 'table2df']
# Cell
import torch, numpy as np, pandas as pd, time, GPUtil, wandb, os, sys, shutil, subprocess, argparse
from IPython.display import display  # needed by display_all() below
from timeit import default_timer as timer
from threading import Thread
import logging
logger = logging.getLogger("travis_attack.utils")
# Cell
def set_seed(seed):
"""Sets all seeds for the session"""
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
# Cell
def set_session_options():
"""Sets some useful options for the sesson"""
os.environ["TOKENIZERS_PARALLELISM"] = "true" # set to false if not working
os.environ["WANDB_NOTEBOOK_NAME"] = "run" # some value to stop the error from coming up
pd.set_option("display.max_colwidth", 400)
pd.options.mode.chained_assignment = None
# stop truncation of tables in wandb dashboard
wandb.Table.MAX_ARTIFACT_ROWS = 1000000
wandb.Table.MAX_ROWS = 1000000
# Cell
def setup_logging(cfg, disable_other_loggers=True):
"""taken from this recipe from the logging cookbook:
https://docs.python.org/3/howto/logging-cookbook.html#logging-to-multiple-destinations """
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=cfg.path_logfile,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') # set a format which is simpler for console use
console.setFormatter(formatter) # tell the handler to use this format
logging.getLogger('').addHandler(console) # add the handler to the root logger
if disable_other_loggers:
allowed_modules = ["travis_attack", "wandb"] # "sentence_transformers", "transformers", "datasets"
logger.debug(f"Disabling all loggers except those from the following libraries: {allowed_modules}")
for log_name, log_obj in logging.Logger.manager.loggerDict.items():
if not any(mod in log_name for mod in allowed_modules):
log_obj.disabled = True
# Cell
def setup_parser():
"""Set up command line options"""
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float)
parser.add_argument("--kl_coef", type=float)
parser.add_argument("--ref_logp_coef", type=float)
parser.add_argument("--acc_steps", type=int)
parser.add_argument("--seed", type=int)
parser.add_argument("--n_train_epochs", type=int)
parser.add_argument("--batch_size_train", type=int)
parser.add_argument("--batch_size_eval", type=int)
parser.add_argument("--temperature", type=float)
parser.add_argument("--top_p", type=float)
parser.add_argument("--length_penalty", type=float)
parser.add_argument("--repetition_penalty", type=float)
parser.add_argument("--reward_fn")
parser.add_argument("--dataset_name")
parser.add_argument("--sampling_strategy")
parser.add_argument("--reward_penalty_type")
#parser.add_argument('args', nargs=argparse.REMAINDER) # activate to put keywords in kwargs.
return parser
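# Hedged usage sketch (added for illustration): parse a hand-written argument
# list instead of sys.argv; the flag values below are arbitrary assumptions.
def _demo_setup_parser():
    parser = setup_parser()
    args = parser.parse_args(["--lr", "3e-5", "--seed", "420"])
    assert args.lr == 3e-5 and args.seed == 420
    return args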
# Cell
class timecode:
"""This class is used for timing code"""
def __enter__(self):
self.t0 = timer()
return self
def __exit__(self, type, value, traceback):
self.t = timer() - self.t0
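# Hedged usage sketch (illustrative only): time an arbitrary block of work and
# read the elapsed seconds from the context manager afterwards.
def _demo_timecode():
    with timecode() as tc:
        time.sleep(0.05)  # stand-in for real work
    logger.info(f"block took {tc.t:.3f}s")
    return tc.t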
# Cell
def print_device_info():
"""
Prints some statistics around versions and the GPU's available for
the host machine
"""
import torch
import sys
print("######## Diagnostics and version information ######## ")
print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION', )
from subprocess import call
# call(["nvcc", "--version"]) does not work
#! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())
print ('Available devices ', torch.cuda.device_count())
print("Device name:", torch.cuda.get_device_name())
print ('Current cuda device ', torch.cuda.current_device())
print("#################################################################")
# Cell
def dump_tensors(gpu_only=True):
"""Prints a list of the Tensors being tracked by the garbage collector.
Useful when running into an out of memory error on the GPU. """
import gc
total_size = 0
for obj in gc.get_objects():
try:
if torch.is_tensor(obj):
if not gpu_only or obj.is_cuda:
print("%s:%s%s %s" % (type(obj).__name__,
" GPU" if obj.is_cuda else "",
" pinned" if obj.is_pinned else "",
pretty_size(obj.size())))
total_size += obj.numel()
elif hasattr(obj, "data") and torch.is_tensor(obj.data):
if not gpu_only or obj.is_cuda:
print("%s → %s:%s%s%s%s %s" % (type(obj).__name__,
type(obj.data).__name__,
" GPU" if obj.is_cuda else "",
" pinned" if obj.data.is_pinned else "",
" grad" if obj.requires_grad else "",
" volatile" if obj.volatile else "",
pretty_size(obj.data.size())))
total_size += obj.data.numel()
except Exception as e:
pass
print("Total size:", total_size)
# Cell
class Monitor(Thread):
"""Use this to check that you are using the GPU during your pytorch functions and to track memory usage
of the GPU's as well."""
def __init__(self, delay):
super(Monitor, self).__init__()
self.stopped = False
self.delay = delay # Time between calls to GPUtil
self.start()
def run(self):
while not self.stopped:
GPUtil.showUtilization()
time.sleep(self.delay)
def stop(self):
self.stopped = True
# Cell
def show_gpu(msg):
"""
ref: https://github.com/huggingface/transformers/issues/1742#issue-518262673
put in logger.info()
"""
def query(field):
return(subprocess.check_output(
['nvidia-smi', f'--query-gpu={field}',
'--format=csv,nounits,noheader'],
encoding='utf-8'))
def to_int(result):
return int(result.strip().split('\n')[0])
used = to_int(query('memory.used'))
total = to_int(query('memory.total'))
pct = used/total
return f"{msg} {100*pct:2.1f}% ({used} out of {total})"
# Cell
def round_t(t, dp=2):
"""Return rounded tensors for easy viewing. t is a tensor, dp=decimal places"""
if t.device.type == "cuda": t=t.cpu()
return t.detach().numpy().round(dp)
# Cell
def merge_dicts(d1, d2):
"""Merge the two dicts and return the result. Check first that there is no key overlap."""
assert set(d1.keys()).isdisjoint(d2.keys())
return {**d1, **d2}
# Cell
def display_all(df):
with pd.option_context("display.max_rows", 1000):
with pd.option_context("display.max_columns", 1000):
with pd.option_context("max_colwidth", 480):
display(df)
# Cell
def unpack_nested_lists_in_df(df, scalar_cols=[]):
"""Take a df where we have lists stored in the cells and convert it to many rows.
Put all columns without lists stored in the cells into `scalar_cols`."""
return df.set_index(scalar_cols).apply(pd.Series.explode).reset_index()
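# Hedged usage sketch (illustrative only): two rows holding lists become four
# scalar rows. 'idx' and 'pp_text' are made-up column names for this demo.
def _demo_unpack_nested_lists_in_df():
    demo_df = pd.DataFrame({"idx": [0, 1], "pp_text": [["a", "b"], ["c", "d"]]})
    # result rows: (0,"a"), (0,"b"), (1,"c"), (1,"d")
    return unpack_nested_lists_in_df(demo_df, scalar_cols=["idx"])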
# Cell
def append_df_to_csv(df, path):
"""Checks columns and other stuff before appending"""
import os
if not os.path.isfile(path): df.to_csv(path, mode='a', index=False) # create with header if not exists
elif len(df.columns) != len( | pd.read_csv(path, nrows=1) | pandas.read_csv |
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
def load_and_process(zomato_file_path, forex_file_path, countrycode_file_path):
"""
This method takes the loads, processes, and formats the zomato.csv file to be returned as a dataframe.
Arguments:
zomato_file_path - (str) the file path for zomato.csv
forex_file_path - (str) the file path for forex.csv
countrycode_file_path - (str) the file path for Country-Code.xlsx
"""
forex_df = (
pd.read_csv(forex_file_path)
.assign(date=lambda x: pd.to_datetime(x["date"]))
.assign(year=lambda x: x["date"].dt.year)
)
forex_df = (
forex_df
.drop(forex_df.loc[forex_df["year"].isin(range(2014,2019))==False].index)
)
currencyexchange_df = pd.DataFrame({"Currency":["INR","USD","GBP","BRL","AED","ZAR","NZD","TRY","BWP","IDR","QAR","LKR"],
"Average exchange rate":[forex_df[forex_df["slug"].isin(["USD/INR"])]["open"].mean(),1,
forex_df[forex_df["slug"].isin(["USD/GBP"])]["open"].mean(),forex_df[forex_df["slug"].isin(["USD/BRL"])]["open"].mean(),
forex_df[forex_df["slug"].isin(["USD/AED"])]["open"].mean(),forex_df[forex_df["slug"].isin(["USD/ZAR"])]["open"].mean(),
forex_df[forex_df["slug"].isin(["USD/NZD"])]["open"].mean(),forex_df[forex_df["slug"].isin(["USD/TRY"])]["open"].mean(),
forex_df[forex_df["slug"].isin(["USD/BWP"])]["open"].mean(),forex_df[forex_df["slug"].isin(["USD/IDR"])]["open"].mean(),
forex_df[forex_df["slug"].isin(["USD/QAR"])]["open"].mean(),forex_df[forex_df["slug"].isin(["USD/LKR"])]["open"].mean()]})
countrycode_df = | pd.read_excel(countrycode_file_path) | pandas.read_excel |
import pandas as pd
import numpy as np
import os
from datetime import datetime
from IPython.display import IFrame, clear_output, display  # display() is used in the readers below
# for PDF reading
import textract
import re
import sys
import docx
from difflib import SequenceMatcher
#######################################################################################
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
#######################################################################################
def dms_to_dd(x,as_string=True):
d,m,s = x.split()
result = abs(float(d)) + float(m)/60. + float(s)/3600.
if float(d) < 0:
result = -result
return result
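# Hedged usage sketch (illustrative only): degrees-minutes-seconds strings to
# decimal degrees; a negative degree field flips the sign of the whole value.
# (The as_string keyword is currently unused by the function.)
def _demo_dms_to_dd():
    assert abs(dms_to_dd("43 12 30") - 43.208333) < 1e-4
    assert abs(dms_to_dd("-70 30 0") - (-70.5)) < 1e-9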
#######################################################################################
def convert_state(state):
return {'New Hampshire':'NH','Maine':'ME',
'Massachusetts':'MA','New Hampshire/Maine':'NH'}[state]
#######################################################################################
def doy_to_date(x, year=2008, jan1=1):
# jan1 is Day 1, usually
#if np.isnan(x):
# return np.nan
#print(x)
result = ( pd.Period(year = year-1, month=12, day=31, freq='D') +
pd.to_timedelta(x+(1-jan1), unit='days') )
return result.strftime('%Y-%m-%d')
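# Hedged usage sketch (illustrative only): day-of-year 45 of 2008 (with day 1
# being 1 January) falls on 14 Feb 2008.
def _demo_doy_to_date():
    assert doy_to_date(45, year=2008) == '2008-02-14'
    assert doy_to_date(1, year=2008) == '2008-01-01'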
#######################################################################################
def date_conversion(x, year=None, dateformat='%d-%m-%y'):
# year is Fall Year for date
# default interpretations:
# aaaa-bb-cc : Year/Month/Day
# PROBLEMATIC:
# aa-bb-cc : Month/Day/Year - or Day/Month/Year if aa>12
# Returns string
# Unknown / missing
if np.any([True for i in ['(earliest/latest)', '-999','no data','no response',
'unknown', 'missing', 'unknown', 'unkown','none',
# the following are added for postcard data
# 2021-02-07
'died', 'no res','skip','omit','card not received',
'card not returned', 'moved','nursing home','delete']
if i in str(x).lower()]):
return '-999'
elif (str(x).strip()=='') | (str(x).strip()=='?') | (str(x).strip()=='-?-'):
return '-999'
elif x in ['0',0]:
return '0'
xx = str(x)
if ('+1' in xx) | ('+2' in xx) | ('+3' in xx):
xx = xx.split('+')[0].strip()
outofbounds = False
if ((year < 1678) | (year > 2262)) & (year is not None):
outofbounds = True
if ((len(xx)==8) | ((len(xx)==10))) & ('-' not in xx) & ('/' not in xx):
#print xx, year
if (xx[-2]=='.') | ((len(xx)==8) & (xx.isdigit())):
xx = '{}-{}-{}'.format(xx[:4],xx[4:6],xx[6:8]) # year, month, day
#print xx, year
try:
if (len(xx)==8 ) & ('-' in xx):
xdt = pd.to_datetime(xx, format=dateformat)
else:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
if (len(xx)==8) & ('-' in xx):
# mostly a problem if 00-02-28 (i.e., thinking 00 is a month)
if (xx[2]=='-') & (xx[5]=='-'):
xx = '19'+xx
else:
xx = xx+', {}'.format(year)
elif (len(xx)==10)& ('-' in xx) & outofbounds:
if len(xx.split('-')[0]) >2:
y,m, d = (int(i) for i in xx.split('-'))
else:
d,m,y = (int(i) for i in xx.split('-'))
# latest thaw in August; earliest freeze in August
if ((m<=8) & (y== year+1)) | ((m>=8) & (y==year)):
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
else:
print ('+++++PROBLEM+++++')
print(xx)
xx = xx+', {}'.format(year)
else:
xx = xx+', {}'.format(year)
try:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
print ('**************')
print (e)
print (' {} can not be converted to YYYY/MM/DD'.format(str(x)))
print ('**************\n')
return '-999'
if year is not None:
# print type(y), type(year)
# latest thaw in September!,
# latest thaw in August; earliest freeze in August
if ((m < 8) & (y != (year+1))) | ((m>9) & (y!=year)) | (
((m==8) | (m==9)) & (y!=year) & (y!=(year+1) ) ):
if m<=8:
yearnew = year+1
else:
yearnew = year+0
print ('==================')
print ('Wrong Year in table')
print ('\tData from table: {} (start_year is {})'.format(xx, year))
print ('\t\tYMD: {}-{:02d}-{:02d}'.format(y,m,d))
print (' Recorded (or added) ice date year {} should be {}\n'.format(y, yearnew))
if (np.abs(int(y) - int(yearnew)) % 100) == 0:
print ('\tFORCING YEAR TO NEW VALUE (wrong century)')
y = yearnew
# OTHERWISE TRY FIXING IT BY INVERTING DATE
elif (len(xx)==8) & ('-' in xx):
#print xx
xx = '-'.join(xx.split('-')[::-1])
#print xx
# assuming default as before but switching backwards
xdt = pd.to_datetime(xx,format=dateformat)
d, m, y = xdt.day, xdt.month, xdt.year
if ((m <= 8) & (y != year+1)) | ((m>8) & (y!=year)):
if m<=8:
yearnew = year+1
else:
yearnew = year
if (np.abs(int(y) - int(yearnew)) % 100) == 0:
print ('\tFORCING YEAR TO NEW VALUE (wrong century)')
y = yearnew
else:
print (x, xx)
print ('\tSTILL A PROBLEM. Recorded year {} should be {}'.format(y, yearnew))
else:
print ('Problem fixed')
else:
print ('\tFORCING ICE YEAR TO NEW VALUE (assuming typo)')
y = yearnew
print (' {}-{}, new corrected ice date {:}-{:02d}-{:02d}'.format(year, year+1,y,m,d))
try:
##return '{:02d}-{:02d}-{:04d}'.format(m,d,y)
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
except ValueError as e:
print ('*****FINAL*****')
print (e)
print ('**************')
print ('{} can not be converted to YYYY/MM/DD'.format(str(x)))
return '-999'
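# Hedged usage sketch (illustrative only): an unambiguous ISO-style date in the
# expected ice year passes through unchanged, and missing-data markers collapse
# to the '-999' sentinel. Ambiguous two-digit forms are deliberately avoided
# here because their interpretation depends on the dateformat argument.
def _demo_date_conversion():
    assert date_conversion('1998-12-25', year=1998) == '1998-12-25'
    assert date_conversion('unknown', year=1998) == '-999'
    assert date_conversion(0, year=1998) == '0'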
#######################################################################################
######## READ IN FILES ################################################################
#######################################################################################
def read_all_files(filename_dict, readin_dict , verbose=False,logfile=None, record_contributor=True):
"""
INPUT: filename_dict is dictionary of files names, sorted by file type
readin_dict is a list of corrections and column renames, etc. by filename
OUTPUT: All files merged into a Pandas DataFrame
"""
default_ext = {
'txt':{'delimiter':'\t'},
'tab':{'delimiter':'\t'}
}
dfresult = pd.DataFrame()
# run through the files
for file_ext in filename_dict.keys():
for f in filename_dict[file_ext]:
default_values = {'header':0, 'delimiter':None, 'sheetname':False,
'lakename':None, 'city':None, 'state':None,'contributor':None, 'reorient':False,
'column_rename':None,'ncolumns':None, 'split':False,
'multi':False, 'index_col':None}
if file_ext in default_ext:
for key, value in default_ext[file_ext].items():
default_values[key] = value
if logfile is not None:
logfile.write('===========\nReading in {}\n'.format(f))
if (np.array([i in f for i in readin_dict.keys()])).any():
lakeid = [i for i in readin_dict.keys() if i in f]
if len(lakeid) > 1:
print ('WARNING. There are too many similarly named readin_dict items. Could be a problem.')
if logfile is not None:
logfile.write('\nWARNING. There are too many similarly named readin_dict items.\n')
break
foo = readin_dict[lakeid[0]]
for key,value in foo.items():
default_values[key] = value
#if 'Updated Data 2019.5' in f:
# print(f)
df = read_ts(f,delimiter=default_values['delimiter'],
sheetname=default_values['sheetname'],
header=default_values['header'],
ncolumns=default_values['ncolumns'],
index_col=default_values['index_col'],
logfile = logfile,record_contributor=record_contributor)
if verbose:
if len(df)>0:
sys.stdout.write('\r[ {:150s} ]\r'.format(f))
#sys.stdout.flush()
else:
sys.stdout.write('Skipping {}\n'.format(f))
#sys.stdout.flush()
# specific case for Maine lakes
if default_values['reorient']:
if logfile is not None:
logfile.write('\tReorienting table.\n')
contributor = df.Contributor.values[0]
#df = df.set_index(df.columns[0])
#print('Maine drop')
#display(df.head())
#print(df.columns)
df = df.drop('Contributor',axis=1,level=0).unstack().reset_index()
#print('END Maine drop')
df['Contributor'] = contributor
if default_values['column_rename'] is not None:
if logfile is not None:
logfile.write('\tRenaming columns.\n')
df = df.rename(default_values['column_rename'],axis=1)
if default_values['lakename'] is not None:
if logfile is not None:
logfile.write('\tSetting lakename to {}\n'.format(default_values['lakename']))
df['lake'] = default_values['lakename']
if default_values['city'] is not None:
if logfile is not None:
logfile.write('\tSetting city to {}\n'.format(default_values['city']))
df['city'] = default_values['city']
if default_values['state'] is not None:
if logfile is not None:
logfile.write('\tSetting state to {}\n'.format(default_values['state']))
df['state'] = default_values['state']
if default_values['split']:
# rearrange years/seasons
if logfile is not None:
logfile.write('\tRearranging years/seasons\n')
df = sort_by_season(df)
if default_values['multi']:
if logfile is not None:
logfile.write('\tSorting by events.\n')
df = sort_by_events(df)
#if default_values['lakename'] is not None:
# df['lake'] = default_values['lakename']
if default_values['contributor'] is not None:
if logfile is not None:
logfile.write('\tAssigning contributor: {}\n'.format(default_values['contributor']))
df['Contributor'] = default_values['contributor']
if 'Updated Data' in f:
updated_year = f.split('Updated Data')[1].split('/')[0].strip()
if updated_year == '2018':
updated_year = 2018.5
elif updated_year == '':
updated_year = 2018.0
else:
updated_year = float(updated_year)
df['Updated Year'] = updated_year
"""
if 'Updated Data 2020.5' in f:
df['Updated Year'] = 2020.5
elif 'Updated Data 2020' in f:
df['Updated Year'] = 2020.0
elif 'Updated Data 2019.5' in f:
df['Updated Year'] = 2019.5
elif 'Updated Data 2018' in f:
df['Updated Year'] = 2018.5
elif 'Updated Data 2019' in f:
df['Updated Year'] = 2019.0
elif 'Updated Data' in f:
df['Updated Year'] = 2018.0
"""
df['FileName'] = f
try:
dfresult = dfresult.append(df,ignore_index=True, sort=False)
except:
display(df)
print(kasdf)
return dfresult
#######################################################################################
def sort_by_events(df):
# Move multi-freeze thaw years into separate rows
iceon1col = [c for c in ['Freeze date 1',] if c in df.columns][0]
iceon2col = [c for c in ['Freeze date 2',] if c in df.columns][0]
iceoff1col = [c for c in ['Thaw date 1',] if c in df.columns][0]
iceoff2col = [c for c in ['Thaw date 2',] if c in df.columns][0]
ind = ((~df[iceon1col].isnull() | ~df[iceoff1col].isnull()) &
(~df[iceon2col].isnull() | ~df[iceoff2col].isnull()))
# .copy
dfoo = df[ind].copy()
dfoo[iceon1col] = dfoo[iceon2col]
dfoo[iceoff1col] = dfoo[iceoff2col]
#print('sort by events Drop')
df = df.append(dfoo,ignore_index=True,sort=False).drop([iceoff2col,iceon2col],axis=1)
#print('END sort by events Drop')
# display(df)
return df
#######################################################################################
def sort_by_season(df):
#print (df.columns)
#display(df)
yearcolumn = [c for c in ['Year','year'] if c in df.columns][0]
iceoncolumn = [c for c in ['datefirstice','IceOnDOY','Ice On','Ice-On','Ice on'] if c in df.columns][0]
iceoffcolumn = [c for c in ['datelastice','IceOffDOY','Ice Off','Ice-Off','Ice off'] if c in df.columns][0]
# print df.columns
lakecolumn = [c for c in ['lakeid','lake'] if c in df.columns][0]
dropcolumns = [iceoncolumn, iceoffcolumn]
dfresult = pd.DataFrame()
for name, group in df.groupby(lakecolumn):
iceoff = group[iceoffcolumn].tolist() + [np.nan]
iceon = [np.nan] + group[iceoncolumn].tolist()
try:
years = [float(group[yearcolumn].astype(str).min()) - 1] + group[yearcolumn].tolist()
except:
print(yearcolumn)
display(group[yearcolumn])
display(df)
#print (kmtpasdf)
dfoo = pd.DataFrame({lakecolumn:name,
'Fall Year': years,
iceoncolumn:iceon,
iceoffcolumn:iceoff})
dfresult = dfresult.append(dfoo, ignore_index=True,sort=False)
#print('sort by season Drop')
dfresult = dfresult.merge(df.drop(dropcolumns,axis=1), left_on=[lakecolumn,'Fall Year'],
right_on=[lakecolumn,yearcolumn], how='left')
#print('END sort by season Drop')
for c in dfresult.columns:
## if c not in [lakecolumn, yearcolumn,'Fall Year']+dropcolumns:
if c in ['Contributor','Clerk']:
## print 'backfilling', c
dfresult[c] = dfresult[c].fillna(method='bfill')
## clean up, remove no result years OK
# print dfresult.shape
ind = dfresult[iceoncolumn].isnull() & dfresult[iceoffcolumn].isnull()
## display(dfresult[ind])
#.copy
dfresult = dfresult[~ind].copy()
#print dfresult.shape
# remove duplicates
#display(dfresult[dfresult.duplicated(subset=[lakecolumn,yearcolumn,
# iceoncolumn,iceoffcolumn],keep=False)])
dfresult = dfresult.drop_duplicates(subset=[lakecolumn,yearcolumn,
iceoncolumn,iceoffcolumn])
#print dfresult.shape
if 'Duration' in dfresult.columns:
#display(dfresult.tail(6))
#display(df.tail(6))
dfresult.loc[dfresult.index[:-1],'Duration'] = df.loc[df.index[:],'Duration'].values
# last duration should be removed
dfresult.loc[dfresult.index[-1],'Duration'] = np.nan
if dfresult.lake.values[0]!='Mirror Lake':
print(dfresult.columns)
display(dfresult.head())
print(brokend)
return dfresult
#######################################################################################
#######################################################################################
#######################################################################################
def read_ts(filename, header=0, sheetname=False, index_col=None, logfile=None,delimiter=None,ncolumns=None,
record_contributor=True):
""" ncolumns : number of columns to keep, starting with first
"""
filetype = filename.split('.')[-1].lower()
if filetype == 'pdf':
tsdf = read_pdf(filename,logfile=logfile)
#elif filetype == 'jpg':
# tsdf = read_jpg(filename)
elif filetype in ['csv','txt','tab']:
tsdf = read_csv(filename, delimiter=delimiter, header=header,record_contributor=record_contributor)
#elif filetype in ['txt']:
# tsdf = read_csv(filename, delimiter=delimiter, header=header)
elif filetype in ['xls','xlsx']:
tsdf = read_excel(filename, sheetname=sheetname, logfile=logfile, index_col=index_col,header=header,ncolumns=ncolumns,
record_contributor=record_contributor)
elif filetype in ['doc','docx']:
if 'Updated Data 2019.5' in filename:
doc = docx.Document(filename)
if logfile is not None:
for p in doc.paragraphs:
logfile.write('\t{}\n'.format(p.text))
tsdf = pd.DataFrame()
"""
if 'Updated Data 2019.5' in filename:
doc = docx.Document(filename)
print ('=====================')
print (filename)
print ('=====================')
for p in doc.paragraphs:
print (p.text)
"""
elif filetype in ['jpg']:
if logfile is not None:
logfile.write('\tSKIPPING\n')
tsdf = pd.DataFrame()
else:
if logfile is not None:
logfile.write('\tSKIPPING\n')
tsdf = pd.DataFrame()
return tsdf
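
# Usage sketch (illustrative only; the paths and contributor folders are hypothetical):
# read_ts() dispatches on the file extension, so CSV, Excel, and PDF contributions all
# come back as DataFrames, typically with a 'Contributor' column taken from the parent
# folder name. For example:
#   df = read_ts('Updated Data 2020/Some Contributor/lake_ice.xlsx', sheetname=0)
#   df = read_ts('Some Contributor/lake_ice.csv', delimiter=',')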
#######################################################################################
def read_csv(filename, delimiter=None, encoding='utf-8', header=0, record_contributor=True):
try:
df = pd.read_csv(filename, delimiter=delimiter, encoding='utf-8',engine='python',header=header)
if df.shape[1]==1:
print('{}\n\tToo few columns. Trying a different method.'.format(filename))
df = pd.read_csv(filename, delimiter=delimiter, encoding='utf-8',engine='c',header=header)
print('New shape:',df.shape)
except UnicodeDecodeError as e:
df = pd.read_csv(filename, delimiter=delimiter, encoding='latin1',engine='python',header=header)
contributor = filename.split('/')[-2]
# remove comment line if it exists
if df.iloc[0,0] == '#':
#print('CSV # Drop')
df = df.drop(0,axis=0)
#print('END csv # Drop')
if record_contributor:
df['Contributor'] = contributor
return df
#######################################################################################
def read_jpg(filename):
    text2 = textract.process(filename, encoding='ascii',
                             method='tesseract', layout=True).decode("utf8")
    return text2
#######################################################################################
def read_excel(filename, header=0, sheetname=False, index_col=None, logfile=None, ncolumns=None,
record_contributor = True):
df = pd.read_excel(filename, header=header, sheet_name= sheetname,
index_col = index_col)
contributor = filename.split('/')[-2]
if ncolumns is not None:
df = df.iloc[:,:ncolumns]
# remove all blank columns
df = df.dropna(how='all',axis=1)
# remove row with '#'
try:
if len([True for i in df.iloc[0,:].tolist() if '#' in str(i)]) > 0:
#print('excel # Drop')
df = df.drop(0,axis=0)
#print('END excel # Drop')
if logfile is not None:
logfile.write('\tDropping Row 0\n')
#display(df.head(2))
except:
pass
## SPECIAL CASES
if 'NHFRA' in filename:
df.loc[0:3,:] = df.loc[0:3,:].ffill(axis=1)
df.columns = df.loc[2,:]
finalcolumns = df.iloc[4,0:4]
#print('NHFRA Drop')
df = df.drop([0,1,2,3,4],axis=0)
#print('END NHFRA Drop')
df2 = pd.DataFrame()
for c in df.columns.unique():
# .copy
dfoo = df[c].copy()
dfoo.columns = finalcolumns
dfoo.loc[:, 'lake']= c
df2 = df2.append(dfoo,ignore_index=True,sort=False).dropna()
df = df2.reset_index(drop=True)
elif 'Sapna_data' in filename:
df = df.set_index('Winter').unstack()[::2].reset_index().merge(
df.set_index('Winter').unstack()[1::2].reset_index(), left_index=True, right_index=True)
elif 'ice_in_out' in filename:
df = df.append(pd.read_excel(filename, header=header,
sheet_name='MN Ice In'),sort=False,ignore_index=True)
elif 'Serwy' in filename:
dfoo = df.copy()
dfoo.Winter = dfoo.Winter.replace('1986/1897','1896/1897')
ind = dfoo['Ice cover_off (or ice cover on and off)'].astype(str).str.contains('until')
dfoo2 = dfoo.loc[ind,:].copy()
dfoo2['Ice cover_on'] = dfoo.loc[ind, 'Ice cover_off (or ice cover on and off)'].apply(lambda x: x.split('until')[0])
dfoo2['Ice cover_off (or ice cover on and off)'] = dfoo.loc[ind, 'Ice cover_off (or ice cover on and off)'].apply(lambda x: x.split('until')[1].split('break')[0])
ind = dfoo2['Ice cover_off (or ice cover on and off)'].astype(str).str.contains('10.04.1985')
dfoo2.loc[ind, 'Ice cover_off (or ice cover on and off)'] = '1985-04-10'
ind = dfoo['Ice cover_off (or ice cover on and off)'].astype(str).str.contains('break')
dfoo3 = dfoo.loc[ind,:].copy()
dfoo3['Ice cover_on'] = dfoo.loc[ind, 'Ice cover_off (or ice cover on and off)'].apply(lambda x: x.split('until')[1].split('break')[1])
dfoo3['Ice cover_off (or ice cover on and off)'] = dfoo.loc[ind, 'Ice cover_off (or ice cover on and off)'].apply(lambda x: x.split('until')[2])
ind = dfoo['Ice cover_on'].astype(str).str.contains('until')
dfoo4 = dfoo.loc[ind,:].copy()
dfoo4['Ice cover_on'] = dfoo.loc[ind, 'Ice cover_on'].apply(lambda x: x.split('until')[0])
dfoo4['Ice cover_off (or ice cover on and off)']= dfoo.loc[ind,'Ice cover_on'].apply(lambda x: x.split('until')[1].split('break')[0].split('brek')[0])
dfoo4[' ice cover'] = np.nan
# Add missing data (ice on early but no ice off )
ind = dfoo['Ice cover_on'].astype(str).str.contains('brek|break') & (dfoo['Ice cover_off (or ice cover on and off)']=='no data')
dfoo5 = dfoo.loc[ind,:].copy()
dfoo5['Ice cover_on'] = dfoo5['Ice cover_on'].apply(lambda x: (pd.to_datetime(x.split('until')[1].split('break')[0])+pd.to_timedelta('1 day')).strftime('%Y-%m-%d'))
dfoo5[' ice cover'] = np.nan
dfoo5['Ice cover_off (or ice cover on and off)'] = np.nan
# Add missing data (ice off but no ice on early)
ind = (dfoo['Ice cover_on'].astype(str).str.contains('no data') | dfoo['Ice cover_on'].isnull()) & (~dfoo['Ice cover_off (or ice cover on and off)'].isnull() & (dfoo['Ice cover_off (or ice cover on and off)']!='no data'))
dfoo6 = dfoo.loc[ind,:].copy()
        dfoo6['Ice cover_off (or ice cover on and off)'] = dfoo6['Ice cover_off (or ice cover on and off)'].apply(lambda x: (pd.to_datetime(x.split('until')[0]) - pd.to_timedelta('1 day')).strftime('%Y-%m-%d'))
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
from pathlib import Path
from data_check.sql import DataCheckSql, LoadMode # noqa E402
from data_check.config import DataCheckConfig # noqa E402
@pytest.fixture(scope="module", params=["csv", "xlsx"])
def file_type(request):
return request.param
@pytest.fixture
def sql() -> DataCheckSql:
dc_config = DataCheckConfig().load_config().set_connection("test")
_sql = DataCheckSql(dc_config.connection)
return _sql
def test_load_from_dataframe_append(sql: DataCheckSql):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
sql.get_connection().execute("create table test (id number(10), data varchar2(10))")
sql.table_loader.load_table("test", data, LoadMode.APPEND)
df = sql.run_query("select id, data from test")
assert_frame_equal(data, df)
def test_load_from_dataframe_append_creates_table_if_no_table_exists(sql: DataCheckSql):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
sql.table_loader.load_table("test", data, LoadMode.APPEND)
df = sql.run_query("select id, data from test")
    assert_frame_equal(data, df)
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1])
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# completely different categories => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([1, 3, 2])
exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series(["a", "b", "c"])
exp = Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = Series([10, 11], dtype="category")
s2 = Series([np.nan, np.nan, np.nan])
exp = Series([10, 11, np.nan, np.nan, np.nan])
        tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with non-category results in a ValueError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": | Series([1.0, 50.0, 100.0]) | pandas.Series |
import json
import os
import pandas as pd
from .utils import list_to_md_table
SCHEMA_TO_PANDAS_TYPES = {
"integer": "int64",
"number": "float",
"string": "string",
"any": "object",
"boolean": "bool",
}
FORMAT_TO_REGEX = {
# https://emailregex.com/
"email": r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
# https://www.regextester.com/94092
"uri": r"^\w+:(\/?\/?)[^\s]+$",
}
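
# Illustrative sketch (not part of the original module): one way the FORMAT_TO_REGEX
# table above could be applied when validating a GMNS column. The helper name is made
# up; only pandas' built-in string matching is used.
def _demo_check_format(values, fmt: str) -> pd.Series:
    """Return a boolean Series flagging which values match the regex for `fmt`
    ('email' or 'uri'); missing values come back as <NA>."""
    return pd.Series(values, dtype="string").str.match(FORMAT_TO_REGEX[fmt])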
def read_schema(schema_file: str) -> dict:
"""
Reads in schema from schema json file and returns as dictionary.
##TODO validate schema itself
Args:
schema_file: File location of the schema json file.
Returns: The schema as a dictionary
"""
with open(schema_file, encoding="utf-8") as f:
schema = json.load(f)
return schema
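
# Usage sketch (illustrative; the schema file name would come from the config, and the
# {"fields": [...]} layout is an assumption about the frictionless-style GMNS schemas,
# not something read_schema() enforces):
def _demo_field_names(schema_file: str) -> list:
    """List the field names declared in a GMNS schema file, if any."""
    schema = read_schema(schema_file)
    return [f.get("name") for f in schema.get("fields", [])]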
def read_config(config_file: str, data_dir: str = "", schema_dir: str = "") -> pd.DataFrame:
"""
Reads a GMNS config file, adds some full paths and returns as a dataframe.
Args:
config_file: Configuration file. A json file with a list of "resources"
specifying the "name", "path", and "schema" for each GMNS table as
well as a boolean value for "required".
Example:
::
{
"resources": [
{
"name":"link",
"path": "link.csv",
"schema": "link.schema.json",
"required": true
},
{
"name":"node",
"path": "node.csv",
"schema": "node.schema.json",
"required": true
}
}
data_dir: Directory where GMNS files are. If not specified, assumes
the same directory as the config_file.
schema_dir: Directory where GMNS schema files are. If not specified, assumes
the same directory as the config_file.
Returns: GMNS configuration file as a DataFrame.
"""
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
## todo validate config
resource_dict = {i["name"]: i for i in config["resources"]}
# print(config["resources"])
    resource_df = pd.DataFrame(config["resources"])
    return resource_df
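
# Usage sketch (illustrative; the config path is hypothetical, and the 'name' and
# 'required' columns are assumed to be present, as in the example config shown in the
# docstring above):
def _demo_required_tables(config_file: str) -> list:
    """Return the names of the GMNS tables that a config marks as required."""
    cfg = read_config(config_file)
    return cfg.loc[cfg["required"].fillna(False).astype(bool), "name"].tolist()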
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import pandas as pd
import numpy as np
import moment
from operator import itemgetter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().format('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication details
def getAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def getIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatIsoDate(self,d):
return moment.date(d).format('YYYY-MM-DD')
def getDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.format('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().format('YYYY-MM-DD')
currentYear = self.getIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).format('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).format('YYYY-MM-DD')
pe = self.getIsoWeek(currentWDate)
periods.append(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).format('YYYY-MM-DD')
pe = self.getIsoWeek(currentWDate)
periods.append(pe)
else:
pe = 'LAST_7_DAYS'
periods.append(pe)
return periods
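    # Illustrative examples (comments only; the numbers are made up): with m=4 weekly
    # periods and n=5 seasons, createAggThresholdPeriod(4, 5, 'SEASONAL') returns one
    # ISO-week string per week for each of the 5 previous years (20 periods in total),
    # 'NON_SEASONAL' returns the last m+1 = 5 ISO weeks counted back from today, and
    # any other type (the case-based branch) returns ['LAST_7_DAYS'].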
def getHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.get(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def getHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.get(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Update data
def updateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def getArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.append(obj['id'])
return arrayObj
# Check datastore existance
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.get(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def getOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and len(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def getOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and len(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".format(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".format(prefix,sep,column,sep,code)
else:
code = "{}{}{}".format(prefix,sep,code)
return code
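    # Illustrative example (comment only): generateCode(row, column='orgUnitCode',
    # prefix='E', sep='_') builds codes of the form 'E_<orgUnitCode>_<11 random chars>',
    # which is how the epidemic codes ('epicode') are generated further below.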
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " reminder"
text = "Dear all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.append({"id": outbreak['orgUnit']})
organisationUnits.append({"id": outbreak['reportingOrgUnit']})
message.append(subject)
message.append(text)
message.append(users)
message.append(organisationUnits)
message = tuple(message)
return pd.Series(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
                messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
                messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
                messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,headers,type):
cols = []
for header in headers:
if(type == 'EVENT'):
if header['name'] == self.dateOfOnsetUid:
cols.append('onSetDate')
elif header['name'] == self.conditionOrDiseaseUid:
cols.append('disease')
elif header['name'] == self.regPatientStatusOutcome:
cols.append('immediateOutcome')
elif header['name'] == self.patientStatusOutcome:
cols.append('statusOutcome')
elif header['name'] == self.testResult:
cols.append('testResult')
elif header['name'] == self.testResultClassification:
cols.append('testResultClassification')
elif header['name'] == self.caseClassification:
cols.append('caseClassification')
else:
cols.append(header['name'])
elif (type == 'DATES'):
cols.append(header['name'])
else:
cols.append(header['column'])
return cols
# Get start and end date
def getStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
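    # Illustrative example (comment only): getStartEndDates(2021, 5) anchors on the
    # Monday of ISO week 1 (4 January 2021, because 1 January 2021 falls on a Friday),
    # steps forward (week - 1) weeks, and returns [2021-02-01, 2021-02-07] for week 5.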
# create Panda Data Frame from event data
def createDataFrame(self,events,type=None):
if type is None:
if events is not None:
#pd.DataFrame.from_records(events)
dataFrame = pd.io.json.json_normalize(events)
else:
dataFrame = pd.DataFrame()
else:
cols = self.createColumns(events['headers'],type)
dataFrame = pd.DataFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = pd.DataFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and len(aggData['rows']) >0):
df = self.createDataFrame(aggData,'AGGREGATE')
dfColLength = len(df.columns)
df1 = df.iloc[:,(detectionLevel+4):dfColLength]
df.iloc[:,(detectionLevel+4):dfColLength] = df1.apply(pd.to_numeric,errors='coerce').fillna(0).astype(np.int64)
# print(df.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do mean for current cases or deaths
df['mean_current_cases'] = df.iloc[:,(detectionLevel+4)]
df['mean_mn_cases'] = df.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].mean(axis=1)
df['stddev_mn_cases'] = df.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].std(axis=1)
df['mean20std_mn_cases'] = (df.mean_mn_cases + (2*df.stddev_mn_cases))
df['mean15std_mn_cases'] = (df.mean_mn_cases + (1.5*df.stddev_mn_cases))
df['mean_current_deaths'] = df.iloc[:,(detectionLevel+5+m)]
df['mean_mn_deaths'] = df.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].mean(axis=1)
df['stddev_mn_deaths'] = df.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].std(axis=1)
df['mean20std_mn_deaths'] = (df.mean_mn_deaths + (2*df.stddev_mn_deaths))
df['mean15std_mn_deaths'] = (df.mean_mn_deaths + (1.5*df.stddev_mn_deaths))
# periods
df['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.getStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
df['dateOfOnSetWeek'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
df['firstCaseDate'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
df['lastCaseDate'] = moment.date(startEndDates[1]).format('YYYY-MM-DD')
df['endDate'] = ""
df['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).format('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
df['mean_current_cases'] = df.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].mean(axis=1)
df['mean_mn_cases'] = df.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].mean(axis=1)
df['stddev_mn_cases'] = df.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].std(axis=1)
df['mean20std_mn_cases'] = (df.mean_mn_cases + (2*df.stddev_mn_cases))
df['mean15std_mn_cases'] = (df.mean_mn_cases + (1.5*df.stddev_mn_cases))
df['mean_current_deaths'] = df.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].mean(axis=1)
df['mean_mn_deaths'] = df.iloc[:,(detectionLevel+3+(2*m)+(m*n)):dfColLength-1].mean(axis=1)
df['stddev_mn_deaths'] = df.iloc[:,(detectionLevel+3+(2*m)+(m*n)):dfColLength-1].std(axis=1)
df['mean20std_mn_deaths'] = (df.mean_mn_deaths + (2*df.stddev_mn_deaths))
df['mean15std_mn_deaths'] = (df.mean_mn_deaths + (1.5*df.stddev_mn_deaths))
# Mid period for seasonal = mean of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
df['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.getStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
df['dateOfOnSetWeek'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
df['firstCaseDate'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).format('YYYY-MM-DD')
df['lastCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).format('YYYY-MM-DD')
df['endDate'] = ""
df['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).format('YYYY-MM-DD')
df['reportingOrgUnitName'] = df.iloc[:,reportingLevel-1]
df['reportingOrgUnit'] = df.iloc[:,detectionLevel].apply(self.getOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
df['orgUnit'] = df.iloc[:,detectionLevel]
df['orgUnitName'] = df.iloc[:,detectionLevel+1]
df['orgUnitCode'] = df.iloc[:,detectionLevel+2]
dropColumns = [col for idx,col in enumerate(df.columns.values.tolist()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
df.drop(columns=dropColumns,inplace=True)
df['confirmedValue'] = df.loc[:,'mean_current_cases']
df['deathValue'] = df.loc[:,'mean_current_deaths']
df['suspectedValue'] = df.loc[:,'mean_current_cases']
df['disease'] = diseaseMeta['disease']
df['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "mean_current_cases >= mean20std_mn_cases & mean_current_cases != 0 & mean20std_mn_cases != 0"
df.query(checkEpidemic,inplace=True)
if df.empty is True:
df['alert'] = "false"
if df.empty is not True:
df['epidemic'] = 'true'
# Filter out those greater or equal to threshold
df = df[df['epidemic'] == 'true']
df['active'] = "true"
df['alert'] = "true"
df['reminder'] = "false"
#df['epicode']=df['orgUnitCode'].str.cat('E',sep="_")
df['epicode'] = df.apply(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "df['epidemic'] == 'true' && df['active'] == 'true' && df['reminder'] == 'false'"
closedVigilanceQuery = "df['epidemic'] == 'true' && df['active'] == 'true' && df['reminder'] == 'true'"
df[['status','active','closeDate','reminderSent','dateReminderSent']] = df.apply(self.getEpidemicDetails,axis=1)
else:
# No data for cases found
pass
return df
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace all values with standard text
def replaceText(self,df):
df.replace(to_replace='Confirmed case',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='Suspected case',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='Confirmed',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='Suspected',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='confirmed case',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='suspected case',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='died',value='deathValue',regex=True,inplace=True)
df.replace(to_replace='Died case',value='deathValue',regex=True,inplace=True)
return df
# Get Confirmed,suspected cases and deaths
def getCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epedimic is active or ended
def getStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if pd.to_datetime(self.today) < pd.to_datetime(row['endDate']):
currentStatus='active'
elif pd.to_datetime(row['endDate']) == (pd.to_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'reminder':
if row['reminderDate'] == pd.to_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return pd.Series(currentStatus)
# get onset date
def getOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).format('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def getTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).format('YYYY-MM-DD')
# replace data of onset with event dates
def replaceDatesWithEventData(self,row):
if row['onSetDate'] == '':
return pd.to_datetime(row['eventdate'])
else:
return pd.to_datetime(row['onSetDate'])
# Get columns based on query or condition
def getQueryValue(self,df,query,column,inplace=True):
query = "{}={}".format(column,query)
df.eval(query,inplace)
return df
# Get columns based on query or condition
def queryValue(self,df,query,column=None,inplace=True):
df.query(query)
return df
# Get epidemic, closure and status
def getEpidemicDetails(self,row,columns=None):
details = []
if row['epidemic'] == "true" and row['active'] == "true" and row['reminder'] == "false":
details.append('Closed')
details.append('false')
details.append(self.today)
details.append('false')
details.append('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['reminder'] == "true":
details.append('Closed Vigilance')
details.append('true')
details.append(row['closeDate'])
details.append('true')
details.append(self.today)
# Send Reminder for closure
else:
details.append('Confirmed')
details.append('true')
details.append('')
details.append('false')
details.append('')
detailsSeries = tuple(details)
return pd.Series(detailsSeries)
# Get key id from dataelements
def getDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
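# Example lookup (hypothetical metadata, for illustration only):
#   dataElements = [{'name': 'confirmed', 'id': 'hJK9mN2pQr3'}, {'name': 'deaths', 'id': 'Zx8WqL4tYv1'}]
#   self.getDataElement(dataElements, 'deaths')   # -> 'Zx8WqL4tYv1'
#   self.getDataElement(dataElements, 'missing')  # -> None (no matching name)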
# Detect epidemics based on program indicators
# (Confirmed, Deaths, Suspected)
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = pd.DataFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
df = self.createDataFrame(caseEvents,type)
caseEventsColumnsById = df.columns
dfColLength = len(df.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#df['dateOfOnSet'] = np.where(df['onSetDate']== '',pd.to_datetime(df['eventdate']).dt.strftime('%Y-%m-%d'),df['onSetDate'])
df['dateOfOnSet'] = df.apply(self.getOnSetDate,axis=1)
# Replace all text with standard text
df = self.replaceText(df)
# Transpose and Aggregate values
dfCaseClassification = df.groupby(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].value_counts().unstack().fillna(0).reset_index()
dfCaseImmediateOutcome = df.groupby(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].value_counts().unstack().fillna(0).reset_index()
dfTestResult = df.groupby(['ouname','ou','disease','dateOfOnSet'])['testResult'].value_counts().unstack().fillna(0).reset_index()
dfTestResultClassification = df.groupby(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].value_counts().unstack().fillna(0).reset_index()
dfStatusOutcome = df.groupby(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].value_counts().unstack().fillna(0).reset_index()
combinedDf = pd.merge(dfCaseClassification,dfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').merge(dfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').merge(dfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').merge(dfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = pd.to_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
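# e.g. a dateOfOnSet of '2020-01-06' formats to '2020W02' (ISO week number used as the epi week)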
combinedDf['confirmedValue'] = combinedDf.apply(self.getCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.apply(self.getCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.apply(self.getCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
dfConfirmed = combinedDf.groupby(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['sum']).reset_index()
dfConfirmed.rename(columns={'sum':'confirmedValue' },inplace=True)
dfSuspected = combinedDf.groupby(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['sum']).reset_index()
dfSuspected.rename(columns={'sum':'suspectedValue' },inplace=True)
dfFirstAndLastCaseDate = df.groupby(['ouname','ou','disease'])['dateOfOnSet'].agg(['min','max']).reset_index()
dfFirstAndLastCaseDate.rename(columns={'min':'firstCaseDate','max':'lastCaseDate'},inplace=True)
aggDf = pd.merge(dfConfirmed,dfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').merge(dfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].apply(self.getOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].apply(self.getOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = pd.to_datetime(pd.to_datetime(aggDf['lastCaseDate']) + pd.to_timedelta(np.ceil(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['reminderDate'] = pd.to_datetime(pd.to_datetime(aggDf['lastCaseDate']) + pd.to_timedelta(np.ceil(2*aggDf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
aggDf.rename(columns={'ouname':'orgUnitName','ou':'orgUnit'},inplace=True);
aggDf[['active']] = aggDf.apply(self.getStatus,args=['active'],axis=1)
aggDf[['reminder']] = aggDf.apply(self.getStatus,args=['reminder'],axis=1)
else:
df1 = df.iloc[:,(detectionLevel+4):dfColLength]
df.iloc[:,(detectionLevel+4):dfColLength] = df1.apply(pd.to_numeric,errors='coerce').fillna(0).astype(np.int64)
if(dateData['height'] > 0):
dfDates = self.createDataFrame(dateData,'DATES')
dfDates.to_csv('aggDfDates.csv',encoding='utf-8')
dfDates.rename(columns={dfDates.columns[7]:'disease',dfDates.columns[8]:'dateOfOnSet'},inplace=True)
dfDates['dateOfOnSet'] = dfDates.apply(self.getTeiOnSetDate,axis=1)
dfDates = dfDates.groupby(['ou','disease'])['dateOfOnSet'].agg(['min','max']).reset_index()
dfDates.rename(columns={'min':'firstCaseDate','max':'lastCaseDate'},inplace=True)
df = pd.merge(df,dfDates,right_on=['ou'],left_on=['organisationunitid'],how='left')
df['incubationDays'] = int(diseaseMeta['incubationDays'])
df['endDate'] = pd.to_datetime(pd.to_datetime(df['lastCaseDate']) + pd.to_timedelta(np.ceil(2*df['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
df['reminderDate'] = pd.to_datetime(pd.to_datetime(df['lastCaseDate']) + pd.to_timedelta(np.ceil(2*df['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
df.dropna(subset=['disease'],inplace=True)
df[['active']] = df.apply(self.getStatus,args=['active'],axis=1)
df[['reminder']] = df.apply(self.getStatus,args=['reminder'],axis=1)
else:
pass
df.rename(columns={df.columns[10]:'confirmedValue' },inplace=True)
df.rename(columns={df.columns[11]:'deathValue' },inplace=True)
df.rename(columns={df.columns[12]:'suspectedValue' },inplace=True)
df['reportingOrgUnitName'] = df.iloc[:,reportingLevel-1]
df['reportingOrgUnit'] = df.loc[:,'organisationunitid'].apply(self.getOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
df.rename(columns={'organisationunitname':'orgUnitName','organisationunitid':'orgUnit'},inplace=True);
df['dateOfOnSetWeek'] = self.getIsoWeek(self.today)
df["period"]= df['dateOfOnSetWeek']
#df['disease'] = diseaseMeta['disease']
aggDf = df
aggDf['alertThreshold'] = int(diseaseMeta['alertThreshold'])
aggDf['epiThreshold'] = int(diseaseMeta['epiThreshold'])
#df['confirmed_suspected_cases'] = df[['confirmedValue','suspectedValue']].sum(axis=1)
aggDf['epidemic'] = np.where(aggDf['confirmedValue'] >= aggDf['epiThreshold'],'true','false')
alertQuery = (aggDf['confirmedValue'] < aggDf['epiThreshold']) & (aggDf['suspectedValue'].astype(np.int64) >= aggDf['alertThreshold'].astype(np.int64)) & (aggDf['endDate'] > self.today)
aggDf['alert'] = np.where(alertQuery,'true','false')
aggDf.to_csv('aggDf.csv',encoding='utf-8')
return aggDf
else:
# No data for cases found
pass
return dhis2Events
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Build the DHIS2 event data values for a row
# @param row, config (data element metadata), columns
# @return list of {'dataElement': id, 'value': value} entries
def createEventDatavalues(self,row=None,config=None,columns=None):
dataElements = config
event = []
for key in columns.values: # for key in [*row]
if key == 'suspectedValue':
event.append({'dataElement': self.getDataElement(dataElements,'suspected'),'value':row['suspectedValue']})
elif key == 'deathValue':
event.append({'dataElement': self.getDataElement(dataElements,'deaths'),'value':row['deathValue']})
elif key == 'confirmedValue':
event.append({'dataElement': self.getDataElement(dataElements,'confirmed'),'value':row['confirmedValue']})
elif key == 'firstCaseDate':
event.append({'dataElement': self.getDataElement(dataElements,'firstCaseDate'),'value':row['firstCaseDate']})
elif key == 'orgUnit':
event.append({'dataElement': self.getDataElement(dataElements,'origin'),'value':row['orgUnit']})
event.append({'dataElement': self.getDataElement(dataElements,'outbreakId'),'value':row['epicode']})
elif key == 'disease':
event.append({'dataElement': self.getDataElement(dataElements,'disease'),'value':row['disease']})
elif key == 'endDate':
event.append({'dataElement': self.getDataElement(dataElements,'endDate'),'value':row['endDate']})
elif key == 'status':
event.append({'dataElement': self.getDataElement(dataElements,'status'),'value':row['status']})
else:
pass
#### Check epidemic closure
if hasattr(row,'closeDate'):
if(row['closeDate']) == self.today and row['status']=='Closed':
event.append({'dataElement': key,'value':'Closed'})
# Send closure message
elif hasattr(row,'dateReminderSent'):
if row['dateReminderSent']==self.today and row['status']== 'Closed Vigilance':
event.append({'dataElement': key,'value':'Closed Vigilance'})
# Send Reminder for closure
else:
pass
return event
# Replace existing outbreak code in the new epidemics for tracking
'''
check is the column to track, e.g. the outbreak code
keys is the list of columns to use as lookup keys
row is the row in the dataFrame
append is the column whose value is passed to generateCode when a new outbreak code is created
df is the dataframe to compare against
(a usage sketch follows the function below)
'''
def trackEpidemics(self,row=None,df=None,check=None,keys=None,append=None):
if row is not None:
# filter by keys and not closed
query = ['{}{}{}{}'.format(key,' in "',row[key],'"') for key in keys]
query = ' and '.join(query)
query = '{}{}'.format(query,' and closeDate == ""')
if df.empty:
return self.generateCode(prefix='E',sep='_')
else:
filteredDf = df.query(query).sort_values(keys)
if filteredDf.empty:
return self.generateCode(column=row[append],prefix='E',sep='_')
else:
checked = [filteredDf.at[index,check] for index in filteredDf.index]
if len(checked) > 0:
row[check] = checked[0]
else:
row[check] = self.generateCode(column=row[append],prefix='E',sep='_')
return row[check]
else:
return self.generateCode(prefix='E',sep='_')
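# Usage sketch (illustration; mirrors the row-wise apply used further below): each newly
# detected epidemic either reuses the outbreak code of a matching open epidemic in the
# datastore or gets a freshly generated one, e.g.
#   newEpidemics['epicode'] = newEpidemics.apply(
#       self.trackEpidemics,
#       args=(dfEpidemics, 'epicode', ['disease', 'orgUnit'], 'orgUnitCode'), axis=1)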
# Remove existing and update with new from data store epidemics
# Support meta data mapping format [{epiKey1:eventKey1},{epiKey2:eventKey2}] e.g [{'confirmedValue':'confirmedValue'},{'status':'status'}]
# epidemics and events are dataframe
def getDfUpdatedEpidemics(self,epidemics,events,mergeColumns=None,how='inner',track=False,epidemic=True):
if epidemics.empty and events.empty == False:
return events
if epidemics.empty == False and events.empty:
return epidemics
else:
if mergeColumns is not None:
mergedDf=epidemics.merge(events,how=how,on=mergeColumns,suffixes=('_left','_right'),indicator=track)
if epidemic:
mergedDf['updated']= np.where(mergedDf["endDate"] == '',True,False)
mergedDf['epitype']= np.where(mergedDf["endDate"] == '',"new","old")
#epidemics['reminderSent']=np.where(epidemics['dateReminderSent'] == self.today, True,False)
#epidemics['dateReminderSent']=np.where(epidemics["reminderDate"] == self.today, self.today,'')
#epidemics['reminder']=np.where(epidemics["reminderDate"] == self.today, True,False)
mergedDf['active']=np.where(pd.to_datetime(self.today) < pd.to_datetime(mergedDf["endDate"]),True,False)
else:
pass
return mergedDf
return epidemics
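# Minimal sketch of the merge used above (hypothetical frames, for illustration): with
# track=True the pandas merge adds a '_merge' indicator column whose values
# 'left_only' / 'right_only' / 'both' are what getEpidemics uses below to split
# existing, new and updated epidemics, e.g.
#   merged = epidemics.merge(events, how='outer', on=['orgUnit', 'disease', 'period'],
#                            suffixes=('_left', '_right'), indicator=True)
#   newOnly = merged.query("_merge == 'right_only'")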
def getRootOrgUnit(self):
root = {};
root = self.getHttpData(self.url,'organisationUnits',self.username,self.password,params={"paging":"false","filter":"level:eq:1"})
return root['organisationUnits']
# Drop columns
def dropColumns(self,df=None,columns=None):
if columns is not None:
deleteColumns = [column for column in columns if column in df]
else:
deleteColumns =[]
return deleteColumns
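# Example (illustration): dropColumns(df=frame.columns, columns=['confirmedValue_left', 'bogus'])
# returns only the names actually present in the frame; that list is then handed to
# DataFrame.drop (see the updatedEpidemics handling further below).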
# Get epidemics
def getEpidemics(self,programConfig=None,detectedAggEpidemics=None,detectedMergedAlertsMessage=None,dfEpidemics=None,messageColumns=None,alertColumns=None,type='EPIDEMIC',notify=None):
# New epidemics only
newEpidemics = pd.DataFrame()
# updated epidemics only
updatedEpidemics = pd.DataFrame()
# Existing epidemics only
existsEpidemics = pd.DataFrame()
if dfEpidemics.empty is not True:
dfEpidemics['period'] =pd.to_datetime(dfEpidemics['firstCaseDate']).dt.strftime('%YW%V')
if detectedAggEpidemics.empty:
print("Nothing to update or detect. Proceeding to next disease")
return
allAggEpidemics = self.getDfUpdatedEpidemics(dfEpidemics,detectedAggEpidemics,mergeColumns=['orgUnit','disease','period'],how='outer',track=True,epidemic=False)
remindersQuery = "{}{}{}'".format("reminderDate", "=='", self.today)
# New epidemics
if '_merge' in allAggEpidemics.columns:
newEpidemics = allAggEpidemics.query("_merge == 'right_only'")
newEpidemics.drop(list(newEpidemics.filter(regex = '_left')), axis = 1, inplace = True)
newEpidemics.columns = newEpidemics.columns.str.replace('_right', '')
# Existing epidemics and not updated
existsEpidemics = allAggEpidemics.query("_merge == 'left_only'")
existsEpidemics.drop(list(existsEpidemics.filter(regex = '_right')), axis = 1, inplace = True)
existsEpidemics.columns = existsEpidemics.columns.str.replace('_left', '')
# Updated epidemics
updatedEpidemics =allAggEpidemics.query("_merge == 'both'")
# Drop duplicated columns
newEpidemics = newEpidemics.loc[:,~newEpidemics.columns.duplicated()]
updatedEpidemics = updatedEpidemics.loc[:,~updatedEpidemics.columns.duplicated()]
existsEpidemics = existsEpidemics.loc[:,~existsEpidemics.columns.duplicated()]
if '_merge' not in allAggEpidemics.columns:
newEpidemics = allAggEpidemics
print("Number of New Epidemics ", len(newEpidemics.index))
if( len(newEpidemics.index) > 0):
epiCodesFields = "system/id"
epiCodesParams = { "limit" : len(newEpidemics.index) }
epiCodes = self.getHttpData(self.url,epiCodesFields,self.username,self.password,params=epiCodesParams)
if(epiCodes != 'HTTP_ERROR'):
epiCodesUids = epiCodes['codes']
newEpidemics['event'] = epiCodesUids
else:
print("Failed to generated DHIS2 UID codes")
else:
print("Exiting no new outbreaks detected")
print("Detecting and updating Outbreaks .... ")
config =programConfig['reportingProgram']['programStage']['dataElements']
if updatedEpidemics.empty is True:
if type == 'EPIDEMIC':
updatedEpidemics['dataValues'] = []
else:
pass
if updatedEpidemics.empty is not True:
if type == 'EPIDEMIC':
updatedEpidemics['confirmedValue']= updatedEpidemics.apply(self.getCaseStatus,args=(updatedEpidemics.columns,'CONFIRMED'),axis=1)
updatedEpidemics['suspectedValue']=updatedEpidemics.apply(self.getCaseStatus,args=(updatedEpidemics.columns,'SUSPECTED'),axis=1)
updatedEpidemics['deathValue']=updatedEpidemics.apply(self.getCaseStatus,args=(updatedEpidemics.columns,'DEATH'),axis=1)
updatedEpidemics.drop(list(updatedEpidemics.filter(regex = '_right')), axis = 1, inplace = True)
deleteColumns = self.dropColumns(df=updatedEpidemics.columns,columns=['confirmedValue_left','suspectedValue_left','deathValue_left'])
updatedEpidemics.drop(columns=deleteColumns,inplace=True)
updatedEpidemics.columns = updatedEpidemics.columns.str.replace('_left', '')
updatedEpidemics['dataValues'] = updatedEpidemics.apply( self.createEventDatavalues,args=(config,updatedEpidemics.columns),axis=1);
else:
pass
if newEpidemics.empty is True:
if type == 'EPIDEMIC':
newEpidemics['dataValues'] = []
else:
pass
if newEpidemics.empty is not True:
if type == 'EPIDEMIC':
newEpidemics.loc[:,'eventDate'] = newEpidemics['firstCaseDate']
newEpidemics.loc[:,'status'] = 'COMPLETED'
newEpidemics.loc[:,'program'] = str(programConfig['reportingProgram']['id'])
newEpidemics.loc[:,'programStage'] = str(programConfig['reportingProgram']['programStage']['id'])
newEpidemics.loc[:,'storedBy'] = 'idsr'
newEpidemics['epicode']=newEpidemics.apply(self.trackEpidemics,args=(dfEpidemics,'epicode',['disease','orgUnit'],'orgUnitCode'),axis=1)
newEpidemics['dataValues'] = newEpidemics.apply( self.createEventDatavalues,args=(config,newEpidemics.columns),axis=1)
#newEpidemics = newEpidemics.loc[:,~newEpidemics.columns.duplicated()]
detectedNewEpidemicsAlertsMessage = newEpidemics.filter(alertColumns)
detectedNewEpidemicsAlertsMessage[messageColumns] = detectedNewEpidemicsAlertsMessage.apply(self.createMessage,args=(notify,'EPIDEMIC'),axis=1)
else:
#newEpidemics = newEpidemics.loc[:,~newEpidemics.columns.duplicated()]
detectedNewEpidemicsAlertsMessage = newEpidemics.filter(alertColumns)
detectedNewEpidemicsAlertsMessage[messageColumns] = detectedNewEpidemicsAlertsMessage.apply(self.createMessage,args=(notify,'ALERT'),axis=1)
#mergedAlerts = pd.concat([detectedNewEpidemicsAlerts],sort=False)
detectedMergedAlertsMessage = detectedMergedAlertsMessage.append(detectedNewEpidemicsAlertsMessage)
# Merge updated, new and existing epidemics
mergedEpidemics = pd.concat([existsEpidemics,updatedEpidemics,newEpidemics],sort=False)
return [mergedEpidemics,detectedMergedAlertsMessage]
def iterateDiseases(self,diseasesMeta,epidemics,alerts,type):
newUpdatedEpis = []
existingAlerts = alerts
existingEpidemics = epidemics
programConfig = diseasesMeta['config']
mPeriods = programConfig['mPeriods']
nPeriods = programConfig['nPeriods']
rootOrgUnit = self.getRootOrgUnit()
programStartDate = moment.date(self.today).subtract(days=8)
programStartDate = moment.date(programStartDate).format('YYYY-MM-DD')
# Epidemics in the datastore
dfEpidemics = self.createDataFrame(epidemics)
# Alerts in the datastore
dfAlerts = self.createDataFrame(alerts)
# New epidemics only
detectedNewEpidemics = | pd.DataFrame() | pandas.DataFrame |
import os
import copy
import time
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from typing import Dict
def init_log_loss(last_log_loss_csv, num_models=None):
last_best_avg_loss_all = np.inf
if last_log_loss_csv is None:
if num_models is None:
raise ValueError('Missing num_models argument.')
log_loss = {'Epoch': []}
for model_idx in range(num_models):
log_loss['train_loss_' + str(model_idx)] = []
log_loss['val_loss_' + str(model_idx)] = []
log_loss['train_loss_avg'] = []
log_loss['val_loss_avg'] = []
else:
last_log_loss_df = | pd.read_csv(last_log_loss_csv) | pandas.read_csv |
# <NAME>
# Last Modified: 5/22/2020
# Verify fluid properties for Flinak
# Reference: "Annals of Nuclear Energy", Romatoski and Hu
# Note:
#Temperature is in Kelvin
import os
import sys
sys.path.insert(0,'..') #This adds the ability to call flinak prop from the main folder
sys.path.insert(0,'./Flinak') #Looking for data in a subfolder
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import FLiNaK_Prop
import Flinak_Verification_Data
import pandas as pd
path = os.getcwd()
steps = 700
T = np.linspace(500,1100,steps)
T2 = np.linspace(700,1250,steps)
Rho = np.zeros(steps)
Nu = np.zeros(steps)
Cp = np.zeros(steps)
K = np.zeros(steps)
for i in range(0,steps):
Rho[i] = FLiNaK_Prop.rho(T[i]+273.15)
Nu[i] = FLiNaK_Prop.nu(T2[i])*1000
Cp[i] = FLiNaK_Prop.Cp(T2[i])
K[i] = FLiNaK_Prop.k(T2[i])
linestyles = ['--','-','--','--']
colors = ['blue','firebrick','yellowgreen','purple']
filelist = ['2579_3-0_624xT','2603-0_669xT','2655_64-0_68xT','2729_29-0_73xT']
labels = ['2579.3-0.624*T[K]','2603-0.669*T[K]','2655.64-0.68*T[K]','2729.29-0.73*T[K]']
k = 1
L = 0
fig1 = plt.figure(k, figsize=(10,8))
for f in filelist:
densitydataframes = pd.read_csv(Path(path + '/Flinak/Density/Romatoski_Flinak_Density_' + f + '.csv'))
df = pd.DataFrame(densitydataframes)
plt.plot(df['Temp'], df['Density'], label=labels[L], linestyle=linestyles[L], color=colors[L])
L = L + 1
plt.plot(T,Rho,'r--', label='Density Used', dashes=(10,20), linewidth=3)
plt.xlabel('Temperature - C')
plt.ylabel('Density - kg/m^3')
plt.title('Flinak Density')
plt.legend(loc='upper right')
plt.grid()
k = k + 1
linestyles2 = ['--','-.','--','-','--','-.','None',':']
markers2 = ['None', 'None', 'None', 'None', 'None', 'None','s', 'None']
colors2 = ['yellowgreen','firebrick','cornflowerblue','darkorange','darkturquoise','magenta','salmon','purple']
filelist2 = ['0_04','0_025','0_0249','0_0623','0_1113','1_633','Cohen_1956_1957','e']
labels2 = ['0.04*exp(4170/T[K])','0.025*exp(4790/T[K])','0.0249*exp(4476/T[K])','0.0623*exp(3921.4/T[K])','0.1113*exp(3379/T[K])','1.633*exp(-2762.9/T[K]+3.1095E6/T^2[K])','Cohen 1956/1957','exp(-3.0489)*exp (3847/T[K])']
L = 0
fig2 = plt.figure(k, figsize=(10,8))
for f in filelist2:
viscositydataframes = pd.read_csv(Path(path + '/Flinak/Viscosity/Romatoski_Flinak_Viscosity_' + f + '.csv'))
df = | pd.DataFrame(viscositydataframes) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Results, graphs
@author: a.stratigakos
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os, sys
import pickle
from sklearn.ensemble import RandomForestRegressor
from scipy import interpolate, stats
import cvxpy as cp
import matplotlib.patches as patches
# Add current path to directory
cd = os.path.dirname(__file__) #Current directory
sys.path.append(cd)
from EnsemblePrescriptiveTree import EnsemblePrescriptiveTree
from forecast_utility_functions import *
from optimization_utility_functions import *
plt.rcParams['figure.dpi'] = 600
def evaluate_realized_costs(solutions, Node_demand_actual, col_names, grid, config, plot = True):
'Function that takes as input the obtained decisions and returns a dataframe with realized costs'
da_costs = pd.DataFrame()
rt_costs = pd.DataFrame()
total_costs = pd.DataFrame()
horizon = config['horizon']
stats_out = pd.DataFrame(data=np.zeros(( 4,len(col_names) )) , columns = col_names,
index = ['r_up', 'r_down', 'L_shed', 'G_shed'])
for j, set_solutions in enumerate(solutions):
ndays = len(set_solutions)
oos_total_cost = []
oos_da_cost = []
oos_rt_cost = []
print('Evaluation: ', col_names[j])
for k, i in enumerate(range(ndays)):
if (i == 238) or (i == 239) or (i == 240): continue
if (i+1)%25==0:print('Out of sample day ', i+1)
start = i*horizon
stop = (i+1)*horizon
da_dispatch = set_solutions[k]
#DA Variables
p_G = cp.Variable((grid['n_unit'], horizon))
R_up = cp.Variable((grid['n_unit'], horizon))
R_down = cp.Variable((grid['n_unit'], horizon))
flow_da = cp.Variable((grid['n_lines'], horizon))
theta_da = cp.Variable((grid['n_nodes'], horizon))
#RT Variables
r_up = cp.Variable((grid['n_unit'], horizon))
r_down = cp.Variable((grid['n_unit'], horizon))
L_shed = cp.Variable((grid['n_loads'],horizon))
G_shed = cp.Variable((grid['n_unit'],horizon)) #Shedding Supply, in case of extremely low demand
flow_rt = cp.Variable((grid['n_lines'],horizon))
theta_rt = cp.Variable((grid['n_nodes'], horizon))
Constraints = []
###### Fix DA decisions
Constraints += [p_G == da_dispatch['p'], flow_da == da_dispatch['flow'], theta_da == da_dispatch['theta'],
R_up == da_dispatch['R_up'], R_down == da_dispatch['R_down']]
#####RT constraints
Constraints += [ r_up <= -p_G + grid['Pmax'].repeat(24,axis=1), r_up <= grid['R_up_max'].repeat(24,axis=1),
r_down <= p_G, r_down <= grid['R_down_max'].repeat(24,axis=1),
L_shed <= Node_demand_actual[:,start:stop],
G_shed <= p_G,
r_up >= 0, r_down >= 0, L_shed >= 0, G_shed >= 0]
#RT Network flow
Constraints += [flow_rt == grid['b_diag']@grid['A']@theta_rt,
flow_rt <= grid['Line_Capacity'].repeat(24,axis=1),
flow_rt >= -grid['Line_Capacity'].repeat(24,axis=1),
theta_rt[0,:] == 0]
#!!!!! Node injections (evaluation is not done properly)
Constraints += [ grid['node_G']@(p_G + r_up-r_down-G_shed) \
+ grid['node_L']@(L_shed-Node_demand_actual[:,start:stop]) == grid['B']@(theta_rt)]
#Constraints += [ grid['node_G']@(r_up-r_down-G_shed) \
# + grid['node_L']@(L_shed-Node_demand_actual[:,start:stop]+Node_demand_expected[:,start:stop]) == \
# grid['B']@(theta_rt-theta_da)]
realized_DA_cost = cp.sum(grid['Cost']@p_G)
realized_RT_cost = cp.sum( grid['Cost_reg_up']@r_up - grid['Cost_reg_down']@r_down + grid['VOLL']*cp.sum(L_shed,axis=0) \
+ grid['gshed']*cp.sum(G_shed,axis=0))
prob = cp.Problem(cp.Minimize( realized_DA_cost + realized_RT_cost ) , Constraints)
prob.solve( solver = 'GUROBI', verbose = False)
oos_total_cost.append(prob.objective.value)
oos_da_cost.append(realized_DA_cost.value)
oos_rt_cost.append(realized_RT_cost.value)
if prob.objective.value == None:
print('Infeasible or unbound')
if (plot==True) and (i%25==0):
plt.plot(da_dispatch['p'].sum(axis=0), label='Production')
plt.plot(Node_demand_actual[:,start:stop].sum(axis=0), label='Actual Demand')
#plt.plot(Node_demand_expected[:,start:stop].sum(axis=0), label='Expected Demand')
plt.plot(G_shed.value.sum(axis=0), '-o',label='G_shed')
plt.plot(r_down.value.sum(axis=0), '*', label='Regulation-Down')
plt.plot(r_up.value.sum(axis=0), 'd',label='Regulation-Up')
plt.legend()
plt.show()
stats_out[col_names[j]][0] = stats_out[col_names[j]][0] + r_up.value.sum()
stats_out[col_names[j]][1] = stats_out[col_names[j]][1] + r_down.value.sum()
stats_out[col_names[j]][2] = stats_out[col_names[j]][2] + L_shed.value.sum()
stats_out[col_names[j]][3] = stats_out[col_names[j]][3] + G_shed.value.sum()
da_costs[col_names[j]] = np.array(oos_da_cost)
rt_costs[col_names[j]] = np.array(oos_rt_cost)
total_costs[col_names[j]] = np.array(oos_total_cost)
print(stats_out)
return da_costs, rt_costs, total_costs, stats_out
def evaluate_single_day(day, solutions, Node_demand_actual, col_names, grid, config, plot = True):
'''Function that takes as input the DA dispatch actions,
solves the RT market with actual load, returns a dataframe with realized costs'''
horizon = config['horizon']
for j, set_solutions in enumerate(solutions):
print('Out of sample day ', day)
start = day*horizon
stop = (day+1)*horizon
da_dispatch = set_solutions[day]
#DA Variables
p_G = cp.Variable((grid['n_unit'], horizon))
R_up = cp.Variable((grid['n_unit'], horizon))
R_down = cp.Variable((grid['n_unit'], horizon))
flow_da = cp.Variable((grid['n_lines'], horizon))
theta_da = cp.Variable((grid['n_nodes'], horizon))
#RT Variables
r_up = cp.Variable((grid['n_unit'], horizon))
r_down = cp.Variable((grid['n_unit'], horizon))
L_shed = cp.Variable((grid['n_loads'],horizon))
G_shed = cp.Variable((grid['n_unit'],horizon)) #Shedding Supply, in case of extremely low demand
flow_rt = cp.Variable((grid['n_lines'],horizon))
theta_rt = cp.Variable((grid['n_nodes'], horizon))
Constraints = []
###### Fix DA decisions
Constraints += [p_G == da_dispatch['p'], flow_da == da_dispatch['flow'], theta_da == da_dispatch['theta'],
R_up == da_dispatch['R_up'], R_down == da_dispatch['R_down']]
#####RT constraints
Constraints += [ r_up <= -p_G + grid['Pmax'].repeat(24,axis=1), r_up <= grid['R_up_max'].repeat(24,axis=1),
r_down <= p_G, r_down <= grid['R_down_max'].repeat(24,axis=1),
L_shed <= Node_demand_actual[:,start:stop],
G_shed <= p_G,
r_up >= 0, r_down >= 0, L_shed >= 0, G_shed >= 0]
#RT Network flow
Constraints += [flow_rt == grid['b_diag']@grid['A']@theta_rt,
flow_rt <= grid['Line_Capacity'].repeat(24,axis=1),
flow_rt >= -grid['Line_Capacity'].repeat(24,axis=1),
theta_rt[0,:] == 0]
#Node injections
#!!!!! Node injections (evaluation is not done properly)
Constraints += [ grid['node_G']@(p_G + r_up-r_down-G_shed) \
+ grid['node_L']@(L_shed-Node_demand_actual[:,start:stop]) == grid['B']@(theta_rt)]
# Constraints += [ grid['node_G']@(r_up-r_down-G_shed) \
# + grid['node_L']@(L_shed-Node_demand_actual[:,start:stop]+Node_demand_expected[:,start:stop]) == \
# grid['B']@(theta_rt-theta_da)]
realized_DA_cost = cp.sum(grid['Cost']@p_G)
realized_RT_cost = cp.sum( grid['Cost_reg_up']@r_up - grid['Cost_reg_down']@r_down + grid['VOLL']*cp.sum(L_shed,axis=0) \
+ grid['gshed']*cp.sum(G_shed,axis=0))
prob = cp.Problem(cp.Minimize( realized_DA_cost + realized_RT_cost ) , Constraints)
prob.solve( solver = 'GUROBI', verbose = False)
if prob.objective.value == None:
print('Infeasible or unbound')
if plot==True:
plt.plot(da_dispatch['p'].sum(axis=0), label='Production')
plt.plot(Node_demand_actual[:,start:stop].sum(axis=0), label='Actual Demand')
#plt.plot(Node_demand_expected[:,start:stop].sum(axis=0), label='Expected Demand')
plt.plot(G_shed.value.sum(axis=0), '-o',label='G_shed')
plt.plot(r_down.value.sum(axis=0), '*', label='Regulation-Down')
plt.plot(r_up.value.sum(axis=0), 'd',label='Regulation-Up')
plt.plot(L_shed.value.sum(axis=0), 's',label='L-shed')
plt.title(col_names[j])
plt.legend()
plt.show()
print(col_names[j]+' RT Cost: ', realized_RT_cost.value)
return
#%% Problem parameters
def problem_parameters():
parameters = {}
# Script parameters
parameters['train'] = False # Trains models (forecasting and optimization), else loads results
parameters['save_train'] = False # Save trained models (for trees etc.)
parameters['save_results'] = False # Save DA dispatch decisions and results
# Optimization Parameters
parameters['n_scen'] = 200 #Number of scenarios
parameters['horizon'] = 24 #Optimization horizon (DO NOT CHANGE)
parameters['peak_load'] = 2700 #Peak hourly demand
parameters['wind_capacity'] = 200 #(not used)
# Forecasting parameters (only for the forecasting_module.py)
parameters['split'] = 0.75 #split percentage
parameters['quant'] = np.arange(.01, 1, .01) #For probabilistic forecasts
# Starting dates create training samples of size 6months, 1y, 1.5y and 2y
#parameters['start_date'] = '2010-06-01' # Controls for sample size
#parameters['start_date'] = '2010-01-01'
#parameters['start_date'] = '2009-06-01'
#parameters['start_date'] = '2009-01-01'
parameters['split_date'] = '2011-01-01' # Validation split
return parameters
#%% Import data, create supervised learning set for prescriptive trees
config = problem_parameters()
# Load IEEE data
grid = load_ieee24(cd+'\\data\\IEEE24Wind_Data.xlsx')
results_folder = cd+'\\results\\aggregated_results\\'
results_dir = [sub[0] for sub in os.walk(results_folder)][1:]
#%% Load results for all sample sizes
da_costs = []
rt_costs = []
total_costs = []
rt_actions = []
Prescription = []
Det_solutions = []
Stoch_solutions = []
cost_oriented_Pred = []
expected_load = []
for i, directory in enumerate(results_dir):
print(directory)
# Load solutions
Prescription.append(pickle.load(open(directory+'\\Predictive_Prescriptions.pickle', 'rb')))
Det_solutions.append(pickle.load(open(directory+'\\Deterministic_DA_decisions.pickle', 'rb')))
Stoch_solutions.append(pickle.load(open(directory+'\\Stochastic_DA_decisions.pickle', 'rb')))
cost_oriented_Pred.append(pickle.load(open(directory+'\\Cost_Oriented_Pred.pickle', 'rb')))
expected_load.append(pd.read_csv(directory+'\\load_scenarios.csv', index_col=0)['Expected'].values)
#results = pd.read_excel(cd+'\\EconomicDispatch_Results.xlsx', index_col = 0)
# Actual Demand per node
if i==2:
load_forecast = | pd.read_csv(directory+'\\load_scenarios.csv', index_col=0) | pandas.read_csv |
import requests
import pandas as pd
import re
from bs4 import BeautifulSoup
url=requests.get("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list") # Used to find the stats table
d1=pd.DataFrame([])
i=0
j=0
b=[]
d1=pd.DataFrame()
for j in all_t[0].findAll('td'):
b.append(j.text)
while(i<=(208-13)):
d1=d1.append(pd.DataFrame([b[i:i+13]]) )
i=i+13
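# Each population-table row spans 13 <td> cells, so the flat cell list collected above is
# re-chunked into 13-column rows before being appended to the dataframe.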
d1 = d1.apply(pd.to_numeric, errors='ignore')
listq=pd.Series.tolist(d1[0:16][0])
list1=pd.Series.tolist(d1[0:16][1])
list2=pd.Series.tolist(d1[0:16][2])
list3=pd.Series.tolist(d1[0:16][3])
list4=pd.Series.tolist(d1[0:16][4])
list5=pd.Series.tolist(d1[0:16][5])
list6=pd.Series.tolist(d1[0:16][6])
list7=pd.Series.tolist(d1[0:16][7])
list8=pd.Series.tolist(d1[0:16][8])
list9=pd.Series.tolist(d1[0:16][9])
list10=pd.Series.tolist(d1[0:16][10])
#forecast table
c=[]
for j in all_t[1].findAll('td'):
c.append(j.text)
bv=pd.DataFrame()
i=0
while(i<=(91-13)):
bv=bv.append(pd.DataFrame([c[i:i+13]]) )
i=i+13
listq1=pd.Series.tolist(bv[0:7][0])
list11=pd.Series.tolist(bv[0:7][1])
list21=pd.Series.tolist(bv[0:7][2])
list31=pd.Series.tolist(bv[0:7][3])
list41= | pd.Series.tolist(bv[0:7][4]) | pandas.Series.tolist |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
train_df = pd.read_csv("C:\\Users\\jowet\\Downloads\\kaggle\\avito\\train.csv", parse_dates=["activation_date"])
test_df = | pd.read_csv("C:\\Users\\jowet\\Downloads\\kaggle\\avito\\test.csv", parse_dates=["activation_date"]) | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer],)
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries(
[1, 3], index=['a', 'c'],
dtype=SparseDtype(np.float64, s.fill_value),
kind=kind
)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
| tm.assert_sp_series_equal(s.iloc[indexer], exp) | pandas.util.testing.assert_sp_series_equal |
import json
import pandas as pd
from sklearn.model_selection._search import ParameterGrid
from logging_ import Logger
from adv_lib.attacks import fmn, alma, apgd
from adv_lib.attacks.auto_pgd import minimal_apgd
#from robustbench.utils import load_model
from tracking import PyTorchModelTracker
from torchvision import transforms
import torch
from utils.data_utils import create_loaders
from tqdm import tqdm
from zoo import load_model
from pytorch_lightning.trainer import Trainer
class Sweeper:
@classmethod
def from_jsonfile(cls, path, *args, **kwargs):
with open(path, "r") as f_json:
dic = json.load(f_json)
df = Sweeper.build_df_from_dict(dic)
return Sweeper(df, *args, **kwargs)
@classmethod
def build_df_from_dict(cls, d):
# make pandas df from the json config (cartesian product)
g = ParameterGrid(d)
# print([*g])
df = pd.DataFrame([*g]).drop_duplicates().convert_dtypes()
cols_to_order = ['dataset', "norm","attack", "model"]
new_columns = cols_to_order + (df.columns.drop(cols_to_order).tolist())
return df[new_columns].sort_values(cols_to_order).reset_index(drop=True)
@classmethod
def from_csvfile(cls, path, *args, **kwargs):
df = | pd.read_csv(path) | pandas.read_csv |
def calculateAnyProfile(profileType, df_labs, df_meds, df_procedures, df_diagnoses, df_phenotypes):
"""Calculate a single profile based on the type provided and data cleaned from getSubdemographicsTables
Arguments:
profileType -- which individual profile type you would like generated, this will be the category with the header information
(Options: 'labs', 'medications', 'procedures', 'diagnoses', 'phenotypes')
Keywords:
df_labs -- labs dataframe returned from getSubdemographicsTables
df_meds -- medications dataframe returned from getSubdemographicsTables
df_procedures -- procedures dataframe returned from getSubdemographicsTables
df_diagnoses -- diagnoses dataframe returned from getSubdemographicsTables
df_phenotypes -- phenotypes dataframe returned from getSubdemographicsTables
Returns Pythonic structures needed to generate profile in JSON format using the corresponding write profile function
"""
import os
import sys
import sqlalchemy
import urllib.parse
import pandas as pd
import numpy as np
import getpass
from dataclasses import dataclass
from SciServer import Authentication
from datetime import datetime
import pymssql
try:
# Make Labs Profile
if profileType == 'labs':
# High Level Info, Scalar Distribution
labs_counts = df_labs.LAB_LOINC.value_counts()
grouped_labs = df_labs.groupby(['LAB_LOINC', 'resultYear'])
labs_frequencyPerYear = (df_labs.groupby(['LAB_LOINC','PATID','resultYear']).PATID.size()
.groupby(['LAB_LOINC','resultYear']).aggregate(np.mean))
labs_fractionOfSubjects = (np.divide(df_labs.groupby(['LAB_LOINC']).PATID.nunique(),
df_labs.PATID.nunique()))
labs_units = df_labs.groupby(['LAB_LOINC']).LOINC_UNIT.unique()
labs_names = df_labs.groupby(['LAB_LOINC']).LOINC_SHORTNAME.unique()
def percentile(n):
def percentile_(x):
return x.quantile(n*0.01)
percentile_.__name__ = '%s' % n
return percentile_
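# Illustration: percentile(10) returns a callable whose __name__ is '10', so the agg call
# below yields a column named '10' holding the 10th percentile of RESULT_NUM for each
# (LAB_LOINC, resultYear) group.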
labs_stats = (grouped_labs
.RESULT_NUM.agg(['min','max', 'mean','median','std',
percentile(10), percentile(20), percentile(30),
percentile(40), percentile(50), percentile(60),
percentile(70), percentile(80), percentile(90)]))
def fracsAboveBelowNormal(x):
try:
aboveNorm = np.divide(np.sum(x.RESULT_NUM > x.range_high), x.RESULT_NUM.size)
belowNorm = np.divide(np.sum(x.RESULT_NUM < x.range_low), x.RESULT_NUM.size)
return pd.Series({'aboveNorm':aboveNorm, 'belowNorm':belowNorm})
except:
return pd.Series({'aboveNorm':np.nan, 'belowNorm':np.nan})
labs_aboveBelowNorm = (grouped_labs.apply(fracsAboveBelowNormal))
labs_correlatedLabsCoefficients = (df_labs.groupby(['LAB_LOINC','resultYear','PATID'])
.RESULT_NUM.mean())
labs_abscorrelation = 0
## LABS TO MEDICATIONS
def patientsAboveBelowNormalLabsMeds(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to meds table
abnormalPatientsMeds = df_meds[df_meds.PATID.isin(patientsAboveBelowNorm) &
(df_meds.startYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'medsAboveBelowNorm': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedMedsCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedMedsCoefficients.index:
thisLabYear = labs_correlatedMedsCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for medInd in range(len(labs_correlatedMedsCoefficients.loc[lab].medsAboveBelowNorm.values)):
mytups.append((thisLabYear.medsAboveBelowNorm.values[medInd], thisLabYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## LABS TO PROCEDURES
def patientsAboveBelowNormalLabsProcs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsAboveBelowNorm) &
(df_procedures.encounterYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'procsAboveBelowNorm': abnormalPatientsProcs.RAW_PX.value_counts().index,
'counts': abnormalPatientsProcs.RAW_PX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedProceduresCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedProceduresCoefficients.index:
thisLabYear = labs_correlatedProceduresCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for procInd in range(len(labs_correlatedProceduresCoefficients.loc[lab].procsAboveBelowNorm.values)):
mytups.append((thisLabYear.procsAboveBelowNorm.values[procInd], thisLabYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## LABS TO DIAGNOSES
def patientsAboveBelowNormalLabsDiags(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsDiags = df_diagnoses[df_diagnoses.PATID.isin(patientsAboveBelowNorm) &
(df_diagnoses.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsAboveBelowNorm': abnormalPatientsDiags.DX.value_counts().index,
'counts': abnormalPatientsDiags.DX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedDiagnosisCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedDiagnosisCoefficients.index:
thisLabYear = labs_correlatedDiagnosisCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for diagInd in range(len(labs_correlatedDiagnosisCoefficients.loc[lab].diagsAboveBelowNorm.values)):
mytups.append((thisLabYear.diagsAboveBelowNorm.values[diagInd], thisLabYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedDiagnosisCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
## LABS TO PHENOTYPES
def patientsAboveBelowNormalLabsHPOs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsHPOs = df_phenotypes[df_phenotypes.PATID.isin(patientsAboveBelowNorm) &
(df_phenotypes.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'hposAboveBelowNorm': abnormalPatientsHPOs.HPO.value_counts().index,
'counts': abnormalPatientsHPOs.HPO.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedPhenotypesCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedPhenotypesCoefficients.index:
thisLabYear = labs_correlatedPhenotypesCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for hpoInd in range(len(labs_correlatedPhenotypesCoefficients.loc[lab].hposAboveBelowNorm.values)):
mytups.append((thisLabYear.hposAboveBelowNorm.values[hpoInd], thisLabYear.counts[hpoInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (labs_counts, labs_frequencyPerYear, labs_fractionOfSubjects, labs_units, labs_names,
labs_stats, labs_aboveBelowNorm, labs_correlatedLabsCoefficients, labs_abscorrelation,
labs_correlatedMedsCoefficients, labs_correlatedProceduresCoefficients, labs_correlatedDiagnosisCoefficients,
labs_correlatedPhenotypesCoefficients)
# Make Medication Profile
elif profileType == 'medications':
meds_medication = df_meds.JH_INGREDIENT_RXNORM_CODE.unique()
meds_dosageInfo = df_meds.groupby('JH_INGREDIENT_RXNORM_CODE').RX_DOSE_ORDERED.mean()
meds_frequencyPerYear = (df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE','startYear','PATID']).PATID
.count().groupby(['JH_INGREDIENT_RXNORM_CODE','startYear']).mean())
meds_fractionOfSubjects = (np.divide(df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE']).PATID.nunique(),
df_meds.PATID.nunique()))
grouped_meds = df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE', 'startYear'])
#meds_correlatedLabsCoefficients
def patientsAboveBelowNormalMedsLabs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisRX)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
meds_correlatedLabsCoefficients = (grouped_meds.apply(patientsAboveBelowNormalMedsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedLabsCoefficients.index:
thisMedYear = meds_correlatedLabsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for labInd in range(len(meds_correlatedLabsCoefficients.loc[med].labsAboveBelowNorm.values)):
mytups.append((thisMedYear.labsAboveBelowNorm.values[labInd], thisMedYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#meds_correlatedDiagsCoefficients
def patientsCrossFreqMedsDiags(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisRX)) &
(df_diagnoses.admitYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
meds_correlatedDiagsCoefficients = (grouped_meds.apply(patientsCrossFreqMedsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedDiagsCoefficients.index:
thisMedYear = meds_correlatedDiagsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for diagInd in range(len(meds_correlatedDiagsCoefficients.loc[med].diagsCrossFreq.values)):
mytups.append((thisMedYear.diagsCrossFreq.values[diagInd], thisMedYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#meds_correlatedMedsCoefficients
def patientsCrossFreqMedsMeds(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to meds table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisRX)) &
(pd.to_datetime(df_meds.RX_START_DATE).dt.year ==
pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
meds_correlatedMedsCoefficients = (grouped_meds.apply(patientsCrossFreqMedsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedMedsCoefficients.index:
thisMedYear = meds_correlatedMedsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for medInd in range(len(meds_correlatedMedsCoefficients.loc[med].medsCrossFreq.values)):
mytups.append((thisMedYear.medsCrossFreq.values[medInd], thisMedYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## MEDS TO PROCEDURES
def patientsCrossFreqMedsProcs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisRX) &
(df_procedures.encounterYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
meds_correlatedProceduresCoefficients = (grouped_meds.apply(patientsCrossFreqMedsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedProceduresCoefficients.index:
thisMedYear = meds_correlatedProceduresCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for procInd in range(len(meds_correlatedProceduresCoefficients.loc[med].procsCrossFreq.values)):
mytups.append((thisMedYear.procsCrossFreq.values[procInd], thisMedYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## MEDS TO HPO
def patientsCrossFreqMedsHPOs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to hpo table
commonPatientsHPOs = df_phenotypes[(df_phenotypes.PATID.isin(patientsWithThisRX)) &
(df_phenotypes.admitYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'hposCrossFreq': commonPatientsHPOs.HPO.value_counts().index,
'counts': commonPatientsHPOs.HPO.value_counts().values})
meds_correlatedPhenotypesCoefficients = (grouped_meds.apply(patientsCrossFreqMedsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedPhenotypesCoefficients.index:
thisMedYear = meds_correlatedPhenotypesCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for phenoInd in range(len(meds_correlatedPhenotypesCoefficients.loc[med].hposCrossFreq.values)):
mytups.append((thisMedYear.hposCrossFreq.values[phenoInd], thisMedYear.counts[phenoInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (meds_medication, meds_dosageInfo, meds_frequencyPerYear, meds_fractionOfSubjects,
meds_correlatedLabsCoefficients, meds_correlatedDiagsCoefficients, meds_correlatedMedsCoefficients,
meds_correlatedProceduresCoefficients, meds_correlatedPhenotypesCoefficients)
# Make Procedures Profile
elif profileType == 'procedures':
procedures_code = df_procedures.RAW_PX.unique()
procedures_count = df_procedures.RAW_PX.value_counts()
procedures_frequencyPerYear = (df_procedures.groupby(['RAW_PX','encounterYear','PATID']).PATID.count()
.groupby(['RAW_PX','encounterYear']).mean())
procedures_fractionOfSubjects = (np.divide(df_procedures.groupby(['RAW_PX']).PATID.nunique(),
df_procedures.PATID.nunique()))
grouped_procs = df_procedures.groupby(['RAW_PX', 'encounterYear'])
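# Correlations below are computed per (procedure code, encounter year) group.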
#procs_correlatedLabsCoefficients
def patientsAboveBelowNormalProcsLabs(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisProc)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
procs_correlatedLabsCoefficients = (grouped_procs.apply(patientsAboveBelowNormalProcsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedLabsCoefficients.index:
thisProcYear = procs_correlatedLabsCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for labInd in range(len(procs_correlatedLabsCoefficients.loc[proc].labsAboveBelowNorm.values)):
mytups.append((thisProcYear.labsAboveBelowNorm.values[labInd], thisProcYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#procs_correlatedDiagsCoefficients
def patientsCrossFreqProcsDiags(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisProc)) &
(df_diagnoses.admitYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
procs_correlatedDiagsCoefficients = (grouped_procs.apply(patientsCrossFreqProcsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedDiagsCoefficients.index:
thisProcYear = procs_correlatedDiagsCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for diagInd in range(len(procs_correlatedDiagsCoefficients.loc[proc].diagsCrossFreq.values)):
mytups.append((thisProcYear.diagsCrossFreq.values[diagInd], thisProcYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#procs_correlatedMedsCoefficients
def patientsCrossFreqProcsMeds(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to meds table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisProc)) &
(df_meds.startYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
procs_correlatedMedsCoefficients = (grouped_procs.apply(patientsCrossFreqProcsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedMedsCoefficients.index:
thisProcYear = procs_correlatedMedsCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for medInd in range(len(procs_correlatedMedsCoefficients.loc[proc].medsCrossFreq.values)):
mytups.append((thisProcYear.medsCrossFreq.values[medInd], thisProcYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## PROCEDURES TO PROCEDURES
def patientsCrossFreqProcsProcs(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisProc) &
(df_procedures.encounterYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
procs_correlatedProceduresCoefficients = (grouped_procs.apply(patientsCrossFreqProcsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedProceduresCoefficients.index:
thisProcYear = procs_correlatedProceduresCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for procInd in range(len(procs_correlatedProceduresCoefficients.loc[proc].procsCrossFreq.values)):
mytups.append((thisProcYear.procsCrossFreq.values[procInd], thisProcYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## PROCEDURES TO HPO
def patientsCrossFreqProcsHPOs(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to phenotypes table
commonPatientsHPOs = df_phenotypes[(df_phenotypes.PATID.isin(patientsWithThisProc)) &
(df_phenotypes.admitYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'hposCrossFreq': commonPatientsHPOs.HPO.value_counts().index,
'counts': commonPatientsHPOs.HPO.value_counts().values})
procs_correlatedPhenotypesCoefficients = (grouped_procs.apply(patientsCrossFreqProcsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedPhenotypesCoefficients.index:
thisProcYear = procs_correlatedPhenotypesCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for phenoInd in range(len(procs_correlatedPhenotypesCoefficients.loc[proc].hposCrossFreq.values)):
mytups.append((thisProcYear.hposCrossFreq.values[phenoInd], thisProcYear.counts[phenoInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (procedures_code, procedures_count, procedures_frequencyPerYear, procedures_fractionOfSubjects,
procs_correlatedLabsCoefficients, procs_correlatedDiagsCoefficients, procs_correlatedMedsCoefficients,
procs_correlatedProceduresCoefficients, procs_correlatedPhenotypesCoefficients)
# Make Diagnoses Profile
elif profileType == 'diagnoses':
diagnoses_code = df_diagnoses.DX.unique()
diagnoses_count = df_diagnoses.DX.value_counts()
diagnoses_frequencyPerYear = (df_diagnoses.groupby(['DX','admitYear','PATID']).PATID
.count().groupby(['DX','admitYear']).mean())
diagnoses_fractionOfSubjects = (np.divide(df_diagnoses.groupby(['DX']).PATID.nunique(),
df_diagnoses.PATID.nunique()))
grouped_diags = df_diagnoses.groupby(['DX','admitYear'])
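# Correlations below are computed per (DX code, admit year) group.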
#diags_correlatedLabsCoefficients
def patientsAboveBelowNormalDiagsLabs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisDiag)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
diags_correlatedLabsCoefficients = (grouped_diags.apply(patientsAboveBelowNormalDiagsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedLabsCoefficients.index:
thisDiagYear = diags_correlatedLabsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for labInd in range(len(diags_correlatedLabsCoefficients.loc[diag].labsAboveBelowNorm.values)):
mytups.append((thisDiagYear.labsAboveBelowNorm.values[labInd], thisDiagYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#diags_correlatedDiagsCoefficients
def patientsCrossFreqDiagsDiags(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisDiag)) &
(df_diagnoses.admitYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
diags_correlatedDiagsCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedDiagsCoefficients.index:
thisDiagYear = diags_correlatedDiagsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for diagInd in range(len(diags_correlatedDiagsCoefficients.loc[diag].diagsCrossFreq.values)):
mytups.append((thisDiagYear.diagsCrossFreq.values[diagInd], thisDiagYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#diags_correlatedMedsCoefficients
def patientsCrossFreqDiagsMeds(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to meds table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisDiag)) &
(df_meds.startYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
diags_correlatedMedsCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedMedsCoefficients.index:
thisDiagYear = diags_correlatedMedsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for medInd in range(len(diags_correlatedMedsCoefficients.loc[diag].medsCrossFreq.values)):
mytups.append((thisDiagYear.medsCrossFreq.values[medInd], thisDiagYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## DIAGNOSES TO PROCEDURES
def patientsCrossFreqDiagsProcs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisDiag) &
(df_procedures.encounterYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
diags_correlatedProceduresCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedProceduresCoefficients.index:
thisDiagYear = diags_correlatedProceduresCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for procInd in range(len(diags_correlatedProceduresCoefficients.loc[diag].procsCrossFreq.values)):
mytups.append((thisDiagYear.procsCrossFreq.values[procInd], thisDiagYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
#diags_correlatedPhenotypesCoefficients
def patientsCrossFreqDiagsHPOs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to phenotypes table
commonPatientsHPOs = df_phenotypes[(df_phenotypes.PATID.isin(patientsWithThisDiag)) &
(df_phenotypes.admitYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'hposCrossFreq': commonPatientsHPOs.HPO.value_counts().index,
'counts': commonPatientsHPOs.HPO.value_counts().values})
diags_correlatedPhenotypesCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedPhenotypesCoefficients.index:
thisDiagYear = diags_correlatedPhenotypesCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for phenoInd in range(len(diags_correlatedPhenotypesCoefficients.loc[diag].hposCrossFreq.values)):
mytups.append((thisDiagYear.hposCrossFreq.values[phenoInd], thisDiagYear.counts[phenoInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (diagnoses_code, diagnoses_count, diagnoses_frequencyPerYear, diagnoses_fractionOfSubjects,
diags_correlatedLabsCoefficients, diags_correlatedDiagsCoefficients, diags_correlatedMedsCoefficients,
diags_correlatedProceduresCoefficients, diags_correlatedPhenotypesCoefficients)
# Make Phenotypes Profile
elif profileType == 'phenotypes':
phenotypes_code = df_phenotypes.HPO.unique()
phenotypes_count = df_phenotypes.HPO.value_counts()
phenotypes_frequencyPerYear = (df_phenotypes.groupby(['HPO','admitYear','PATID']).PATID
.count().groupby(['HPO','admitYear']).mean())
phenotypes_fractionOfSubjects = (np.divide(df_phenotypes.groupby(['HPO']).PATID.nunique(),
df_phenotypes.PATID.nunique()))
grouped_phenotypes = df_phenotypes.groupby(['HPO','admitYear'])
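# Correlations below are computed per (HPO term, admit year) group; the helper functions reuse the
# "Diags" naming from the diagnoses branch but are applied to the phenotype groups.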
#phenos_correlatedLabsCoefficients
def patientsAboveBelowNormalDiagsLabs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisDiag)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
phenos_correlatedLabsCoefficients = (grouped_phenotypes.apply(patientsAboveBelowNormalDiagsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in phenos_correlatedLabsCoefficients.index:
thisDiagYear = phenos_correlatedLabsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for labInd in range(len(phenos_correlatedLabsCoefficients.loc[diag].labsAboveBelowNorm.values)):
mytups.append((thisDiagYear.labsAboveBelowNorm.values[labInd], thisDiagYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
phenos_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#phenos_correlatedDiagsCoefficients
def patientsCrossFreqDiagsDiags(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisDiag)) &
(df_diagnoses.admitYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
phenos_correlatedDiagsCoefficients = (grouped_phenotypes.apply(patientsCrossFreqDiagsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in phenos_correlatedDiagsCoefficients.index:
thisDiagYear = phenos_correlatedDiagsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for diagInd in range(len(phenos_correlatedDiagsCoefficients.loc[diag].diagsCrossFreq.values)):
mytups.append((thisDiagYear.diagsCrossFreq.values[diagInd], thisDiagYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
phenos_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#phenos_correlatedMedsCoefficients
def patientsCrossFreqDiagsMeds(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to meds table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisDiag)) &
(df_meds.startYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
phenos_correlatedMedsCoefficients = (grouped_phenotypes.apply(patientsCrossFreqDiagsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in phenos_correlatedMedsCoefficients.index:
thisDiagYear = phenos_correlatedMedsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for medInd in range(len(phenos_correlatedMedsCoefficients.loc[diag].medsCrossFreq.values)):
mytups.append((thisDiagYear.medsCrossFreq.values[medInd], thisDiagYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
phenos_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## PHENOTYPES TO PROCEDURES
def patientsCrossFreqDiagsProcs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisDiag) &
(df_procedures.encounterYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
phenos_correlatedProceduresCoefficients = (grouped_phenotypes.apply(patientsCrossFreqDiagsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in phenos_correlatedProceduresCoefficients.index:
thisDiagYear = phenos_correlatedProceduresCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for procInd in range(len(phenos_correlatedProceduresCoefficients.loc[diag].procsCrossFreq.values)):
mytups.append((thisDiagYear.procsCrossFreq.values[procInd], thisDiagYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
phenos_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
#phenos_correlatedPhenotypesCoefficients
def patientsCrossFreqDiagsHPOs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to phenotypes table
commonPatientsHPOs = df_phenotypes[(df_phenotypes.PATID.isin(patientsWithThisDiag)) &
(df_phenotypes.admitYear == | pd.to_datetime(x.ADMIT_DATE) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Sun May 21 13:13:26 2017
@author: ning
"""
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
try:
function_dir = 'D:\\NING - spindle\\Spindle_by_Graphical_Features'
os.chdir(function_dir)
except OSError:
function_dir = 'C:\\Users\\ning\\OneDrive\\python works\\Spindle_by_Graphical_Features'
os.chdir(function_dir)
import eegPipelineFunctions
try:
file_dir = 'D:\\NING - spindle\\training set\\road_trip\\'
# file_dir = 'D:\\NING - spindle\\training set\\road_trip_29_channels\\'
os.chdir(file_dir)
except OSError:
file_dir = 'C:\\Users\\ning\\Downloads\\road_trip\\'
# file_dir = 'C:\\Users\\ning\\Downloads\\road_trip_29_channels\\'
os.chdir(file_dir)
if False:
signal_features_dict = {}
graph_features_dict = {}
for directory_1 in [f for f in os.listdir(file_dir) if ('epoch_length' in f)]:
sub_dir = file_dir + directory_1 + '\\'
epoch_length = directory_1[-3]
os.chdir(sub_dir)
df_cc, df_pli, df_plv, df_signal,df_graph = [],[],[],[],[]
for sub_fold in os.listdir(sub_dir):
sub_fold_dir = sub_dir + sub_fold + '\\'
os.chdir(sub_fold_dir)
cc_features, pli_features, plv_features, signal_features = [pd.read_csv(f) for f in os.listdir(sub_fold_dir) if ('csv' in f)]
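# NOTE: this unpacking assumes the folder contains exactly four CSVs and that os.listdir returns
# them in the order cc, pli, plv, signal.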
#df_cc.append(cc_features)
#df_pli.append(pli_features)
#df_plv.append(plv_features)
label = cc_features['label']
cc_features = eegPipelineFunctions.get_real_part(cc_features)
pli_features = eegPipelineFunctions.get_real_part(pli_features)
plv_features = eegPipelineFunctions.get_real_part(plv_features)
cc_features.columns = ['cc_'+name for name in cc_features]
pli_features.columns = ['pli_'+name for name in pli_features]
plv_features.columns = ['plv_'+name for name in plv_features]
cc_features = cc_features.drop('cc_label', axis=1)
pli_features = pli_features.drop('pli_label', axis=1)
plv_features = plv_features.drop('plv_label', axis=1)
df_combine = pd.concat([cc_features,pli_features,plv_features],axis=1)
df_combine['label']=label
df_signal.append(signal_features)
df_graph.append(df_combine)
signal_features_dict[directory_1] = pd.concat(df_signal)
graph_features_dict[directory_1] = | pd.concat(df_graph) | pandas.concat |
"""Extended DataFrame functionality."""
from typing import Any, Iterator, Optional, Sequence, Text, Tuple, Union, cast
import numpy as np
import pandas as pd
from snmp_fetch.utils import cuint8_to_int
from .types import ip_address as ip
def column_names(
n: Optional[int] = None, alphabet: Sequence[Text] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
) -> Iterator[Text]:
"""Generate unique temporary column names."""
base_gen = column_names(alphabet=alphabet)
base = ''
letters = alphabet
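# Yields 'A'..'Z', then 'AA', 'AB', ... by recursively generating prefixes (spreadsheet-style labels).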
while True:
if n is not None:
if n <= 0:
return
n = n - 1
if not letters:
base = next(base_gen) # pylint: disable=stop-iteration-return # infinite generator
letters = alphabet
column, letters = letters[0], letters[1:]
yield base + column
@ | pd.api.extensions.register_dataframe_accessor('inet') | pandas.api.extensions.register_dataframe_accessor |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 10 00:10:23 2022
@author: <NAME>
Adapted from <NAME>
"""
r"""
Forward Model
"""
# Standard Library imports
import gzip
from collections import OrderedDict
# Third party imports
import numpy as np
import pandas as pd
import xarray as xr
# Semi-local imports
import name_qch4_couple.io
# Local imports
import routines
# Function used to create the emissions map
# For Section 2.9.: factor_q = a (Table B.1); factor_s = b (Table B.1); factor_sce = 0
# For Section 2.11.: factor_q = 1; factor_s = 1; factor_sce from Table 2.4
def read_Qsink(dates_tHour, factor_q, factor_s, factor_sce):
grid_info = routines.define_grid()
nlat = grid_info['nlat']
nlon = grid_info['nlon']
Qfiles_H2 = OrderedDict([
(0, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_AWB.nc', 'CO_emissions', '1M'],
]),
(1, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_ENE.nc', 'CO_emissions', '1M'],
]),
(2, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_REF.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_IND.nc', 'CO_emissions', '1M'],
]),
(3, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_CDS.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_CRS.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_TRO.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_LTO.nc', 'CO_emissions', '1M'],
]),
(4, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_RCO.nc', 'CO_emissions', '1M'],
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_PRO.nc', 'CO_emissions', '1M'],
]),
(5, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_FFF.nc', 'CO_emissions', '1M'],
]),
(6, [
['inputs/emissions/CO_emissions_processed/prior_edgar_v6_0_co_SWD.nc', 'CO_emissions', '1M'],
]),
(7, [
['inputs/emissions/biomass/gfed_2012.nc', 'H2_emissions', '1M'],
]),
(8, [
['inputs/emissions/prior_edgar_v6_0_PRO_GAS.nc', 'CH4_emissions', '1M'],
]),
])
Q_factor = {
0: [0.0357],
1: [0.0143],
2: [0.0143],
3: [0.0357],
4: [0.0217],
5: [0.0143],
6: [0.005],
7: [1],
8: [factor_sce],
}
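# The keys mirror the source groups defined in Qfiles_H2; each value scales that group's inventoried
# flux (presumably converting it to an H2 flux), and key 8 is the scenario term controlled by factor_sce.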
Q = np.zeros((nlat, nlon))
for s, vs in Qfiles_H2.items():
for v in vs:
with xr.open_dataset(v[0]) as ds_read:
with ds_read.load() as Q_in:
t_Q = Q_in['time']
if v[2] == '1Y':
t = np.datetime64(
dates_tHour[0].floor('d').replace(month=1, day=1)
)
t_in = min(t_Q, key=lambda x: abs(x - t))
else:
t = np.datetime64(
dates_tHour[0].floor('d').replace(day=1)
)
t_in = min(
t_Q[t_Q.dt.month==dates_tHour[0].month],
key=lambda x: abs(x - t)
)
Q += Q_in[v[1]].sel(time=t_in).values * Q_factor[s] * 1.e3 * factor_q # kg -> g
lwfile = 'inputs/sink/land_mask.nc'
with xr.open_dataset(lwfile) as ds_read:
lwin = ds_read.load()
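# lo_land is assumed to be the land fraction in percent; convert to a fraction and apply a
# uniform soil-uptake rate scaled by factor_s.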
soil_sink = np.array(lwin.lo_land)/100 * -0.000000005 * factor_s
Q += soil_sink
return Q
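# Example (illustrative; the variable names below are hypothetical):
# dates = pd.date_range('2018-01-01', '2018-01-31 23:00', freq='1H')
# Q = read_Qsink(dates, factor_q=1.0, factor_s=1.0, factor_sce=0.0)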
def r_decc(fpath):
odata = pd.read_csv(
fpath,
usecols=lambda x: x.lower() in ['time', 'h2_ppb'],
index_col=['time'],
skipinitialspace=True,
parse_dates=['time']
).dropna()
odata.columns = odata.columns.str.lower()
return odata
def read_obs(timestamps, site, factor, resample='1H'):
date = timestamps[0].strftime('%Y-%m')
t0 = timestamps[0].strftime('%Y-%m-%d %H')
t1 = timestamps[-1].strftime('%Y-%m-%d %H')
if site == 'WAO':
ifile = 'inputs/obs/WAO_H2_oct2021.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'MHD_10magl':
ifile = 'inputs/baseline/MHD_2018.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'bas':
ifile = 'outputs/models/mhd_bas/chi0_proc.csv'
col_or_no = 'chi0p_H2'
sigma_col_or_no = 0.2
elif site == 'mod':
ifile = f'outputs/scenarios/new_merged/merged_wao_scenario_{factor}.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'bas_mhd':
ifile = 'outputs/scenarios/merged/merged_bas_mhd.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
elif site == 'bas_wao':
ifile = 'outputs/scenarios/merged/merged_bas_wao.csv'
col_or_no = 'h2_ppb'
sigma_col_or_no = 0.2
else:
ifile = False
col_or_no = np.nan
sigma_col_or_no = np.nan
if ifile:
all_obs_raw = r_decc(ifile).sort_index().loc[t0:t1]
obs_raw = all_obs_raw[col_or_no]
sigma_obs_raw = (all_obs_raw[sigma_col_or_no]
if isinstance(sigma_col_or_no, str) else
pd.Series(sigma_col_or_no, index=all_obs_raw.index))
if isinstance(col_or_no, str):
obs = (obs_raw
if resample is False else
obs_raw.resample(resample).mean().reindex(timestamps))
else:
obs = pd.Series(col_or_no, index=timestamps)
if isinstance(sigma_col_or_no, str) or isinstance(col_or_no, str):
sigma_obs = (
sigma_obs_raw
if resample is False else
sigma_obs_raw.resample(resample).apply(
lambda x: np.sum(x**2)).reindex(timestamps))
else:
sigma_obs = pd.Series(sigma_col_or_no, index=timestamps)
return obs, sigma_obs
def read_baseline(timestamps, site, btype="default"):
date = timestamps[0].strftime('%Y-%m')
year = timestamps[0].strftime('%Y')
if site == 'MHD_10magl':
if btype == 'default':
chi0file = (
'outputs/baseline/baseline-MHD_10magl-h2-2018.nc'
)
with xr.open_dataset(chi0file) as ds_read: #put as
with ds_read.load() as ds:
chi0 = ds.chi_H2.sel(time=date).to_series()
var_chi0 = ds.var_chi_H2.sel(time=date).to_series()
elif btype == 'intem':
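# INTEM baselines appear to be available only for 2020-07 through 2020-12; months outside that
# window are clamped to the nearest available month and flagged via bflag.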
if timestamps[0] < pd.to_datetime('2020-07'):
bmonth = '2020-07'
bflag = 1
elif timestamps[0] > pd.to_datetime('2020-12'):
bmonth = '2020-12'
bflag = 2
else:
bmonth = date
bflag = 0
m_sta = (pd.to_datetime(bmonth)).date().strftime('%Y%m%d')
m_end = (
pd.to_datetime(bmonth)+pd.tseries.offsets.MonthEnd(0)
).date().strftime('%Y%m%d')
chi0file = (
'/home/ec5/hpc-work/data_archive/decc/'
'EUROPE_UKV_HFD_100magl/Pos_CH4/'
'H1_C_MHT1T2R1ANH1B2CBWB_ch4_OBUSEXL_4h_Fnc10_'
f'{m_sta}-{m_end}_average_f.gz'
)
with gzip.open(chi0file, mode='r') as chi0in:
chi0_0all = pd.read_csv(
chi0in, sep=' ', skipinitialspace=True,
skiprows=[5], header=4,
parse_dates={'datetime': ['YYYY', 'MM', 'DD', 'HH', 'MI']},
#converters={
# 'datetime': lambda Y, m, d, H, M:
# pd.to_datetime(f'{Y} {m} {d} {H} {M}',
# format='%Y %m %d %H %M'),
# },
#index_col='datetime'
)
chi0_0all.index = pd.to_datetime(
chi0_0all['datetime'], format='%Y %m %d %H %M')
chi0_0 = chi0_0all['BasePos']
var_chi0_0 = chi0_0all['BasePos']
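# NOTE: the "variance" series mirrors BasePos here because no uncertainty column is read from the INTEM file.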
if bflag == 1:
chi0 = | pd.Series(chi0_0.iloc[-1], index=timestamps) | pandas.Series |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
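# These operators act element-wise on DataFrame columns and return boolean Series; targets can also be
# referenced through column_prefix_map, which expands prefixes such as "--" to real column names.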
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals( | pandas.Series([True, False, False]) | pandas.Series |
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# plt.close("all")
CURRENT_DIRECTORY = Path(__file__).parent
def plot_01():
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
ts.plot()
# Display plot in Pycharm or terminal
# plt.show()
plt.savefig(CURRENT_DIRECTORY / 'pandas_plot_01.png')
def plot_02():
ts = pd.Series(np.random.randn(1000), index= | pd.date_range('1/1/2000', periods=1000) | pandas.date_range |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue May 4 17:39:59 2021
Collection of custom evaluation functions for embedding
@author: marathomas
"""
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_samples, silhouette_score
import seaborn as sns
import matplotlib.pyplot as plt
def make_nn_stats_dict(calltypes, labels, nb_indices):
"""
Function that evaluates the labels of the k nearest neighbors of
all datapoints in a dataset.
Parameters
----------
calltypes : 1D numpy array (string) or list of strings
set of class labels
labels: 1D numpy array (string) or list of strings
vector/list of class labels in dataset
nb_indices: 2D numpy array (numeric integer)
Array I(X,k) containing the indices of the k nearest
nearest neighbors for each datapoint X of a
dataset
Returns
-------
nn_stats_dict : dictionary[<class label string>] = 2D numpy array (numeric)
dictionary that contains one array for each type of label.
Given a label L, nn_stats_dict[L] contains an array A(X,Y),
where Y is the number of class labels in the dataset and each
row X represents a datapoint of label L in the dataset.
A[i,j] is the number of nearest neighbors of datapoint i that
are of label calltypes[j].
Example
-------
>>>
"""
nn_stats_dict = {}
for calltype in calltypes:
# which datapoints in the dataset are of this specific calltype?
# -> get their indices
call_indices = np.asarray(np.where(labels==calltype))[0]
# initialize array that can save the class labels of the k nearest
# neighbors of all these datapoints
calltype_counts = np.zeros((call_indices.shape[0],len(calltypes)))
# for each datapoint
for i,ind in enumerate(call_indices):
# what are the indices of its k nearest neighbors
nearest_neighbors = nb_indices[ind]
            # for each of these neighbors
for neighbor in nearest_neighbors:
# what is their label
neighbor_label = labels[neighbor]
# put a +1 in the array
calltype_counts[i,np.where(np.asarray(calltypes)==neighbor_label)[0][0]] += 1
# save the resulting array in dictionary
# (1 array per calltype)
nn_stats_dict[calltype] = calltype_counts
return nn_stats_dict
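# Usage sketch for make_nn_stats_dict (toy labels and neighbor indices, invented
# here purely for illustration):
#   calltypes = ["A", "B"]
#   labels = np.array(["A", "A", "B", "B"])
#   nb_indices = np.array([[1, 2], [0, 2], [3, 0], [2, 1]])  # k=2 neighbors per point
#   stats = make_nn_stats_dict(calltypes, labels, nb_indices)
#   # stats["A"] has shape (2, 2): neighbor-label counts for the two "A" datapoints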
def get_knn(k,embedding):
"""
Function that finds k nearest neighbors (based on
euclidean distance) for each datapoint in a multidimensional
dataset
Parameters
----------
k : integer
number of nearest neighbors
embedding: 2D numpy array (numeric)
a dataset E(X,Y) with X datapoints and Y dimensions
Returns
-------
indices: 2D numpy array (numeric)
Array I(X,k) containing the indices of the k nearest
        neighbors for each datapoint X of the input
dataset
distances: 2D numpy array (numeric)
Array D(X,k) containing the euclidean distance to each
of the k nearest neighbors for each datapoint X of the
input dataset. D[i,j] is the euclidean distance of datapoint
embedding[i,:] to its jth neighbor.
Example
-------
>>>
"""
# Find k nearest neighbors
nbrs = NearestNeighbors(metric='euclidean',n_neighbors=k+1, algorithm='brute').fit(embedding)
distances, indices = nbrs.kneighbors(embedding)
# need to remove the first neighbor, because that is the datapoint itself
indices = indices[:,1:]
distances = distances[:,1:]
return indices, distances
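# Usage sketch for get_knn (tiny 2-D embedding with made-up coordinates):
#   emb = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
#   indices, distances = get_knn(1, emb)
#   # indices[0] == [1]: the nearest neighbor of point 0 is point 1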
def make_statstabs(nn_stats_dict, calltypes, labels,k):
"""
Function that generates two summary tables containing
the frequency of different class labels among the k nearest
neighbors of datapoints belonging to a class.
Parameters
----------
nn_stats_dict : dictionary[<class label string>] = 2D numpy array (numeric)
dictionary that contains one array for each type of label.
Given a label L, nn_stats_dict[L] contains an array A(X,Y),
where Y is the number of class labels in the dataset and each
row X represents a datapoint of label L in the dataset.
A[i,j] is the number of nearest neighbors of datapoint i that
are of label calltypes[j].
        (is returned from evaluation_functions.make_nn_stats_dict)
calltypes : 1D numpy array (string) or list of strings
set of class labels
labels: 1D numpy array (string) or list of strings
vector/list of class labels in dataset
k: Integer
number of nearest neighbors
Returns
-------
stats_tab: 2D pandas dataframe (numeric)
Summary table T(X,Y) with X,Y = number of classes.
T[i,j] is the average percentage of datapoints with class label j
in the neighborhood of datapoints with class label i
stats_tab_norm: 2D pandas dataframe (numeric)
Summary table N(X,Y) with X,Y = number of classes.
N[i,j] is the log2-transformed ratio of the percentage of datapoints
with class label j in the neighborhood of datapoints with class label i
to the percentage that would be expected by random chance and random
distribution. (N[i,j] = log2(T[i,j]/random_expect))
Example
-------
>>>
"""
# Get the class frequencies in the dataset
overall = np.zeros((len(calltypes)))
for i,calltype in enumerate(calltypes):
overall[i] = sum(labels==calltype)
overall = (overall/np.sum(overall))*100
# Initialize empty array for stats_tab and stats_tab_norm
stats_tab = np.zeros((len(calltypes),len(calltypes)))
stats_tab_norm = np.zeros((len(calltypes),len(calltypes)))
# For each calltype
for i, calltype in enumerate(calltypes):
# Get the table with all neighbor label counts per datapoint
stats = nn_stats_dict[calltype]
# Average across all datapoints and transform to percentage
stats_tab[i,:] = (np.mean(stats,axis=0)/k)*100
# Divide by overall percentage of this class in dataset
# for the normalized statstab version
stats_tab_norm[i,:] = ((np.mean(stats,axis=0)/k)*100)/overall
# Turn into dataframe
    stats_tab = pd.DataFrame(stats_tab)
#!/usr/bin/env python
# stdlib imports
import os.path
import argparse
from collections import OrderedDict
import sys
import warnings
import textwrap
import logging
# third party imports
import pandas as pd
# local imports
from gmprocess.io.read import _get_format, read_data
from gmprocess.utils.args import add_shared_args
from gmprocess.core.stationtrace import REV_PROCESS_LEVELS
COLUMNS = ['Filename', 'Format', 'Process Level',
'Start Time', 'End Time',
'Duration (s)', 'Network', 'Station', 'Channel',
'Sampling Rate (Hz)', 'Latitude', 'Longitude']
ERROR_COLUMNS = ['Filename', 'Error']
def get_dataframe(filename, stream):
df = pd.DataFrame(columns=COLUMNS, index=None)
row = pd.Series(index=COLUMNS)
fpath, fname = os.path.split(filename)
for trace in stream:
row['Filename'] = filename
row['Format'] = trace.stats['standard']['source_format']
plevel = trace.stats['standard']['process_level']
row['Process Level'] = REV_PROCESS_LEVELS[plevel]
row['Start Time'] = trace.stats.starttime
row['End Time'] = trace.stats.endtime
dt = trace.stats.endtime - trace.stats.starttime
row['Duration (s)'] = dt
row['Network'] = trace.stats.network
row['Station'] = trace.stats.station
row['Channel'] = trace.stats.channel
row['Sampling Rate (Hz)'] = trace.stats.sampling_rate
row['Latitude'] = trace.stats.coordinates['latitude']
row['Longitude'] = trace.stats.coordinates['longitude']
df = df.append(row, ignore_index=True)
return df
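# Illustrative only: a call such as get_dataframe('20150101.AB.STA.mseed', stream)
# (hypothetical filename, with `stream` obtained from gmprocess.io.read.read_data)
# returns one row per trace, populated with the COLUMNS fields above.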
def render_concise(files, save=False):
errors = pd.DataFrame(columns=ERROR_COLUMNS)
df = pd.DataFrame(columns=COLUMNS, index=None)
folders = []
for filename in files:
fpath, fname = os.path.split(filename)
if fpath not in folders:
sys.stderr.write('Parsing files from subfolder %s...\n' % fpath)
folders.append(fpath)
try:
streams = read_data(filename)
for stream in streams:
tdf = get_dataframe(filename, stream)
df = pd.concat([df, tdf], axis=0)
except BaseException as e:
row = pd.Series(index=ERROR_COLUMNS)
row['Filename'] = os.path.abspath(filename)
row['Error'] = str(e)
errors = errors.append(row, ignore_index=True)
continue
# organize dataframe by network, station, and channel
df = df.sort_values(['Network', 'Station', 'Channel'])
if not save:
print(df.to_string(index=False))
return (df, errors)
def render_dir(rootdir, concise=True, save=False):
datafiles = []
for root, dirs, files in os.walk(rootdir):
for tfile in files:
ffile = os.path.join(root, tfile)
datafiles.append(ffile)
if concise:
df, errors = render_concise(datafiles, save=save)
else:
errors = render_verbose(datafiles)
df = None
return (df, errors)
def render_verbose(files):
errors = pd.DataFrame(columns=ERROR_COLUMNS)
for fname in files:
try:
fmt = _get_format(fname)
stream = read_data(fname)[0]
stats = stream[0].stats
tpl = (stats['coordinates']['latitude'],
stats['coordinates']['longitude'],
stats['coordinates']['elevation'])
locstr = 'Lat: %.4f Lon: %.4f Elev: %.1f' % tpl
mydict = OrderedDict(
[('Filename', fname),
('Format', fmt),
('Station', stats['station']),
('Network', stats['network']),
('Source', stats['standard']['source']),
('Location', stats['location']),
('Coordinates', locstr),
])
print()
print(pd.Series(mydict).to_string())
for trace in stream:
channel = OrderedDict()
stats = trace.stats
channel['Channel'] = stats['channel']
channel['Start Time'] = stats['starttime']
channel['End Time'] = stats['endtime']
channel['Number of Points'] = stats['npts']
channel['Units'] = stats['standard']['units']
channel['Peak Value'] = trace.max()
print()
chstr = pd.Series(channel).to_string()
parts = ['\t' + line for line in chstr.split('\n')]
chstr = '\n'.join(parts)
print(chstr)
except BaseException as e:
row = pd.Series(index=ERROR_COLUMNS)
row['Filename'] = os.path.abspath(fname)
row['Error'] = str(e)
errors = errors.append(row, ignore_index=True)
continue
return errors
def main():
description = '''Display summary information about a file, multiple files,
or directories of files containing strong motion data in the supported
formats.
Use the -p option to print errors for files that could not be read.
Use the -s option to save summary data AND errors to Excel/CSV format.
.'''
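    # Example invocation (hypothetical data directory, shown for illustration only):
    #   <this script> /path/to/datafiles --concise --save summary.xlsx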
parser = argparse.ArgumentParser(description=description)
parser.add_argument('files_or_dir', nargs='+',
help='Files or directory to inspect.',
type=str)
chelp = '''Print out results in concise CSV form. Columns are:
Filename
Format
Process Level
Start Time
End Time
# of Traces
Duration
Network
Station
Channels
Sampling rate
Latitude
Longitude
'''
parser.add_argument('-c', '--concise', action='store_true',
help=chelp)
shelp = '''Save concise results to CSV/Excel file
(format determined by extension (.xlsx for Excel, anything else for CSV.))
'''
parser.add_argument('-s', '--save', metavar='OUTFILE',
help=shelp)
phelp = 'Print error log containing files that could not be parsed.'
parser.add_argument('--quiet-errors', action='store_true',
help=phelp)
# Shared arguments
parser = add_shared_args(parser)
args = parser.parse_args()
if not args.concise and args.save:
msg = '''
****************************************************************
Saving verbose output is not supported. Use -c and -s
options together to save tabular summary/error information about
the data.
****************************************************************
'''
print(textwrap.dedent(msg))
parser.print_help()
sys.exit(1)
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
warnings.filterwarnings("ignore")
    pd.set_option('display.max_columns', 10000)
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 21:53:00 2018
@author: RomanGutin
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plot_data={}
#####AM_Tuning With Wavelet
def AM_W(x,first,last,steps):
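    # For each amino acid, sweep candidate encoding scores between `first` and `last`,
    # score each candidate by cross-validation, and keep the best-scoring value in ltw_AM.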
    sweep = list(np.linspace(first, last, int((last - first) / steps)))  # candidate scores to sweep
for acid in count_df.index:
CrossValidation_Scores= []
for score in sweep:
A = x.copy()
ltw_AM[acid]= score
A.replace(ltw_AM,inplace=True)
MHat_Transformed= pd.DataFrame(W(A), index=just_let.index)
            MHat_Transformed['pMeas'] = nine_pep['pMeas']
CrossValidation_Scores.append(CrossValidation(A,10))
ltw_AM[acid] = sweep[CrossValidation_Scores.index(max(CrossValidation_Scores))]
plt.plot(sweep,CrossValidation_Scores)
plt.title(str(acid))
plt.show()
        plot_data[acid] = pd.DataFrame([sweep, CrossValidation_Scores])
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
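        # sort=True orders the combined index lexicographically (q, x, y, z);
        # sort=False keeps the order of first appearance (x, y, z, q)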
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH 2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
| tm.assert_frame_equal(res, exp) | pandas._testing.assert_frame_equal |
"""figures of merit is a collection of financial calculations for energy.
This module contains financial calculations based on solar power and batteries
in a given network. The networks used are defined as network objects (see evolve parsers).
TODO: Add inverters (they are not considered at the moment) and improve NaN handling.
"""
import numpy
import pandas as pd
from c3x.data_cleaning import unit_conversion
#Todo:
#Add inverters: Inverters are not considered at the moment
#Improve NaN handling
def meter_power(meas_dict: dict, meter: int, axis: int = 0, column: int = 0) -> pd.Series:
"""
calculates the power for a meter of individual measurement points
by summing load, solar and battery power
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
meter(int): Id for a meter
axis (int): how data is concatenated for results
column (int): column index to be used
return:
meter_p (pd.Series): combined power (solar, battery, load)
"""
meter_p = pd.DataFrame()
if meas_dict[meter]:
meter_p = pd.DataFrame()
for meas in meas_dict[meter]:
if 'load' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
elif 'solar' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
elif 'batteries' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
meter_p = meter_p.sum(axis=1)
return meter_p
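# Illustrative shape of meas_dict assumed by the helpers in this module (annotation only;
# the key names below are made up - the real measurement names come from the evolve parsers):
#   meas_dict = {
#       "1": {
#           "load_1": pd.DataFrame(...),       # power per measurement point
#           "solar_1": pd.DataFrame(...),
#           "batteries_1": pd.DataFrame(...),
#       },
#       ...
#   }
# meter_power(meas_dict, "1", axis=1) then sums one column from each measurement into a
# single net power series for that meter.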
def financial(meter_p: pd.Series, import_tariff: pd.Series, export_tariff: pd.Series) -> pd.Series:
"""
Evaluate the financial outcome for a customer.
A conversion from kW to kWh is handled internally
    Note: assumes a constant step size in timestamps (ensure a full, evenly spaced index beforehand)
Args:
meter_p (pd.Series ): Power of a node
import_tariff (pd.Series): Expects this to be in $/kWh.
export_tariff (pd.Series): Expects this to be in $/kWh.
Returns:
cost (pd.Series): cost per measurement point, using import and export tariffs
"""
# Note: need to ensure meter data is converted to kWh
timestep = numpy.timedelta64(meter_p.index[1] - meter_p.index[0])
meter = unit_conversion.convert_watt_to_watt_hour(meter_p, timedelta=timestep)
import_power_cost = meter.where(meter >= 0).fillna(value=0.0)
export_power_revenue = meter.where(meter < 0).fillna(value=0.0)
cost = import_power_cost * import_tariff + export_power_revenue*export_tariff
return cost
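# Illustrative use of financial() (names and numbers are made up; assumes the unit
# conversion scales power by the step length in hours):
#   idx = pd.date_range("2020-01-01", periods=3, freq="30T")
#   meter_p = pd.Series([2.0, -1.0, 0.5], index=idx)    # kW, +ve import / -ve export
#   cost = financial(meter_p, pd.Series(0.30, index=idx), pd.Series(0.10, index=idx))
# With a 30-minute step the 2.0 kW sample becomes 1.0 kWh charged at $0.30/kWh, while
# the -1.0 kW sample contributes -0.5 kWh * $0.10/kWh, i.e. a negative (revenue) entry.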
def customer_financial(meas_dict: dict, node_keys: list = None, tariff: dict = None) -> dict:
"""
Evaluate the financial outcome for a selected customer or for all customers.
    Note: not currently set up to handle missing data (e.g. NaNs)
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list nodes for which financials are calculated.
tariff (dict): nodes tariff data. Expects this to be in $/kWh.
Returns:
results_dict: cost per node and the average cost over all nodes
"""
results_dict = {}
average = []
nodes = node_keys if node_keys else meas_dict.keys()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
if key in tariff:
meter_p = meter_power(meas_dict, key, axis=1)
meter_p_cost = financial(meter_p,
tariff[key]['import_tariff'],
tariff[key]['export_tariff'])
results_dict[key] = meter_p_cost
initiate = 0
for node in results_dict.values():
average = node if initiate == 0 else average.append(node)
initiate = 1
average = numpy.nanmean(average)
results_dict["average"] = average
return results_dict
def customer_cost_financial(tariff: dict, energy_grid_load: pd.Series, energy_solar_grid: pd.Series,
energy_battery_load: pd.Series, energy_solar_battery: pd.Series,
energy_solar_load: pd.Series) -> pd.Series:
"""
evaluates the customers cost
Args:
tariff: specifies tariffs to be applied to aggregation of customers.
energy_grid_load: specifies the energy flow between grid and load
energy_solar_grid: specifies the energy flow between solar and gird
energy_battery_load: specifies the energy flow between battery and load
energy_solar_battery: specifies the energy flow between solar and battery
energy_solar_load: specifies the energy flow between solar and load
Returns:
customer_cost (pd.Series):
"""
customer_cost = financial(energy_grid_load, tariff['re_import_tariff'], 0)
customer_cost += financial(energy_grid_load, tariff['rt_import_tariff'], 0)
customer_cost += financial(energy_battery_load, tariff['le_import_tariff'], 0)
customer_cost += financial(energy_battery_load, tariff['lt_import_tariff'], 0)
customer_cost -= financial(energy_solar_grid, tariff['re_export_tariff'], 0)
customer_cost += financial(energy_solar_grid, tariff['rt_export_tariff'], 0)
customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0)
customer_cost += financial(energy_solar_battery, tariff['lt_export_tariff'], 0)
customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0)
customer_cost += financial(energy_solar_load, tariff['lt_import_tariff'], 0)
customer_cost += financial(energy_solar_load, tariff['lt_export_tariff'], 0)
return customer_cost
def battery_cost_financial(tariff: dict, energy_grid_battery: pd.Series,
energy_battery_grid: pd.Series, energy_battery_load: pd.Series,
energy_solar_battery: pd.Series) -> pd.Series:
"""
evaluates the battery cost
Args:
tariff (dict): specifies tariffs to be applied to aggregation of customers.
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery
energy_battery_grid (pd.Series): specifies the energy flow between battery and gird
energy_battery_load (pd.Series): specifies the energy flow between battery and load
energy_solar_battery (pd.Series): specifies the energy flow between solar and battery
Returns:
battery_cost (pd.Series):
"""
battery_cost = financial(energy_solar_battery, tariff['le_import_tariff'], 0)
battery_cost += financial(energy_solar_battery, tariff['lt_import_tariff'], 0)
battery_cost -= financial(energy_battery_load, tariff['le_export_tariff'], 0)
battery_cost += financial(energy_battery_load, tariff['lt_export_tariff'], 0)
battery_cost += financial(energy_grid_battery, tariff['re_import_tariff'], 0)
battery_cost += financial(energy_grid_battery, tariff['rt_import_tariff'], 0)
battery_cost -= financial(energy_battery_grid, tariff['re_export_tariff'], 0)
battery_cost += financial(energy_battery_grid, tariff['rt_export_tariff'], 0)
return battery_cost
def network_cost_financial(tariff: dict, energy_grid_load: pd.Series,
energy_grid_battery: pd.Series, energy_battery_grid: pd.Series,
energy_battery_load: pd.Series, energy_solar_battery: pd.Series,
energy_solar_load: pd.Series) -> pd.Series:
"""
evaluates the network cost
Args:
tariff (dict): specifies tariffs to be applied to aggregation of customers.
energy_grid_load (pd.Series): specifies the energy flow between grid and load
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery
energy_battery_grid (pd.Series): specifies the energy flow between battery and grid
energy_battery_load (pd.Series): specifies the energy flow between battery and solar
energy_solar_battery (pd.Series) : specifies the energy flow between solar and battery
energy_solar_load (pd.Series): specifies the energy flow between solar and load
Returns:
network_cost(pd.Series)
"""
network_cost = -financial(energy_grid_load, tariff['rt_import_tariff'], 0)
network_cost -= financial(energy_battery_load, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_battery_load, tariff['lt_export_tariff'], 0)
network_cost -= financial(energy_solar_battery, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_solar_battery, tariff['lt_export_tariff'], 0)
network_cost -= financial(energy_grid_battery, tariff['rt_import_tariff'], 0)
network_cost -= financial(energy_battery_grid, tariff['rt_export_tariff'], 0)
network_cost -= financial(energy_solar_load, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_solar_load, tariff['lt_export_tariff'], 0)
return network_cost
def lem_financial(customer_tariffs, energy_grid_load, energy_grid_battery, energy_solar_grid,
energy_battery_grid, energy_battery_load, energy_solar_battery,
energy_solar_load, battery_tariffs=None):
"""
evaluate the cost for the local energy model
Args:
customer_tariffs: specifies tariffs to be applied to aggregation of customers.
energy_grid_load (pd.series): specifies the energy flow between grid and load
energy_grid_battery: specifies the energy flow between grid and battery
energy_solar_grid: specifies the energy flow between solar and grid
energy_battery_grid: specifies the energy flow between battery and grid
energy_battery_load: specifies the energy flow between battery and solar
energy_solar_battery: specifies the energy flow between solar and battery
energy_solar_load: specifies the energy flow between solar and load
battery_tariffs: specifies tariffs to be applied to aggregation of battery.
(if none given customer_tariffs ware used)
Returns:
customer_cost, battery_cost, network_cost
"""
customer_cost = customer_cost_financial(customer_tariffs, energy_grid_load, energy_solar_grid,
energy_battery_load, energy_solar_battery,
energy_solar_load)
bt_choice = battery_tariffs if battery_tariffs else customer_tariffs
battery_cost = battery_cost_financial(bt_choice, energy_grid_battery, energy_battery_grid,
energy_battery_load, energy_solar_battery)
network_cost = network_cost_financial(customer_tariffs, energy_grid_load, energy_grid_battery,
energy_battery_grid, energy_battery_load,
energy_solar_battery, energy_solar_load)
return customer_cost, battery_cost, network_cost
def peak_powers(meas_dict: dict, node_keys: list = None) -> dict:
"""
Calculate the peak power flows into and out of the network.
#TODO: consider selecting peak powers per phase
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
Returns:
results_dict (dict): dictionary of peak power into and out of network in kW,
and in kW/connection point.
"""
nodes = node_keys if node_keys else meas_dict.keys()
sum_meter_power = pd.DataFrame([])
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
meter_p = meter_power(meas_dict, key, axis=1)
if sum_meter_power.empty:
sum_meter_power = meter_p.copy()
else:
sum_meter_power = pd.concat([sum_meter_power, meter_p], axis=1, sort=True)
sum_power = sum_meter_power.sum(axis=1)
aver_power = numpy.nanmean(sum_meter_power, axis=1)
return {"peak_power_import": numpy.max(sum_power),
"peak_power_export": numpy.min(sum_power),
"peak_power_import_av": numpy.max(aver_power),
"peak_power_export_av": numpy.min(aver_power),
"peak_power_import_index": sum_power.idxmax(),
"peak_power_export_index": sum_power.idxmax()}
def self_sufficiency(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame):
"""
Self-sufficiency = 1 - imports / consumption
Note: the function expects a full index
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
        load_p (pd.dataframe): measurement data for load of a single node.
        solar_p (pd.dataframe): measurement data for solar of a single node.
        battery_p (pd.dataframe): measurement data for battery of a single node.
Returns:
results_dict: self_consumption_solar, self_consumption_batteries
"""
self_sufficiency_solar = numpy.nan
self_sufficiency_battery = numpy.nan
if not load_p.empty:
net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1)
net_load_solar_battery = pd.concat((load_p, solar_p, battery_p), axis=1).sum(axis=1)
#create an array that contains which entries are import and which are export
mask_import_solar = (net_load_solar >= 0)
mask_import_solar_battery = (net_load_solar_battery >= 0)
net_import_solar = net_load_solar * mask_import_solar
net_import_solar_battery = net_load_solar_battery * mask_import_solar_battery
sum_load = numpy.nansum(load_p)
sum_solar = numpy.nansum(solar_p)
        # it doesn't make sense to calculate this if there is no solar or the load data is missing (0.0)
if sum_solar < 0 and sum_load != 0:
self_sufficiency_solar = 1 - (numpy.nansum(net_import_solar) / sum_load)
self_sufficiency_battery = 1 - (numpy.nansum(net_import_solar_battery) / sum_load)
else:
print("Warning: not enough data to calculate")
return {"self_sufficiency_solar": self_sufficiency_solar,
"self_sufficiency_batteries": self_sufficiency_battery}
def self_consumption(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame) -> dict:
"""
Self-consumption = 1 - exports / generation
Note: the function expects a full index
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
        load_p (pd.dataframe): measurement data for load of a single node.
        solar_p (pd.dataframe): measurement data for solar of a single node.
        battery_p (pd.dataframe): measurement data for battery of a single node.
    Returns:
results_dict: self_consumption_solar, self_consumption_batteries
"""
net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1)
net_load_solar_battery = pd.concat((load_p, solar_p, battery_p), axis=1).sum(axis=1)
# create an array that contains which entries are import and which are export
mask_export_solar = (net_load_solar < 0)
mask_export_solar_battery = (net_load_solar_battery < 0)
net_export_solar = net_load_solar * mask_export_solar
    net_export_solar_battery = net_load_solar_battery * mask_export_solar_battery
sum_solar = numpy.nansum(solar_p)
self_consumption_solar = numpy.nan
self_consumption_battery = numpy.nan
if sum_solar < 0:
self_consumption_solar = 1 - (numpy.nansum(net_export_solar) / sum_solar)
        self_consumption_battery = 1 - (numpy.nansum(net_export_solar_battery) / sum_solar)
return {"self_consumption_solar": self_consumption_solar,
"self_consumption_batteries": self_consumption_battery}
def self_sufficiency_self_consumption_average(self_consumption_self_sufficiency_dict: dict) -> dict:
"""
calculates the average for self sufficiency and consumption over a given measurement.
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
self_consumption_self_sufficiency_dict: The dictionary has a node Id as Key and
4 values per node
Returns:
results_dict: dictionary with averages for the given network
"""
self_sufficiency_solar = []
self_sufficiency_batteries = []
self_consumption_solar = []
self_consumption_batteries = []
for node in self_consumption_self_sufficiency_dict.values():
self_sufficiency_solar.append(node["self_sufficiency_solar"])
self_sufficiency_batteries.append(node["self_sufficiency_batteries"])
self_consumption_solar.append(node["self_consumption_solar"])
self_consumption_batteries.append(node["self_consumption_batteries"])
av_self_sufficiency_solar = numpy.nanmean(self_sufficiency_solar)
av_self_sufficiency_batteries = numpy.nanmean(self_sufficiency_batteries)
av_self_consumption_solar = numpy.nanmean(self_consumption_solar)
av_self_consumption_batteries = numpy.nanmean(self_consumption_batteries)
return {"av_self_sufficiency_solar": av_self_sufficiency_solar,
"av_self_sufficiency_batteries": av_self_sufficiency_batteries,
"av_self_consumption_solar": av_self_consumption_solar,
"av_self_consumption_batteries": av_self_consumption_batteries}
def self_sufficiency_self_consumption(meas_dict: dict, node_keys: list = None, column: int = 0) -> dict:
"""
Self-sufficiency = 1 - imports / consumption
Self-consumption = 1 - exports / generation
And average over those
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
column (int): Column index used for calculation
Returns:
results_dict: self_sufficiency_solar, self_sufficiency_batteries,
self_consumption_solar, self_consumption_batteries
"""
results_dict = {}
nodes = node_keys if node_keys else meas_dict.keys()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
load_p = pd.DataFrame()
solar_p = pd.DataFrame()
battery_p = pd.DataFrame()
for meas in meas_dict[key]:
data_df = meas_dict[key][meas]
if not data_df.empty:
if 'loads' in meas:
load_p = pd.concat([load_p, meas_dict[key][meas].iloc[:,column]])
elif 'solar' in meas:
solar_p = pd.concat([solar_p, meas_dict[key][meas].iloc[:,column]])
elif 'batteries' in meas:
battery_p = pd.concat([battery_p, meas_dict[key][meas].iloc[:,column]])
self_sufficiency_dict = self_sufficiency(load_p, solar_p, battery_p)
self_consumption_dict = self_consumption(load_p, solar_p, battery_p)
results_dict[key] = self_sufficiency_dict.copy()
results_dict[key].update(self_consumption_dict)
averages_dict = self_sufficiency_self_consumption_average(results_dict)
results_dict.update(averages_dict)
return results_dict
def network_net_power(meas_dict: dict, node_keys: list = None, column: int = 0) -> dict:
"""
Calculate the net power (kW) of the network on the point of common coupling
(ignoring network structure and losses etc).
    Import and export are copies of net_load with the entries of the opposite sign set to zero.
Note: net_load is calculated by using load, solar and battery values for each node at each
time. If your load already has solar factored into it, then you should not pass the solar data
on as a separate column in your measurement dict
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
column (int): Column index used for calculation
Returns:
dictionary of net_load, net_import, net_export
"""
nodes = node_keys if node_keys else meas_dict.keys()
load_p = pd.DataFrame()
solar_p = pd.DataFrame()
battery_p = pd.DataFrame()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
for meas in meas_dict[key]:
if 'load' in meas:
load_p = pd.concat([load_p, meas_dict[key][meas].iloc[:,column]], axis=1)
elif 'solar' in meas:
solar_p = pd.concat([solar_p, meas_dict[key][meas].iloc[:,column]], axis=1)
elif 'batteries' in meas:
battery_p = pd.concat([battery_p, meas_dict[key][meas].iloc[:,column]],axis=1)
net_load = pd.DataFrame()
net_load = pd.concat([net_load, load_p, solar_p, battery_p], axis=1).sum(axis=1)
# create an array that contains which entries are import and which are export
net_import = numpy.copy(net_load)
net_export = numpy.copy(net_load)
net_import[net_import < 0] = 0
net_export[net_export > 0] = 0
return {'net_load': net_load, 'net_import': net_import, 'net_export': net_export}
def solar_kwh_per_kw(meas_dict: dict, node_info: pd.DataFrame, node_keys: list = None, column: int = 0) -> dict:
"""
Calculates the amount of solar energy generated per kW of rated solar capacity for all
given meters
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_info (pd.DataFrame) : Data frame with additional information on each node
node_keys (list): list of Node ID's in the network.
column (int): Column index used for calculation
Returns:
results_dict(dict): rated capacity for all given meters
"""
results_dict = {}
hours_in_day = 24
initiate = 0
nu_nonzero_properties = 0
nodes = node_keys if node_keys else meas_dict.keys()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
solar_power = pd.DataFrame([])
solar_capacity = 0
for meas in meas_dict[key]:
if 'solar' in meas:
solar_power = | pd.concat([solar_power, meas_dict[key][meas].iloc[:,column]]) | pandas.concat |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Implement GroupBy public API as pandas does."""
import numpy as np
import pandas
import pandas.core.groupby
from pandas.core.dtypes.common import is_list_like, is_numeric_dtype
from pandas.core.apply import reconstruct_func
from pandas._libs.lib import no_default
import pandas.core.common as com
from types import BuiltinFunctionType
from collections.abc import Iterable
from modin.error_message import ErrorMessage
from modin.utils import (
_inherit_docstrings,
try_cast_to_pandas,
wrap_udf_function,
hashable,
wrap_into_list,
)
from modin.core.storage_formats.base.query_compiler import BaseQueryCompiler
from modin.core.dataframe.algebra.default2pandas.groupby import GroupBy
from modin.config import IsExperimental
from .series import Series
from .utils import is_label
@_inherit_docstrings(pandas.core.groupby.DataFrameGroupBy)
class DataFrameGroupBy(object):
def __init__(
self,
df,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
drop,
**kwargs,
):
self._axis = axis
self._idx_name = idx_name
self._df = df
self._query_compiler = self._df._query_compiler
self._columns = self._query_compiler.columns
self._by = by
self._drop = drop
if (
level is None
and | is_list_like(by) | pandas.core.dtypes.common.is_list_like |
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import time
# prepare data
(X_train_full, y_train_full),(X_test, y_test) = keras.datasets.fashion_mnist.load_data()
num_valid = 5000
X_valid = X_train_full[:num_valid] / 255.
X_train = X_train_full[num_valid:] / 255.
y_valid = y_train_full[:num_valid]
y_train = y_train_full[num_valid:]
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
# check data
X_train_full.shape
X_train_full.dtype
plt.imshow(X_train_full[0],cmap="binary")
plt.axis('off')
# prepare for saving a model
checkpoint_cb = keras.callbacks.ModelCheckpoint("../model/fc_sample.h5", save_best_only=True)
earlystopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
logdir = os.path.join(os.curdir, time.strftime("../log/%Y_%m_%d_%H_%M_%S"))
tensorboard_cb = keras.callbacks.TensorBoard(logdir)
# create model and train
model = keras.models.Sequential([keras.layers.Flatten(input_shape=[28,28]),
keras.layers.Dense(200, activation="relu"),
keras.layers.Dense(50),
keras.layers.Dense(10, activation="softmax")])
# model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.compile(optimizer=keras.optimizers.SGD(lr=0.05), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid,y_valid),
callbacks=[checkpoint_cb, earlystopping_cb, tensorboard_cb])
# check result
history_df = pd.DataFrame(history.history)
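# Possible continuation (illustrative sketch, not part of the original excerpt): plot the
# learning curves and score the restored best model on the rescaled test set.
history_df.plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
model.evaluate(X_test / 255., y_test)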
# -*- coding: utf-8 -*-
import yfinance as yf
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import os
import math
import matplotlib.pylab as plt
import matplotlib
from Machine_Learning_for_Asset_Managers import ch2_fitKDE_find_best_bandwidth as best_bandwidth
from Machine_Learning_for_Asset_Managers import ch2_marcenko_pastur_pdf as mp
from Machine_Learning_for_Asset_Managers import ch2_monte_carlo_experiment as mc
from Machine_Learning_for_Asset_Managers import ch4_optimal_clustering as oc
import onc as onc
from Machine_Learning_for_Asset_Managers import ch5_financial_labels as fl
from Machine_Learning_for_Asset_Managers import ch7_portfolio_construction as pc
#import mlfinlab.trend_scanning as ts
#import mlfinlab.nco as nco
#import mlfinlab as ml # used for testing code
#from mlfinlab.portfolio_optimization.mean_variance import MeanVarianceOptimisation
#from mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimators
#Resources:
#Random matrix theory: https://calculatedcontent.com/2019/12/03/towards-a-new-theory-of-learning-statistical-mechanics-of-deep-neural-networks/
#Review: [Book] Commented summary of Machine Learning for Asset Managers by <NAME>
#https://gmarti.gitlab.io/qfin/2020/04/12/commented-summary-machine-learning-for-asset-managers.html
#Chapter 2: This chapter essentially describes an approach that Bouchaud and his crew from the CFM have
#pioneered and refined for the past 20 years. The latest iteration of this body of work is summarized in
#Joel Bun’s Cleaning large correlation matrices: Tools from Random Matrix Theory.
#https://www.sciencedirect.com/science/article/pii/S0370157316303337
#Condition number: https://dominus.ai/wp-content/uploads/2019/11/ML_WhitePaper_MarcoGruppo.pdf
# Excersize 2.9:
# 2. Using a series of matrix of stock returns:
# a) Compute the covariance matrix.
# What is the condition number of the correlation matrix
# b) Compute one hundredth efficient frontiers by drawing one hundred
# alternative vectors of expected returns from a Normal distribution
# with mean 10% and std 10%
# c) Compute the variance of the errors against the mean efficient frontier.
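# Illustrative sketch for part (a) (an addition, not from the book's code): the condition
# number of a correlation matrix is the ratio of its largest to smallest eigenvalue; a
# large value signals an ill-conditioned matrix and unstable mean-variance weights.
def correlation_condition_number(returns: np.ndarray) -> float:
    """Return the condition number of the correlation matrix of a T x N returns array.
    Assumes the correlation matrix is full rank (smallest eigenvalue > 0)."""
    corr = np.corrcoef(returns, rowvar=0)
    eigenvalues = np.linalg.eigvalsh(corr)  # ascending order for a symmetric matrix
    return eigenvalues[-1] / eigenvalues[0]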
def get_OL_tickers_close(T=936, N=234):
# N - num stocks in portfolio, T lookback time
ol = pd.read_csv('csv/ol_ticker.csv', sep='\t', header=None)
ticker_names = ol[0]
closePrice = np.empty([T, N])
covariance_matrix = np.empty([T, N])
portfolio_name = [ [ None ] for x in range( N ) ]
ticker_adder = 0
for i in range(0, len(ticker_names)): #len(ticker_names)): # 46
ticker = ticker_names[i]
print(ticker)
ol_ticker = ticker + '.ol'
df = yf.Ticker(ol_ticker)
#'shortName' in df.info and
try:
ticker_df = df.history(period="7y")
if ticker=='EMAS': print("****EMAS******")
if ticker=='AVM': print("****AVM*********")
if ticker_df.shape[0] > T and ticker!='EMAS' and ticker != 'AVM': # only read tickers with more than 30 days history
#1.Stock Data
closePrice[:,ticker_adder] = ticker_df['Close'][-T:].values # inserted from oldest tick to newest tick
portfolio_name[ticker_adder] = ol_ticker
ticker_adder += 1
else:
print("no data for ticker:" + ol_ticker)
except ValueError:
print("no history:"+ol_ticker)
return closePrice, portfolio_name
def denoise_OL(S, do_plot=True):
np.argwhere( np.isnan(S) )
# cor.shape = (1000,1000). If rowvar=1 - row represents a var, with observations in the columns.
cor = np.corrcoef(S, rowvar=0)
eVal0 , eVec0 = mp.getPCA( cor )
print(np.argwhere(np.isnan(np.diag(eVal0))))
# code snippet 2.4
T = float(S.shape[0])
N = S.shape[1]
q = float(S.shape[0])/S.shape[1] #T/N
eMax0, var0 = mp.findMaxEval(np.diag(eVal0), q, bWidth=.01)
nFacts0 = eVal0.shape[0]-np.diag(eVal0)[::-1].searchsorted(eMax0)
if do_plot:
fig = plt.figure()
ax = fig.add_subplot(111)
        ax.hist(np.diag(eVal0), density=True, bins=100)
pdf0 = mp.mpPDF(var0, q=S.shape[0]/float(S.shape[1]), pts=N)
pdf1 = mp.fitKDE( np.diag(eVal0), bWidth=.005) #empirical pdf
#plt.plot(pdf1.keys(), pdf1, color='g') #no point in drawing this
plt.plot(pdf0.keys(), pdf0, color='r')
plt.show()
# code snippet 2.5 - denoising by constant residual eigenvalue
corr1 = mp.denoisedCorr(eVal0, eVec0, nFacts0)
eVal1, eVec1 = mp.getPCA(corr1)
return eVal0, eVec0, eVal1, eVec1, corr1, var0
#consider using log-returns
def calculate_returns( S, percentageAsProduct=False ):
ret = np.zeros((S.shape[0]-1, S.shape[1]))
cum_sums = np.zeros(S.shape[1])
for j in range(0, S.shape[1]):
cum_return = 0
S_ret = np.zeros(S.shape[0]-1)
for i in range(0,S.shape[0]-1):
if percentageAsProduct==True:
S_ret[i] = 1+((S[i+1,j]-S[i,j])/S[i,j])
else:
S_ret[i] = ((S[i+1,j]-S[i,j])/S[i,j])
cum_return = np.prod(S_ret)-1
cum_sums[j] = cum_return
ret[:, j] = S_ret
return ret, cum_sums
def getVolatility(S): #std of instruments
return [np.std(S[:,i]) for i in range(0, S.shape[1])]
def test_exception_in_plotting_efficient_frontier(S_value):
# pylint: disable=invalid-name, protected-access
"""
Test raising of exception when plotting the efficient frontier.
"""
mvo = MeanVarianceOptimisation()
pdPrice = pd.DataFrame(S_value)
pdPrice.index = pd.RangeIndex(start=0, stop=6, step=1)
dates = ['2019-01-01','2019-02-01','2019-03-01','2019-04-01','2019-05-01','2019-06-01']
pdPrice['Datetime'] = pd.to_datetime(dates)
pdPrice.set_index('Datetime')
expected_returns = ReturnsEstimators().calculate_mean_historical_returns(asset_prices=pdPrice, resample_by=None) #'W')
covariance = ReturnsEstimators().calculate_returns(asset_prices=pdPrice, resample_by=None).cov()
plot = mvo.plot_efficient_frontier(covariance=covariance, max_return=1.0, expected_asset_returns=expected_returns)
assert len(plot._A) == 41
plot.savefig('books_read.png')
print("read books")
# Chapter 7 - apply the Nested Clustered Optimization (NCO) algorithm
def testNCO():
N = 5
T = 5
S_value = np.array([[1., 2,3, 4,5],
[1.1,3,2, 3,5],
[1.2,4,1.3,4,5],
[1.3,5,1, 3,5],
[1.4,6,1, 4,5.5],
[1.5,7,1, 3,5.5]])
S_value[:,1] =1
S_value[5,1] =1.1
S, _ = calculate_returns(S_value)
_, instrument_returns = calculate_returns(S_value, percentageAsProduct=True)
np.testing.assert_almost_equal(S, pd.DataFrame(S_value).pct_change().dropna(how="all"))
mu1 = None
cov1_d = np.cov(S ,rowvar=0, ddof=1)
#test baseClustering
corr1 = mp.cov2corr(cov1_d)
a,b,c = nco.NCO()._cluster_kmeans_base(pd.DataFrame(corr1))
d,e,f = oc.clusterKMeansBase(pd.DataFrame(corr1))
#b={0: [0, 1, 2], 1: [3, 4]}
#e={0: [0, 3, 4], 1: [1, 2]}
min_var_markowitz = mc.optPort(cov1_d, mu1).flatten()
#compare min_var_markowitz with mlfinlab impl
#ml.
min_var_NCO = pc.optPort_nco(cov1_d, mu1, max(int(cov1_d.shape[0]/2), 2)).flatten()
mlfinlab_NCO= nco.NCO().allocate_nco(cov1_d, mu1, max(int(cov1_d.shape[0]/2), 2)).flatten()
cov1_d = np.cov(S,rowvar=0, ddof=1)
mlfinlab_NCO= nco.NCO().allocate_nco(cov1_d, mu1, int(cov1_d.shape[0]/2)).flatten()
expected_return_markowitz = [min_var_markowitz[i]*instrument_returns[i] for i in range(0,cov1_d.shape[0])]
e_m = sum(expected_return_markowitz)
expected_return_NCO = [min_var_NCO[i]*instrument_returns[i] for i in range(0,cov1_d.shape[0])]
    e_NCO = sum(expected_return_NCO)
vol = getVolatility(S_value)
m_minVol = [min_var_markowitz[i]*vol[i] for i in range(0, cov1_d.shape[0])]
NCO_minVol = [mlfinlab_NCO[i]*vol[i] for i in range(0, cov1_d.shape[0])]
if __name__ == '__main__':
testNCO()
N = 333 #3
T = 936
S_value = np.loadtxt('csv/ol184.csv', delimiter=',')
if S_value.shape[0] < 1 or not os.path.exists('csv/portfolio_name.csv'):
S_value, portfolio_name = get_OL_tickers_close(T, N)
np.savetxt('csv/ol184.csv', S_value, delimiter=',')
np.savetxt('csv/portfolio_name.csv', portfolio_name, delimiter=',', fmt='%s')
portfolio_name = pd.read_csv('csv/portfolio_name.csv', sep='\t', header=None).values
lastIndex = 173
S_value = S_value[:,0:lastIndex] # S = S[:,6:9]
portfolio_name = portfolio_name[0:lastIndex] #portfolio_name = portfolio_name[6:9]
# use matrix of returns to calc correlation
S, instrument_returns = calculate_returns(S_value)
_, instrument_returns = calculate_returns(S_value, percentageAsProduct=True)
print(np.asarray(portfolio_name)[np.argsort(instrument_returns)]) #prints performance ascending
#calculate_correlation(S)
eVal0, eVec0, denoised_eVal, denoised_eVec, denoised_corr, var0 = denoise_OL(S)
detoned_corr = mp.detoned_corr(denoised_corr, denoised_eVal, denoised_eVec, market_component=1)
detoned_eVal, detoned_eVec = mp.getPCA(detoned_corr)
denoised_eigenvalue = np.diag(denoised_eVal)
eigenvalue_prior = np.diag(eVal0)
plt.plot(range(0, len(denoised_eigenvalue)), np.log(denoised_eigenvalue), color='r', label="Denoised eigen-function")
plt.plot(range(0, len(eigenvalue_prior)), np.log(eigenvalue_prior), color='g', label="Original eigen-function")
plt.xlabel("Eigenvalue number")
plt.ylabel("Eigenvalue (log-scale)")
plt.legend(loc="upper right")
plt.show()
#from code snippet 2.10
detoned_cov = mc.corr2cov(detoned_corr, var0)
w = mc.optPort(detoned_cov)
print(w)
#min_var_port = 1./nTrials*(np.sum(w, axis=0))
#print(min_var_port)
#expected portfolio variance: W^T.(Cov).W
#https://blog.quantinsti.com/calculating-covariance-matrix-portfolio-variance/
minVarPortfolio_var = np.dot(np.dot(w.T, detoned_corr), w)
#Expected return: w.T . mu
# https://www.mn.uio.no/math/english/research/projects/focustat/publications_2/shatthik_barua_master2017.pdf p8
# or I.T.cov^-1.mu / I.T.cov^-1.I
#inv = np.linalg.inv(cov)
#e_r = np.dot(np.dot(ones.T, inv), mu) / np.dot(ones.T, np.dot(ones.T, inv))
#Chapter 4 optimal clustering
# recreate fig 4.1 colormap of random block correlation matrix
nCols, minBlockSize = 183, 2
print("minBlockSize"+str(minBlockSize))
corr0 = detoned_corr
corr1, clstrs, silh = oc.clusterKMeansTop(pd.DataFrame(detoned_corr)) #1: [18, 24, 57, 81, 86, 99, 112, 120, 134, 165]
tStatMeanDepth = np.mean([np.mean(silh[clstrs[i]]) / np.std(silh[clstrs[i]]) for i in clstrs.keys()])
print("tstat at depth:")
print(tStatMeanDepth)
corr1, clstrs, silh = oc.clusterKMeansTop(pd.DataFrame(detoned_corr)) #1: [18, 24, 57, 81, 86, 99, 112, 120, 134, 165]
tStatMeanDepth = np.mean([np.mean(silh[clstrs[i]]) / np.std(silh[clstrs[i]]) for i in clstrs.keys()])
print("tstat at depth:")
print(tStatMeanDepth)
raise SystemExit
#corr11, clstrs11, silh11 = onc.get_onc_clusters(pd.DataFrame(detoned_corr)) #test with mlfinlab impl: 1: [18, 24, 57, 81, 86, 99, 112, 120, 134, 165]
matplotlib.pyplot.matshow(corr11) #invert y-axis to get origo at lower left corner
matplotlib.pyplot.gca().xaxis.tick_bottom()
matplotlib.pyplot.gca().invert_yaxis()
matplotlib.pyplot.colorbar()
matplotlib.pyplot.show()
#Chapter 5 Financial labels
    #Let's try trend-following on a single ticker (the BGBIO column is used below)
idxPHO =118
idxBGBIO = 29
idxWWI = 169
pho = S_value[:,idxBGBIO]
df0 = | pd.Series(pho[-50:]) | pandas.Series |
import pandas as pd
import config
import numpy as np
import os
import datetime
class AttendanceMarker:
def __init__(self):
# current datetime to put attendance
now = datetime.datetime.now()
self.time = now.strftime(config.DATE_TIME_FORMAT)
def _create_new_csv(self):
names = os.listdir(config.FACE_DATABASE_DIR)
names = np.array(names)
df = | pd.DataFrame(data=names,columns=[config.CSV_COL_NAME]) | pandas.DataFrame |
# Import required Libraries
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
from openpyxl import Workbook
import pandas as pd
# Function to get the search term
def get_url(search_term):
"""Generate a URL from search term"""
template = 'https://www.amazon.com/s?k={}'
search_term = search_term.replace(' ', '+')
#add term query to url
url = template.format(search_term)
#add page query placeholder
url += '&page={}'
return url
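# Example (illustrative): get_url('gaming laptop') returns
# 'https://www.amazon.com/s?k=gaming+laptop&page={}' - the trailing placeholder is filled
# in with the page number inside main().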
def extract_record(item):
try:
atag = item.h2.a
description = atag.text.strip()
url = 'https://www.amazon.com'+atag.get('href')
    except AttributeError:
        return
try:
# Price
price_parent = item.find('span', 'a-price')
price = price_parent.find('span', 'a-offscreen').text
except AttributeError:
return
try:
#Ratings
rating = item.i.text
# Ratings Count
review_count = item.find('span', {'class': 'a-size-base', 'dir': 'auto'}).text
except AttributeError:
rating = ''
review_count = ''
    result = (description, price, rating, review_count, url)
return result
def main(search_term):
# start the driver
driver = webdriver.Chrome()
records = []
url = get_url(search_term)
for page in range(1, 5):
driver.get(url.format(page))
soup = BeautifulSoup(driver.page_source, 'html.parser')
results = soup.find_all('div', {'data-component-type':'s-search-result'})
for item in results:
record = extract_record(item)
if record:
records.append(record)
driver.close()
#save results in .csv
#with open('results.csv', 'w', newline='', encoding='utf-8') as f:
# writer = csv.writer(f)
# writer.writerow(['Description', 'Price', 'Rating', 'Review_count','URL'])
# writer.writerows(records)
try:
wb = Workbook()
ws = wb.worksheets[0]
ws.append(['Description', 'Price', 'Rating', 'Review_count','URL'])
for row in records:
ws.append(row)
        wb.save('data.xlsx')
wb.close()
except:
df = | pd.DataFrame({'Product Name':records}) | pandas.DataFrame |
from __future__ import absolute_import
# PopulationSim
# See full license in LICENSE.txt.
from builtins import object
import logging
import os
import numpy as np
import pandas as pd
from activitysim.core.config import setting
from .lp import get_single_integerizer
from .lp import STATUS_SUCCESS
from .lp import STATUS_OPTIMAL
logger = logging.getLogger(__name__)
def smart_round(int_weights, resid_weights, target_sum):
"""
    Round weights while ensuring (as far as possible) that the result sums to target_sum
Parameters
----------
int_weights : numpy.ndarray(int)
resid_weights : numpy.ndarray(float)
target_sum : int
Returns
-------
rounded_weights : numpy.ndarray array of ints
"""
assert len(int_weights) == len(resid_weights)
assert (int_weights == int_weights.astype(int)).all()
assert target_sum == int(target_sum)
target_sum = int(target_sum)
# integer part of numbers to round (astype both copies and coerces)
rounded_weights = int_weights.astype(int)
# find number of residuals that we need to round up
int_shortfall = target_sum - rounded_weights.sum()
# clip to feasible, in case target was not achievable by rounding
int_shortfall = np.clip(int_shortfall, 0, len(resid_weights))
# Order the residual weights and round at the tipping point where target_sum is achieved
if int_shortfall > 0:
# indices of the int_shortfall highest resid_weights
i = np.argsort(resid_weights)[-int_shortfall:]
# add 1 to the integer weights that we want to round upwards
rounded_weights[i] += 1
return rounded_weights
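# Example of the rounding rule above (illustrative, not part of the original module):
#   smart_round(np.array([1, 2, 3]), np.array([0.9, 0.2, 0.8]), 8)
# the integer parts sum to 6, so the two largest residuals (0.9 and 0.8) are rounded up,
# giving array([2, 2, 4]), which sums to the target of 8.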
class Integerizer(object):
def __init__(self,
incidence_table,
control_importance_weights,
float_weights,
relaxed_control_totals,
total_hh_control_value,
total_hh_control_index,
control_is_hh_based,
trace_label=''):
"""
Parameters
----------
control_totals : pandas.Series
        targeted control totals (either explicit or backstopped) we are trying to hit
incidence_table : pandas.Dataframe
incidence table with columns only for targeted controls
control_importance_weights : pandas.Series
importance weights (from control_spec) of targeted controls
float_weights
        balanced float weights to integerize
relaxed_control_totals
total_hh_control_index : int
control_is_hh_based : bool
"""
self.incidence_table = incidence_table
self.control_importance_weights = control_importance_weights
self.float_weights = float_weights
self.relaxed_control_totals = relaxed_control_totals
self.total_hh_control_value = total_hh_control_value
self.total_hh_control_index = total_hh_control_index
self.control_is_hh_based = control_is_hh_based
self.trace_label = trace_label
def integerize(self):
sample_count = len(self.incidence_table.index)
control_count = len(self.incidence_table.columns)
incidence = self.incidence_table.values.transpose().astype(np.float64)
float_weights = np.asanyarray(self.float_weights).astype(np.float64)
relaxed_control_totals = np.asanyarray(self.relaxed_control_totals).astype(np.float64)
control_is_hh_based = np.asanyarray(self.control_is_hh_based).astype(bool)
control_importance_weights = \
np.asanyarray(self.control_importance_weights).astype(np.float64)
assert len(float_weights) == sample_count
assert len(relaxed_control_totals) == control_count
assert len(control_is_hh_based) == control_count
assert len(self.incidence_table.columns) == control_count
assert (relaxed_control_totals == np.round(relaxed_control_totals)).all()
assert not np.isnan(incidence).any()
assert not np.isnan(float_weights).any()
assert (incidence[self.total_hh_control_index] == 1).all()
int_weights = float_weights.astype(int)
resid_weights = float_weights % 1.0
if (resid_weights == 0.0).all():
# not sure this matters...
logger.info("Integerizer: all %s resid_weights zero. Returning success." %
((resid_weights == 0).sum(), ))
integerized_weights = int_weights
status = STATUS_OPTIMAL
else:
# - lp_right_hand_side - relaxed_control_shortfall
lp_right_hand_side = relaxed_control_totals - np.dot(int_weights, incidence.T)
lp_right_hand_side = np.maximum(lp_right_hand_side, 0.0)
# - max_incidence_value of each control
max_incidence_value = np.amax(incidence, axis=1)
assert (max_incidence_value[control_is_hh_based] <= 1).all()
# - create the inequality constraint upper bounds
num_households = relaxed_control_totals[self.total_hh_control_index]
relax_ge_upper_bound = \
np.maximum(max_incidence_value * num_households - lp_right_hand_side, 0)
hh_constraint_ge_bound = \
np.maximum(self.total_hh_control_value * max_incidence_value, lp_right_hand_side)
# popsim3 does does something rather peculiar, which I am not sure is right
# it applies a huge penalty to rounding a near-zero residual upwards
# the documentation justifying this is sparse and possibly confused:
# // Set objective: min sum{c(n)*x(n)} + 999*y(i) - 999*z(i)}
# objective_function_coefficients = -1.0 * np.log(resid_weights)
# objective_function_coefficients[(resid_weights <= np.exp(-999))] = 999
# We opt for an alternate interpretation of what they meant to do: avoid log overflow
# There is not much difference in effect...
LOG_OVERFLOW = -725
log_resid_weights = np.log(np.maximum(resid_weights, np.exp(LOG_OVERFLOW)))
assert not np.isnan(log_resid_weights).any()
if (float_weights == 0).any():
# not sure this matters...
logger.warn("Integerizer: %s zero weights out of %s" %
((float_weights == 0).sum(), sample_count))
assert False
if (resid_weights == 0.0).any():
# not sure this matters...
logger.info("Integerizer: %s zero resid_weights out of %s" %
((resid_weights == 0).sum(), sample_count))
# assert False
integerizer_func = get_single_integerizer()
resid_weights, status = integerizer_func(
incidence=incidence,
resid_weights=resid_weights,
log_resid_weights=log_resid_weights,
control_importance_weights=control_importance_weights,
total_hh_control_index=self.total_hh_control_index,
lp_right_hand_side=lp_right_hand_side,
relax_ge_upper_bound=relax_ge_upper_bound,
hh_constraint_ge_bound=hh_constraint_ge_bound
)
integerized_weights = \
smart_round(int_weights, resid_weights, self.total_hh_control_value)
self.weights = pd.DataFrame(index=self.incidence_table.index)
self.weights['integerized_weight'] = integerized_weights
delta = (integerized_weights != np.round(float_weights)).sum()
logger.debug("Integerizer: %s out of %s different from round" % (delta, len(float_weights)))
return status
def do_integerizing(
trace_label,
control_spec,
control_totals,
incidence_table,
float_weights,
total_hh_control_col):
"""
Parameters
----------
trace_label : str
trace label indicating geography zone being integerized (e.g. PUMA_600)
control_spec : pandas.Dataframe
full control spec with columns 'target', 'seed_table', 'importance', ...
control_totals : pandas.Series
control totals explicitly specified for this zone
incidence_table : pandas.Dataframe
float_weights : pandas.Series
balanced float weights to integerize
total_hh_control_col : str
name of total_hh column (preferentially constrain to match this control)
Returns
-------
integerized_weights : pandas.Series
status : str
as defined in integerizer.STATUS_TEXT and STATUS_SUCCESS
"""
# incidence table should only have control columns
incidence_table = incidence_table[control_spec.target]
if total_hh_control_col not in incidence_table.columns:
raise RuntimeError("total_hh_control column '%s' not found in incidence table"
% total_hh_control_col)
zero_weight_rows = (float_weights == 0)
if zero_weight_rows.any():
logger.debug("omitting %s zero weight rows out of %s"
% (zero_weight_rows.sum(), len(incidence_table.index)))
incidence_table = incidence_table[~zero_weight_rows]
float_weights = float_weights[~zero_weight_rows]
total_hh_control_value = control_totals[total_hh_control_col]
status = None
if setting('INTEGERIZE_WITH_BACKSTOPPED_CONTROLS') \
and len(control_totals) < len(incidence_table.columns):
##########################################
# - backstopped control_totals
# Use balanced float weights to establish target values for all control values
# note: this more frequently results in infeasible solver results
##########################################
relaxed_control_totals = \
np.round(np.dot(np.asanyarray(float_weights), incidence_table.values))
relaxed_control_totals = \
pd.Series(relaxed_control_totals, index=incidence_table.columns.values)
# if the incidence table has only one record, then the final integer weights
# should be just an array with 1 element equal to the total number of households;
assert len(incidence_table.index) > 1
integerizer = Integerizer(
incidence_table=incidence_table,
control_importance_weights=control_spec.importance,
float_weights=float_weights,
relaxed_control_totals=relaxed_control_totals,
total_hh_control_value=total_hh_control_value,
total_hh_control_index=incidence_table.columns.get_loc(total_hh_control_col),
control_is_hh_based=control_spec['seed_table'] == 'households',
trace_label='backstopped_%s' % trace_label
)
# otherwise, solve for the integer weights using the Mixed Integer Programming solver.
status = integerizer.integerize()
logger.debug("Integerizer status for backstopped %s: %s" % (trace_label, status))
# if we either tried backstopped controls or failed, or never tried at all
if status not in STATUS_SUCCESS:
##########################################
# - unbackstopped partial control_totals
# Use balanced weights to establish control totals only for explicitly specified controls
# note: this usually results in feasible solver results, except for some single hh zones
##########################################
balanced_control_cols = control_totals.index
incidence_table = incidence_table[balanced_control_cols]
control_spec = control_spec[control_spec.target.isin(balanced_control_cols)]
relaxed_control_totals = \
np.round(np.dot(np.asanyarray(float_weights), incidence_table.values))
relaxed_control_totals = \
| pd.Series(relaxed_control_totals, index=incidence_table.columns.values) | pandas.Series |
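# --- Illustration (not part of the source above; the toy numbers are assumptions) ---
# The "backstopped" relaxed control totals are simply the weighted column sums of the
# incidence table, rounded to the nearest integer. A minimal standalone sketch:
import numpy as np
import pandas as pd

float_weights = np.array([1.4, 0.6, 2.0])                # one balanced weight per household
incidence = pd.DataFrame({"num_hh": [1, 1, 1],           # hypothetical control columns
                          "num_workers": [0, 1, 2]})
relaxed = pd.Series(np.round(np.dot(float_weights, incidence.values)),
                    index=incidence.columns)
print(relaxed)  # num_hh 4.0, num_workers 5.0 (4.6 rounded)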
from rest_framework import permissions, status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from datetime import date, datetime, timedelta
from django.forms.models import model_to_dict
from django.db.models import Q, Count, F, Sum
from django.db.models.functions import TruncWeek, TruncMonth, TruncYear
from django.apps import apps
from django.core.files.storage import default_storage
from .serializers import *
from .models import *
from .content_based_recommender import ContentBasedRecommender
from .utils import *
from pathlib import Path
from google.analytics.data_v1beta import BetaAnalyticsDataClient
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import RunReportRequest
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from slugify import slugify
import pandas as pd
import random
import json
import uuid
import os
import pydash
import urllib3
import dotenv
# Read configure file
base_dir = Path(__file__).resolve().parent.parent
module_dir = os.path.dirname(__file__)
mapping_template_file_path = os.path.join(module_dir, 'configuration/mapping_template.json')
schema_table_file_path = os.path.join(module_dir, 'configuration/schema_table.json')
schema_detail_file_path = os.path.join(module_dir, 'configuration/schema_detail.json')
ga4_json = os.path.join(module_dir, 'configuration/ga4.json')
ua_json = os.path.join(module_dir, 'configuration/ua.json')
# Initialize environment variables
dotenv.load_dotenv(os.path.join(base_dir, '.env'))
# Global variables
API_KEY = os.environ['API_KEY']
IP_DOMAIN = os.environ['IP_DOMAIN']
scope = 'https://www.googleapis.com/auth/analytics.readonly'
dimensions = ['date', 'eventName', 'pageLocation', 'browser', 'deviceCategory', 'operatingSystem', 'country']
metrics = ['eventCount', 'sessions']
ua_dimensions = ['ga:date', 'ga:eventCategory', 'ga:pagePath', 'ga:browser', 'ga:deviceCategory', 'ga:operatingSystem', 'ga:country']
ua_metrics = ['ga:totalEvents', 'ga:sessions']
@api_view(['GET'])
def home(request):
try:
# Initialize KPI reports
web_activity_report = []
event_report = []
product_report = []
traffics = {}
# Total number of web activities (interactions)
web_activities_file = len(Interaction_f.objects.all())
web_activities_ga = Interaction_ga.objects.all().aggregate(Sum('event_count'))['event_count__sum']
if (web_activities_ga is None):
web_activities_ga = 0
web_activities = web_activities_file + web_activities_ga
# Total number of sessions (a session includes multiple interactions)
sessions_file = len(Interaction_f.objects.values('session_id').distinct())
sessions_ga = Interaction_ga.objects.all().aggregate(Sum('session_count'))['session_count__sum']
if (sessions_ga is None):
sessions_ga = 0
sessions = sessions_file + sessions_ga
# Total number of web activities by page location
pages_file = Interaction_f.objects.all().values('page_location').annotate(total=Count('page_location'))
pages_ga = Interaction_ga.objects.all().values('page_location').annotate(total=Sum('event_count'))
pages = list(pages_file) + list(pages_ga)
if (len(pages)):
pages = pd.DataFrame(pages).groupby(['page_location'], as_index=False).sum().to_dict('records')
pages = sorted(pages, key=lambda k : k['total'], reverse=True)
# Total number of web activities by device categories
device_categories_file = Interaction_f.objects.all().values('device_category').annotate(total=Count('device_category'))
device_categories_ga = Interaction_ga.objects.all().values('device_category').annotate(total=Sum('event_count'))
device_categories = list(device_categories_ga) + list(device_categories_file)
for category in list(device_categories):
device_type = category['device_category']
if (device_type not in traffics):
traffics[device_type] = 0
traffics[device_type] += category['total']
# Web activities report - Total number of web activities by event name
web_activity_data_file = Interaction_f.objects.all().values('event_name').annotate(total=Count('event_name'))
web_activity_data_ga = Interaction_ga.objects.all().values('event_name').annotate(total=Sum('event_count'))
web_activity_data = list(web_activity_data_file) + list(web_activity_data_ga)
if (len(web_activity_data)):
web_activity_data = | pd.DataFrame(web_activity_data) | pandas.DataFrame |
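# --- Illustration (not part of the view above; the sample dicts are made up) ---
# The KPI blocks above repeatedly combine two aggregate lists (Count() on the
# file-based table, Sum() on the GA table) by concatenating them and re-grouping
# with pandas. The core pattern in isolation:
import pandas as pd

pages_file = [{"page_location": "/home", "total": 3}, {"page_location": "/shop", "total": 1}]
pages_ga = [{"page_location": "/home", "total": 7}]
pages = (pd.DataFrame(pages_file + pages_ga)
         .groupby("page_location", as_index=False)["total"].sum()
         .sort_values("total", ascending=False)
         .to_dict("records"))
print(pages)  # [{'page_location': '/home', 'total': 10}, {'page_location': '/shop', 'total': 1}]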
#!/usr/bin/env python3
"""
Gaussian mixture fitting with Nested Sampling. This module was tested in the
main `nestfit` repo on bare arrays and Gaussian components -- without a
spectral axis, units, or other necessary complications.
The `.wrapped` references a Cython implementation of the Gaussian model class.
"""
import ctypes
import operator
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
from scipy import (special, stats)
from matplotlib import ticker
from matplotlib import pyplot as plt
import corner
import pymultinest
from .wrapped import CGaussianModel
plt.rc('font', size=10, family='serif')
plt.rc('text', usetex=True)
plt.rc('xtick', direction='out', top=True)
plt.rc('ytick', direction='out', right=True)
ROOT_DIR = Path('/lustre/aoc/users/bsvoboda/temp/nestfit')
DATA_DIR = ROOT_DIR / Path('data')
PLOT_DIR = ROOT_DIR / Path('plots')
class SyntheticSpectrum:
def __init__(self, xaxis, amp, cen, std, noise=0.03, set_seed=False):
"""
Construct a mixture of Gaussians expressed as:
f(x) = A * exp(-(x - c)^2 / (2 * s^2))
for "A" amplitude, "c" centroid, and "s" standard deviation.
Parameters
----------
xaxis : np.ndarray
amp : np.ndarray
Array of Gaussian amplitudes
cen : np.ndarray
Array of Gaussian centroid positions
std : np.ndarray
Array of Gaussian standard deviations
noise : float, default=0.03
Noise standard deviation
set_seed : bool, default=False
If `True` will use a default seed of 5 for the np.random module.
"""
if set_seed:
np.random.seed(5)
else:
np.random.seed()
self.xaxis = xaxis.reshape(-1, 1)
self.ncomp = len(amp)
self.size = self.xaxis.shape[0]
self.amp = amp
self.cen = cen
self.std = std
self.truths = np.concatenate([amp, cen, std])
self.noise = noise
self.components = self.profile().T
self.sum_spec = self.components.sum(axis=0)
self.noise_spec = np.random.normal(scale=self.noise, size=self.size)
self.sampled_spec = self.sum_spec + self.noise_spec
def profile(self):
return self.amp * np.exp(-(self.xaxis - self.cen)**2 / (2 * self.std**2))
def resample_spectrum(self, noise=None):
if noise is not None:
self.noise = noise
noise_spec = np.random.normal(scale=self.noise, size=self.size)
self.noise_spec = noise_spec
self.sampled_spec = self.sum_spec + self.noise_spec
def test_spectrum():
return SyntheticSpectrum(
np.linspace(-6, 6, 100),
amp=np.array([0.3, 0.5, 0.4]),
cen=np.array([-1, 0, 3]),
std=np.array([1.5, 1.0, 0.5]),
noise=0.03,
set_seed=True,
)
class GaussianModel:
model_name = 'gaussian'
def __init__(self, xaxis, ydata, noise, ncomp):
self.xaxis = xaxis.reshape(-1, 1)
self.size = xaxis.shape[0]
self.ydata = ydata
self.noise = noise
self.ncomp = ncomp
self.n_params = 3 * ncomp
self.lnpin = -self.size / 2 * np.log(2 * np.pi * noise**2)
self.null_lnZ = self.lnpin - np.sum(ydata**2) / (2 * self.noise**2)
#self.array_type = np.ctypeslib.ndpointer(
# ctypes.c_double, 1, (self.n_params,), 'C_CONTIGUOUS')
@property
def par_labels(self):
comps = range(1, self.ncomp+1)
return [
f'{label}{n}'
for label in ('a', 'c', 's')
for n in comps
]
def loglikelihood(self, theta, ndim, nparams):
n = self.ncomp
#atheta = ctypes.cast(theta, self.array_type).contents
atheta = np.ctypeslib.as_array(theta, shape=(self.n_params,))
amp = atheta[0 : n]
cen = atheta[ n:2*n]
std = atheta[2*n:3*n]
ymodel = np.sum(
amp * np.exp(-(self.xaxis - cen)**2 / (2 * std**2)),
axis=1,
)
difsqsum = np.sum((self.ydata - ymodel)**2)
lnL = self.lnpin - difsqsum / (2 * self.noise**2)
return lnL
def prior_transform(self, utheta, ndim, nparams):
n = self.ncomp
# amplitude -- uniform [0.06, 1.00]
for i in range(0, n):
utheta[i] = 0.94 * utheta[i] + 0.06
# centroid velocity -- uniform [-5.00, 5.00]
# but enforce ordering from left-to-right for the peaks to sort
# and limit multi-modality in posteriors
vmin, vmax = -5.0, 5.0
for i in range(n, 2*n):
v = (vmax - vmin) * utheta[i] + vmin
utheta[i] = vmin = v
# standard deviation -- uniform [0.30, 3.00]
for i in range(2*n, 3*n):
utheta[i] = 2.7 * utheta[i] + 0.30
return utheta # XXX
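# --- Illustration (not part of the class above; the numbers are made up) ---
# The centroid block of `prior_transform` maps unit-cube samples to an ordered
# sequence: each centroid is drawn uniformly between the previous one and the
# upper bound, which keeps the components sorted and limits label-switching
# multi-modality in the posterior. The same trick in isolation:
def ordered_uniform(u, vmin=-5.0, vmax=5.0):
    """Map unit-cube samples u (each in [0, 1]) to a monotonically increasing sequence."""
    out = np.empty_like(np.asarray(u, dtype=float))
    lo = vmin
    for i, ui in enumerate(u):
        out[i] = lo = (vmax - lo) * ui + lo
    return out

# ordered_uniform([0.5, 0.5, 0.5]) -> array([0.0, 2.5, 3.75]), always increasing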
def run_nested(spec, model, basename='run/test_run'):
pymultinest.run(
model.loglikelihood,
model.prior_transform,
model.n_params,
outputfiles_basename=basename,
resume=False,
verbose=True,
evidence_tolerance=0.3,
n_live_points=400,
sampling_efficiency=0.3,
n_iter_before_update=2000,
)
analyzer = pymultinest.Analyzer(
outputfiles_basename=basename,
n_params=model.n_params,
)
lnZ = analyzer.get_stats()['global evidence']
print(':: Evidence log10(Z):', lnZ / np.log(10))
return analyzer
def test_nested(ncomp=3):
spec = test_spectrum()
model = GaussianModel(
spec.xaxis,
spec.sampled_spec,
spec.noise,
ncomp,
)
analyzer = run_nested(spec, model)
return spec, model, analyzer
def test_nested_cython(ncomp=3):
spec = test_spectrum()
model = CGaussianModel(
spec.xaxis.flatten(),
spec.sampled_spec,
spec.noise,
ncomp,
)
analyzer = run_nested(spec, model)
return spec, model, analyzer
def marginals_to_pandas(a_stats):
margs = a_stats['marginals']
df = | pd.DataFrame(margs) | pandas.DataFrame |
import pandas as pd
import bentoml
from bentoml.artifact import PickleArtifact
from bentoml.handlers import DataframeHandler
from data_preprocess import Posts
from word_embedding_vectorizer import WordEmbeddingVectorizer
from gensim.models import Word2Vec
@bentoml.artifacts([PickleArtifact('word_vectorizer'),
PickleArtifact('word_embedding_rf')])
@bentoml.env(pip_dependencies=["pandas", "numpy", "gensim", "scikit-learn", "nltk"])
class WordEmbeddingModel(bentoml.BentoService):
@bentoml.api(DataframeHandler, typ='series')
def preprocess(self, series):
preprocess_series = Posts(series).preprocess()
input_matrix = self.artifacts.word_vectorizer.fit(preprocess_series).transform(preprocess_series)
return input_matrix
@bentoml.api(DataframeHandler, typ='series')
def predict(self, series):
input_matrix = self.preprocess(series)
pred_labels = self.artifacts.word_embedding_rf.predict(input_matrix)
pred_proba = self.artifacts.word_embedding_rf.predict_proba(input_matrix)
confidence_score = [prob[1] for prob in pred_proba]
output = | pd.DataFrame({'text': series, 'confidence_score': confidence_score, 'labels': pred_labels}) | pandas.DataFrame |
#!/usr/bin/env python3
import websocket
import config
import json
import pandas as pd
import numpy as np
from src.data_methods import get_gestures
from src.leap_methods import collect_frame
import src.features as features
import random
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", type=int, help="select which gesture file version to use", default=3)
args = parser.parse_args()
websocket_cache = {}
# get gestures
gestures, _, _ = get_gestures(version=args.version)
FINGERS = ["thumb", "index", "middle", "ring", "pinky"]
# note: the modes expect no_gesture to be in first place
current_gesture = 0
change_time = time.time()
warned = False
next_gesture = 0
current_gesture = 0
# whether or not to store captured frames
record = True
# collect only every nth frame, use this for lowering frame rate
n = 4
if __name__ == "__main__":
frames = []
frames_total = 0
# variables for gesture messages
gesturing = False
message = ''
# frame at which user is notified of impending change
notify_frame = 0
mode = int(input('Select mode:\n1 for alternating between gestures randomly\
\n2 for performing each gesture in succession\
\n3 for alternating no gesture with other gestures in sequential order\
\n4 for continuously performing a single gesture\
\n5 for viewing variables recorded\n'))
if mode == 1 or mode == 2 or mode == 3:
# where are we up to in the sequence of gestures?
seq_n = 0
g_i = np.arange(len(gestures))
# seconds warning to receive before performing a gesture
warn_time = 1
# delay between gestures
delay = float(input('enter time length for each gesture: '))
elif mode == 4:
print('Available gestures:')
for i, g in enumerate(gestures):
print(f'{i}. {g}')
print()
gesture_n = int(input('Select gesture to record: '))
gesture = gestures[gesture_n]
elif mode == 5:
record = False
print_variable = input('input variable name (e.g. right_grabAngle): ')
else:
raise Exception('Input not a valid mode')
try:
while True:
frames_total += 1
packed_frame = collect_frame(frames_total, n, websocket_cache)
if len(packed_frame) > 0:
# store variable indicating gesture
if mode == 1 or mode == 2 or mode == 3:
packed_frame["gesture"] = gestures[current_gesture]
elif mode == 4:
packed_frame["gesture"] = gesture
if record:
frames.append(packed_frame)
# change to the next gesture
if (mode == 1 or mode == 2) and change_time < time.time():
current_gesture = next_gesture
change_time = time.time() + delay # + random.uniform(-1,1)
print('###### Start ' + gestures[current_gesture])
warned = False
seq_n += 1
# set the next gesture, and warn user of impending change
elif (mode == 1 or mode == 2) and change_time - 1 < time.time() and warned == False:
if seq_n >= len(gestures): #check that we're not out of range
seq_n = 0
if mode == 1: #randomize
np.random.shuffle(g_i)
next_gesture = g_i[seq_n]
print('Prepare to perform ' + gestures[next_gesture])
# the user has been warned
warned = True
elif mode == 3 and change_time < time.time():
current_gesture = next_gesture
change_time = time.time() + delay # + random.uniform(-1,1) # can include a slight randomness in change time
print('###### Start ' + gestures[current_gesture])
warned = False
if current_gesture == 0:
seq_n += 1
# set the next gesture, and warn user of impending change
elif mode == 3 and change_time - warn_time < time.time() and warned == False:
if seq_n >= len(gestures): #check that we're not out of range
seq_n = 1
if current_gesture == 0:
next_gesture = g_i[seq_n]
else:
next_gesture = 0
print('Prepare to perform ' + gestures[next_gesture])
# the user has been warned
warned = True
elif mode == 5:
if frames_total % 1 == 0:
try:
print(print_variable + ': ', packed_frame[print_variable])
except KeyError:
print(print_variable + ' not found')
except KeyboardInterrupt:
if mode != 5:
fn = input("Enter filename to save recording to: ")
df = | pd.DataFrame(frames) | pandas.DataFrame |
# Author: <NAME>
# Email: <EMAIL>
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import matplotlib.pyplot as plt
import logging
class TrajDataset:
def __init__(self):
"""
data might include the following columns:
"scene_id", "frame_id", "agent_id",
"pos_x", "pos_y"
"vel_x", "vel_y",
"""
self.critical_columns = ["frame_id", "agent_id", "pos_x", "pos_y"]
self.data = pd.DataFrame(columns=self.critical_columns)
# a map from agent_id to a list of [agent_ids] that are annotated as her groupmate
# if such information is not available, the map should be filled with an empty list
# for each agent_id
self.groupmates = {}
self.title = ''
self.goals_areas = None
def postprocess(self, fps, sampling_rate=1, use_kalman=False):
"""
This function should be called after loading the data by loader
It performs the following steps:
-: check fps value, should be set and bigger than 0
-: check critical columns should exist in the table
-: update data types
-: fill 'groumates' if they are not set
-: checks if velocity do not exist, compute it for each agent
-: compute bounding box of trajectories
:param fps: video framerate
:param sampling_rate: if bigger than one, the data needs downsampling,
otherwise needs interpolation
:param use_kalman: for smoothing agent velocities
:return: None
"""
# check
for critical_column in self.critical_columns:
if critical_column not in self.data:
raise ValueError("Error! some critical columns are missing from trajectory dataset!")
# modify data types
self.data["frame_id"] = self.data["frame_id"].astype(int)
if str(self.data["agent_id"].iloc[0]).replace('.', '', 1).isdigit():
self.data["agent_id"] = self.data["agent_id"].astype(int)
self.data["pos_x"] = self.data["pos_x"].astype(float)
self.data["pos_y"] = self.data["pos_y"].astype(float)
self.data["label"] = self.data["label"].str.lower() # search with lower-case labels
# fill scene_id
if "scene_id" not in self.data:
self.data["scene_id"] = 0
# fill timestamps based on frame_id and video_fps
if "timestamp" not in self.data:
self.data["timestamp"] = self.data["frame_id"] / fps
# fill groupmates
agent_ids = pd.unique(self.data["agent_id"])
for agent_id in agent_ids:
if agent_id not in self.groupmates:
self.groupmates[agent_id] = []
# down/up sampling frames
if sampling_rate >= 2:
# FixMe: down-sampling
sampling_rate = int(sampling_rate)
self.data = self.data.loc[(self.data["frame_id"] % sampling_rate) == 0]
self.data = self.data.reset_index()
elif sampling_rate < (1-1E-2):
# TODO: interpolation
pass
else:pass
# remove the trajectories shorter than 2 frames
data_grouped = self.data.groupby(["scene_id", "agent_id"])
single_length_inds = data_grouped.head(1).index[data_grouped.size() < 2]
self.data = self.data.drop(single_length_inds)
# fill velocities
if "vel_x" not in self.data:
data_grouped = self.data.groupby(["scene_id", "agent_id"])
dt = data_grouped["timestamp"].diff()
if (dt > 2).sum():
logging.warning('Too big dt in {}'.format(self.title))
self.data["vel_x"] = (data_grouped["pos_x"].diff() / dt).astype(float)
self.data["vel_y"] = (data_grouped["pos_y"].diff() / dt).astype(float)
nan_inds = np.array(np.nonzero(dt.isnull().to_numpy())).reshape(-1)
self.data["vel_x"].iloc[nan_inds] = self.data["vel_x"].iloc[nan_inds + 1].to_numpy()
self.data["vel_y"].iloc[nan_inds] = self.data["vel_y"].iloc[nan_inds + 1].to_numpy()
# ============================================
if use_kalman:
def smooth(group):
if len(group) < 2: return group
logging.info('Smoothing trajectories {:d} / {:d}'.format(group["agent_id"].iloc[0], len(data_grouped)))
dt = group["timestamp"].diff().iloc[1]
kf = KalmanModel(dt, n_dim=2, n_iter=7)
smoothed_pos, smoothed_vel = kf.smooth(group[["pos_x", "pos_y"]].to_numpy())
group["pos_x"] = smoothed_pos[:, 0]
group["pos_y"] = smoothed_pos[:, 1]
group["vel_x"] = smoothed_vel[:, 0]
group["vel_y"] = smoothed_vel[:, 1]
return group
data_grouped = self.data.groupby(["scene_id", "agent_id"])
self.data = data_grouped.apply(smooth)
# compute bounding box
# Warning: the trajectories should belong to the same (physical) scene
# self.bbox['x']['min'] = min(self.data["pos_x"])
# self.bbox['x']['max'] = max(self.data["pos_x"])
# self.bbox['y']['min'] = min(self.data["pos_y"])
# self.bbox['y']['max'] = max(self.data["pos_y"])
def interpolate_frames(self, inplace=True):
all_frame_ids = sorted(pd.unique(self.data["frame_id"]))
if len(all_frame_ids) < 2:
# FixMe: print warning
return
frame_id_A = all_frame_ids[0]
frame_A = self.data.loc[self.data["frame_id"] == frame_id_A]
agent_ids_A = frame_A["agent_id"].to_list()
interp_data = self.data # "agent_id", "pos_x", "pos_y", "vel_x", "vel_y"
# df.append([df_try] * 5, ignore_index=True
for frame_id_B in all_frame_ids[1:]:
frame_B = self.data.loc[self.data["frame_id"] == frame_id_B]
agent_ids_B = frame_B["agent_id"].to_list()
common_agent_ids = list(set(agent_ids_A) & set(agent_ids_B))
frame_A_fil = frame_A.loc[frame_A["agent_id"].isin(common_agent_ids)]
frame_B_fil = frame_B.loc[frame_B["agent_id"].isin(common_agent_ids)]
for new_frame_id in range(frame_id_A+1, frame_id_B):
alpha = (new_frame_id - frame_id_A) / (frame_id_B - frame_id_A)
new_frame = frame_A_fil.copy()
new_frame["frame_id"] = new_frame_id
new_frame["pos_x"] = frame_A_fil["pos_x"].to_numpy() * (1 - alpha) +\
frame_B_fil["pos_x"].to_numpy() * alpha
new_frame["pos_y"] = frame_A_fil["pos_y"].to_numpy() * (1 - alpha) +\
frame_B_fil["pos_y"].to_numpy() * alpha
new_frame["vel_x"] = frame_A_fil["vel_x"].to_numpy() * (1 - alpha) +\
frame_B_fil["vel_x"].to_numpy() * alpha
new_frame["vel_y"] = frame_A_fil["vel_y"].to_numpy() * (1 - alpha) +\
frame_B_fil["vel_y"].to_numpy() * alpha
if inplace:
self.data = self.data.append(new_frame)
else:
self.data = self.data.append(new_frame) # TODO
frame_id_A = frame_id_B
frame_A = frame_B
agent_ids_A = agent_ids_B
self.data = self.data.sort_values('frame_id')
# FixMe: rename to add_row()/add_entry()
def add_agent(self, agent_id, frame_id, pos_x, pos_y):
"""Add one single data at a specific frame to dataset"""
new_df = pd.DataFrame(columns=self.critical_columns)
new_df["frame_id"] = [int(frame_id)]
new_df["agent_id"] = [int(agent_id)]
new_df["pos_x"] = [float(pos_x)]
new_df["pos_y"] = [float(pos_y)]
self.data = self.data.append(new_df)
def get_agent_ids(self):
""":return all agent_id in data table"""
return pd.unique(self.data["agent_id"])
def get_trajectories(self, label=""):
"""
Returns a list of trajectories
:param label: select agents from a specific class (e.g. pedestrian), ignore if empty
:return list of trajectories
"""
df = self.data
if label:
label_filtered = self.data.groupby("label")
df = label_filtered.get_group(label.lower())
trajectories = [g for _, g in df.groupby(["scene_id", "agent_id"])]
trl_np_list = []
for trl in trajectories:
trl_np = trl[["pos_y", "pos_x", "vel_y", "vel_x", "timestamp"]].to_numpy()
trl_np_list.append(trl_np)
return(trl_np_list)
# TODO:
def get_entries(self, agent_ids=[], frame_ids=[], label=""):
"""
Returns a list of data entries
:param agent_ids: select specific agent ids, ignore if empty
:param frame_ids: select a time interval, ignore if empty # TODO:
:param label: select agents from a specific label (e.g. car), ignore if empty # TODO:
:return list of data entries
"""
output_table = self.data # no filter
if agent_ids:
output_table = output_table[output_table["agent_id"].isin(agent_ids)]
if frame_ids:
output_table = output_table[output_table["frame_id"].isin(frame_ids)]
return output_table
def get_frames(self, frame_ids: list = [], scene_ids=""):
if not frame_ids:
frame_ids = | pd.unique(self.data["frame_id"]) | pandas.unique |
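# --- Illustration (not part of the class above; the numbers are made up) ---
# `interpolate_frames` is a plain linear blend between two key frames: for an
# intermediate frame f between frames fA and fB, alpha = (f - fA) / (fB - fA) and
# every interpolated column is (1 - alpha) * value_A + alpha * value_B.
def lerp(frame_id, frame_a, frame_b, value_a, value_b):
    alpha = (frame_id - frame_a) / (frame_b - frame_a)
    return (1 - alpha) * value_a + alpha * value_b

print(lerp(12, 10, 20, 0.0, 5.0))  # 1.0 -> one fifth of the way from 0.0 to 5.0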
import pandas as pd
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
for cat, interval in cates.items():
df = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample(
n=1000)
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
def generate_test(cat_pids, playlists, interactions, tracks):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
if cat == 'cat2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
if cat == 'cat3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
if cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
if cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat5 done")
if cat == 'cat6':
num_samples = 10
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat6 done")
if cat == 'cat7':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat7 done")
if cat == 'cat8':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat8 done")
if cat == 'cat9':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat9 done")
if cat == 'cat10':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat10 done")
tids = set(df_eval_itr['tid'])
df = tracks[tracks['tid'].isin(tids)]
df = df[['tid', 'arid']]
df_eval_itr = | pd.merge(df_eval_itr, df, on='tid') | pandas.merge |
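# --- Illustration (not part of the source above; the spec table is an assumption
# that mirrors the branches in generate_test) ---
# Each cat1..cat10 branch differs only in how many seed tracks are kept, whether the
# playlist name is exposed, and whether seeds are the first tracks or a random sample.
# The dispatch could be driven by a single table, e.g.:
CHALLENGE_CATEGORIES = {
    # cat: (num_samples, keep_name, sample_randomly)
    'cat1': (0, True, False),   'cat2': (1, True, False),
    'cat3': (5, True, False),   'cat4': (5, False, False),
    'cat5': (10, True, False),  'cat6': (10, False, False),
    'cat7': (25, True, False),  'cat8': (25, True, True),
    'cat9': (100, True, False), 'cat10': (100, True, True),
}

def category_spec(cat):
    """Return (num_samples, keep_name, sample_randomly) for a challenge category."""
    return CHALLENGE_CATEGORIES[cat]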
#!/usr/bin/env python
# coding: utf-8
# In[66]:
#置入所需套件
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# Load in the data
df = | pd.read_csv("InterestsSurvey.csv") | pandas.read_csv |
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import seaborn as sns
sns.set_style("whitegrid")
import sys
import os
from pathlib import Path
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold,RepeatedKFold, learning_curve
from xgboost.sklearn import XGBClassifier
from utils import data_handler
from utils import bayesiantests as bt
root_dir = str(Path(os.getcwd())) #.parent
to_dir = root_dir + '/results/'
import warnings
warnings.filterwarnings('ignore')
#res= None
##------------------------------ font, fig size setup------------------------------
plt.rc('font', family='serif')
def set_fig_fonts(SMALL_SIZE=22, MEDIUM_SIZE=24,BIGGER_SIZE = 26):
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
set_fig_fonts()
##------------------------------functions----------------------------------------
def save_fig(fig, title):
to_path = data_handler.format_title(to_dir,title,'.png')
fig.savefig(to_path ,dpi=1000,bbox_inches="tight",pad_inches=0)#, bbox_inches='tight', pad_inches=10
print("Successfully saved to: ",to_path)
return to_path
def plot_correlation_matrix(X,title, col_list, toSaveFig=True):
set_fig_fonts(12,14,16)
# standardization
scaler = StandardScaler()
df_transf = scaler.fit_transform(X)
df = pd.DataFrame(df_transf,columns = col_list)
fig = plt.figure()
ax1 = fig.add_subplot(111)
cmap = cm.get_cmap('coolwarm', 30)
#cax = ax1.pcolor(df.corr(), cmap=cmap, vmin=-1, vmax=1)
mat = df.corr()
flip_mat = mat.iloc[::-1]
cax = ax1.imshow(flip_mat , interpolation="nearest", cmap=cmap,vmin=-1, vmax=1)
ax1.grid(True)
#plt.suptitle('Features\' Correlation', y =0)
labels=df.columns.tolist()
x_labels = labels.copy()
labels.reverse()
#ax1.xaxis.set_ticks_position('top')
ax1.set_xticks(np.arange(len(labels)))#np.arange(len(labels))
ax1.set_yticks(np.arange(len(labels)))
# want a more natural, table-like display
#ax1.xaxis.tick_top()
ax1.set_xticklabels(x_labels, rotation = -45, ha="left") #, , rotation = 45,horizontalalignment="left"
ax1.set_yticklabels(labels, ha="right")
#plt.xticks(rotation=90)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
fig.colorbar(cax, boundaries=np.linspace(-1,1,21),ticks=np.linspace(-1,1,5))
plt.show()
if(toSaveFig):
save_fig(fig,title+'_confusion_matrix')
set_fig_fonts()
def plot_ROC_curve(pipe, tuned_parameters, title = 'roc_curve', save_csv = True,task=0):
# cross-validation setup
Ntrials = 1
outter_nsplit = 10
inner_nsplit = 10
# Results store
Y_true = pd.Series(name='Y_true')
pred_results = pd.Series(name='pred_prob')
# load data
assert (task ==0 or task ==2),'Error: invalid task spec!'
X_df, Y_df = data_handler.load_XY(task)
X = X_df.values
Y = Y_df.values
for i in range(Ntrials):
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
print("progress >> ",j,' / ',outter_nsplit)
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
inner_cv = StratifiedKFold(n_splits=inner_nsplit, shuffle=False)  # random_state has no effect when shuffle=False
clf = GridSearchCV(pipe,tuned_parameters, cv=inner_cv,scoring='roc_auc')
clf.fit(X_train, Y_train)
pred = pd.Series(clf.predict_proba(X_test)[:,1])
pred_results = pd.concat([pred_results, pred], axis=0,ignore_index=True)
Y_test_df = pd.Series(Y_test,name='Y_test')
Y_true = pd.concat([Y_true,Y_test_df], axis=0,ignore_index=True)
# plotting
fpr, tpr, thresholds = metrics.roc_curve(Y_true,pred_results)
roc_auc = metrics.auc(fpr, tpr)
auc_value = metrics.roc_auc_score(Y_true, pred_results)
fig = plt.figure(figsize=(12,12/1.618))
ax1 = fig.add_subplot(111)
labl = np.linspace(0,1,6)
labels = [float("{0:.2f}".format(x)) for x in labl]
ax1.set_xticks(labels)
ax1.set_xticklabels(labels)
labels[0] = ''
ax1.set_yticklabels(labels)
plt.grid(False)
ax1.plot(fpr, tpr, lw=2, label='ROC curve (area = {:.2f})'.format(auc_value),marker='.', linestyle='-', color='b')
ax1.plot([0,1],[0,1], linestyle='--', color='k')
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.set_xlim(0, 1)
ax1.set_ylim(0,1)
ax1.legend(loc='lower right')
color = 'black'
plt.setp(ax1.spines.values(), color=color)
ax1.yaxis.set_visible(True)
ax1.xaxis.set_visible(True)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.get_yaxis().set_tick_params(direction='out', width=2)
plt.show()
fig.savefig(data_handler.format_title(to_dir,title+'_ROC_curve','.png'),dpi=1000,bbox_inches="tight",pad_inches=0)
# save results to csv if true
if save_csv:
data_mat = np.array([fpr,tpr]).T
ret = pd.DataFrame(data_mat,columns=['fpr','tpr'])
data_handler.save_csv(ret,title+'_ROC_curve')
return True;
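# --- Illustration (not part of the source above; a sketch, not a drop-in replacement
# for the nested GridSearchCV loop) ---
# plot_ROC_curve pools the out-of-fold predicted probabilities from every outer fold
# and draws one ROC curve over the pooled predictions. For an estimator with fixed
# hyper-parameters, the same pooling can be written with cross_val_predict:
def pooled_roc(estimator, X, Y, n_splits=10, seed=0):
    from sklearn.model_selection import cross_val_predict
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    proba = cross_val_predict(estimator, X, Y, cv=cv, method='predict_proba')[:, 1]
    fpr, tpr, _ = metrics.roc_curve(Y, proba)
    return fpr, tpr, metrics.roc_auc_score(Y, proba)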
def plot_learning_curve_versus_tr_epoch(title='',ntrials=1, nfolds=10, save_csv=False,verbose=True, save_fig=False):
X_df,Y_df = data_handler.load_XY()
X = X_df.values
Y = Y_df.values
_ylabel = 'Mean AUROC'
n_jobs=4
# cross-validation setup
Ntrials = ntrials
outter_nsplit = nfolds
tot_count = Ntrials * outter_nsplit
# Results store
train_mat = np.zeros((tot_count,500))
test_mat = np.zeros((tot_count,500))
for i in range(Ntrials):
init_time = time.time()
print("trial = ",i)
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
count = i * outter_nsplit + j
print(str(count), " / ",str(tot_count))
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
eval_sets = [(X_train, Y_train), (X_test,Y_test)]
clf = XGBClassifier(objective="binary:logistic",min_child_weight=1,**{'tree_method':'exact'},silent=True,
n_jobs=4,random_state=3,seed=3,
learning_rate=0.01,
colsample_bylevel=0.9,
colsample_bytree=0.9,
n_estimators=500,
gamma=0.8,
max_depth =11,
reg_lambda = 0.8,
subsample=0.4)
clf.fit(X_train,Y_train, eval_metric=['auc'], eval_set = eval_sets, verbose=False)
results = clf.evals_result()
epochs = len(results['validation_0']['auc'])
# record results
train_mat[count] = results['validation_0']['auc']
test_mat[count] = results['validation_1']['auc']
if(verbose):
print('Iter: %d, epochs: %d'%(count, epochs))
print('training result: %.4f, testing result: %.4f'%(train_mat[count][499], test_mat[count][499]))
print('total time: %.4f mins'% ((time.time()-init_time)/60))
# Results store
epoch_lists=list(range(1,epochs+1))
train_results = pd.DataFrame(data=train_mat,columns=['epoch_'+str(i) for i in epoch_lists])
test_results = pd.DataFrame(data=test_mat,columns=['epoch_'+str(i) for i in epoch_lists])
if(save_csv):
data_handler.save_csv(train_results,title='mos2_learning_curve_train_raw')
data_handler.save_csv(test_results,title='mos2_learning_curve_test_raw')
print('end')
_ylim=(0.5, 1.01)
n_jobs=4
# create learning curve values
train_scores_mean = np.mean(train_mat, axis=0)
train_scores_std = np.std(train_mat, axis=0)
test_scores_mean = np.mean(test_mat, axis=0)
test_scores_std = np.std(test_mat, axis=0)
tr_size_df = pd.Series(epoch_lists, name='training_epoch')
tr_sc_m_df = pd.Series(train_scores_mean, name='training_score_mean')
val_sc_m_df = pd.Series(test_scores_mean, name='val_score_mean')
tr_sc_std_df = | pd.Series(train_scores_std, name='training_score_std') | pandas.Series |
# encoding: utf-8
'''
Multi-strategy (portfolio) backtest
'''
import sys
sys.path.append('../../')
from vnpy.app.cta_strategy.strategies.strategyMulti import MultiStrategy
import argparse
import pandas as pd
import numpy as np
from datetime import datetime
from setup_logger import setup_logger
setup_logger(filename='logsBackTest/vnpy_{0}.log'.format(datetime.now().strftime('%m%d_%H%M')), debug=False)
from vnpy.app.cta_strategy.backtesting import BacktestingEngine, OptimizationSetting
from vnpy.app.cta_strategy.backtestingPatch import BacktestingEnginePatch
from datetime import datetime,date,timedelta
import time
import json
import traceback
########################################################################
'''
backtesting
'''
def backtesting(settingFile, kLineCycle = 30, vt_symbol = 'rb1801', vt_symbol2 = None, mode = 'B', startDate = None, days = 1, historyDays = 0, optimization = False):
# 创建回测引擎
engine = BacktestingEnginePatch()
# 设置回测用的数据起始日期
if startDate:
startDate = startDate
endDate = datetime.strptime(startDate, '%Y%m%d') + timedelta(days)
else:
startDate = date.today() - timedelta(days + historyDays)
endDate = date.today()
engine.set_parameters(
vt_symbol=vt_symbol,
interval="1m",
start= startDate,
end=endDate,
rate=1/10000,
slippage=1,
size=10,
pricetick=1,
capital=1_000_000,
)
setting = {}
setting['vt_symbol'] = vt_symbol
setting['kLineCycle'] = kLineCycle
setting['settingFile'] = settingFile
engine.add_strategy(MultiStrategy, setting)
engine.load_data()
engine.run_backtesting()
df = engine.calculate_result()
engine.calculate_statistics()
#engine.show_chart()
# Show the backtest results
resultList = engine.showBacktestingResult()
# try:
# engine.showDailyResult()
# except:
# print ('-' * 20)
# print ('Failed to showDailyResult')
# #traceback.print_exc()
# pass
try:
# Show order information
import pandas as pd
orders = pd.DataFrame([i.__dict__ for i in resultList['resultList']])
try:
orders['holdTime'] = (orders.exitDt - orders.entryDt).astype('timedelta64[m]')
except:
pass
pd.options.display.max_rows = 100
pd.options.display.width = 300
pd.options.display.precision = 2
engine.output ('-' * 50)
engine.output(str(orders))
except:
print ('-' * 20)
print ('Failed to print result')
#traceback.print_exc()
try:
# Show detailed information
import pandas as pd
from utils import plot_candles, plot_candles1
import talib
import numpy as np
# analysis
#engine.loadHistoryData()
orders = pd.DataFrame([i.__dict__ for i in resultList['resultList']])
pricing = | pd.DataFrame([i.__dict__ for i in engine.history_data]) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
# 0 -> False and 1-> True
# any other value would be duplicated
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
# validate shallow copy
assert result_dropped is not tc
class TestSeriesDropDuplicates:
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"],
)
def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture))
if dtype == "datetime64[D]":
# pre-empty flaky xfail, tc1 values are seemingly-random
if not (np.array(tc1) == input1).all():
pytest.xfail(reason="GH#7996")
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
tm.assert_series_equal(tc1.duplicated(keep="last"), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep="last"), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep="last", inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
tm.assert_series_equal(tc1.duplicated(keep=False), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture))
if dtype == "datetime64[D]":
# pre-empt flaky xfail, tc2 values are seemingly-random
if not (np.array(tc2) == input2).all():
pytest.xfail(reason="GH#7996")
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, False, False, False])
tm.assert_series_equal(tc2.duplicated(keep="last"), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep="last"), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep="last", inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(keep=False), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_bool(self, ordered_fixture):
tc = Series(
Categorical(
[True, False, True, False],
categories=[True, False],
ordered=ordered_fixture,
)
)
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(inplace=True)
| tm.assert_series_equal(sc, tc[~expected]) | pandas._testing.assert_series_equal |
# External Libraries
from datetime import date
import pandas as pd
pd.options.mode.chained_assignment = None
import os
from pathlib import Path
import logging, coloredlogs
# Internal Libraries
import dicts_and_lists as dal
import Helper
# ------ Logger ------- #
logger = logging.getLogger('get_past_datasets.py')
coloredlogs.install(level='DEBUG')
folder = 'past_data/2017_2018/'
months = ['october', 'november', 'december', 'january', 'february', 'march', 'april', 'may', 'june']
for month in months:
url = 'https://www.basketball-reference.com/leagues/NBA_2018_games-'+ month + '.html'
df_url = pd.read_html(url)[0]
df_url = df_url.rename(columns=
{
'Visitor/Neutral' : 'AwayTeam',
'Home/Neutral' : 'HomeTeam',
'PTS' : 'AwayPoints',
'PTS.1' : 'HomePoints'
}
)
df_url = df_url.drop(['Unnamed: 6', 'Unnamed: 7', 'Attend.', 'Notes'], axis=1) # Remove non interesting columns
df_url = df_url.dropna(subset=['AwayPoints', 'HomePoints']) # Remove rows containing games not yet played
my_file = Path(os.getcwd() + '/' + folder + month + '_data.csv')
if not my_file.exists(): # If current data is not present in past_data folder, add it
df_url.to_csv(my_file, index=False) # Save the df as .csv
logger.info(f'An update has been made: {month}_data.csv has been created.')
logger.info(f'{month}_data.csv is up to date.')
# Create a big dataset
october_df = pd.read_csv(folder + 'october_data.csv')
november_df = pd.read_csv(folder + 'november_data.csv')
december_df = pd.read_csv(folder + 'december_data.csv')
january_df = pd.read_csv(folder + 'january_data.csv')
february_df = pd.read_csv(folder + 'february_data.csv')
march_df = pd.read_csv(folder + 'march_data.csv')
april_df = pd.read_csv(folder + 'april_data.csv')
may_df = pd.read_csv(folder + 'may_data.csv')
june_df = pd.read_csv(folder + 'june_data.csv')
#season_df = pd.concat([october_df, november_df, december_df, january_df, february_df, march_df, april_df, may_df, june_df])
season_df = pd.concat([april_df, may_df, june_df])
season_df.to_csv(folder + 'half_season.csv', index=False)
df = pd.DataFrame(columns = dal.columns_data_dict)
for _, row in season_df.iterrows():
print(row['HomeTeam'])
print(row['AwayTeam'])
try:
date = row['Date'].partition(', ')[2]
month = dal.months_dict[date[:3]]
day = date.partition(',')[0][4:]
if len(day) == 1:
day = '0' + day
year = date[-4:]
except Exception as e:
logger.error(e)
try:
home_team_short = dal.teams_dict[row['HomeTeam']]
url = 'https://www.basketball-reference.com/boxscores/' + year + month + day + '0' + home_team_short + '.html'
logger.info(f'Fetching data from: {url}')
except Exception as e:
logger.error(e)
try:
tables = | pd.read_html(url, match='Basic') | pandas.read_html |
"""
Name: foneutil
Version: 0.4.4
Info: Python based script in order to record customer interactions, allowing
the user to record relevant information from customer interaction. The
script allows for the user to edit already entered in real time.
Requirements: pandas, numpy, pyfiglet and termcolor modules
Created by: <NAME> - <EMAIL>
"""
import datetime
import readline
import os
import sys
import pandas as pd
import numpy as np
from pyfiglet import figlet_format
def clear():
"""
Clear the screen at the start of the script
"""
_ = os.system('clear')
try:
from termcolor import colored
except ImportError:
colored = None
def banner(string, color, font="speed", figlet=False):
"""
Add a color banner on the top of the menu when loading script
"""
if colored:
if not figlet:
print(colored(string, color))
else:
print(colored(figlet_format(string, font=font), color))
else:
print(string)
def rlinput(prompt, prefill=''):
"""
Function to allow the user to go back and edit the existing variable
data when entering call info. This in effect allows the form to act as
an interactive utility.
"""
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt)
finally:
readline.set_startup_hook()
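# Example (illustrative only): pre-fill the prompt with a previously saved value so
# the user can edit it in place rather than re-typing it, e.g.
#
#     customer_name = rlinput("Customer name: ", prefill="Jane Doe")
#
# Pressing Enter keeps "Jane Doe"; the arrow keys let the user edit it first.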
def display_file_content(filename):
"""
This function reads in the data file and displays the rows along with a
numberical index value. The pd.set_option() is there to stop the displayed
rows being truncated.
"""
clear()
df_csv = pd.read_csv(filename)
df_csv = df_csv.replace(np.nan, '', regex=True)
pd.set_option('display.max_rows', None)
print(df_csv)
def get_record(var):
"""
Pull the row related to the index ID that is provided by the user when
the read records area.
"""
df_csv = | pd.read_csv(filename) | pandas.read_csv |
import os
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from pyitab.analysis.results.base import filter_dataframe
from pyitab.analysis.results.dataframe import apply_function
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
def find_distance_boundaries(data):
scene_center = .5 * (data['Scena_offset_sec'] - data['Scena_onset_sec'])
distance_offset = scene_center - data['VAS sec']
value_click = np.int_(np.sign(distance_offset) == 1)
return value_click
def windowed_similarity(x, y, window):
spearman = []
for i in range(len(x) - window):
s = spearmanr(x[i:i+window], y[i:i+window])
spearman.append(s[0])
return spearman
def bootstrap(x, y, n=100, fx=windowed_similarity, window=10):
permutations = []
for p in range(n):
idx = np.sort(np.random.choice(len(x), size=len(x), replace=True))
spearman = windowed_similarity(x[idx], y[idx], window)
permutations.append(spearman)
return permutations
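# --- Illustration (not part of the analysis above; the percentile level is an assumption) ---
# The list returned by `bootstrap` can be turned into a percentile confidence band
# around the windowed-Spearman curve:
def bootstrap_band(x, y, n=100, window=10, alpha=5):
    """Return (lower, upper) percentile envelopes of the bootstrapped curves."""
    curves = np.asarray(bootstrap(x, y, n=n, window=window))  # shape (n, len(x) - window)
    lower = np.percentile(curves, alpha / 2, axis=0)
    upper = np.percentile(curves, 100 - alpha / 2, axis=0)
    return lower, upper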
def plot_fit(x, y, ax, linestyle='--', color='gray'):
from scipy.stats import linregress
m, b, r, p, s = linregress(x, y)
ax.plot(x, m*x+b, linestyle=linestyle, c=color, label=r**2)
#ax.legend()
pl.style.use("seaborn")
fontsize = 18
style = {
'figure.figsize': (19, 15),
'axes.facecolor': 'white',
'axes.spines.top': False,
'axes.spines.right': False,
'axes.spines.bottom': True,
'axes.spines.left': True,
'axes.edgecolor': 'black',
'axes.linewidth': 1.5,
'axes.grid': False,
'grid.color': 'white',
'xtick.color': 'black',
'ytick.color': 'black',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.major.size': 3,
'ytick.major.size': 3,
'xtick.minor.size': 2,
'ytick.minor.size': 2,
'ytick.labelsize': fontsize-2,
'xtick.labelsize': fontsize-2,
'legend.fontsize': fontsize-5,
'legend.title_fontsize': fontsize-4,
'font.size': fontsize,
'axes.labelsize': fontsize-1,
'axes.titlesize': fontsize,
'svg.fonttype':'none'
}
pl.rcParams.update(style)
palette_scatter = LinearSegmentedColormap.from_list("scatter_click", ['#73a87c', '#eba2b6'], N=2)
palette_half = LinearSegmentedColormap.from_list("palette_part", ['purple', 'orange'], N=2)
experiment_list = [
"VAS_DOPPIA_Delayed", "VAS_DOPPIA_Immediate", "VAS_Mid", "VAS_NewIns"
]
experiment_figure = {
'VAS_DOPPIA_Delayed':'Exp. 3 | Delayed',
'VAS_DOPPIA_Immediate': 'Exp. 3 | Immediate',
'VAS_Mid': 'Exp. 2',
'VAS_NewIns': 'Exp. 1'
}
palette = {
'VAS_NewIns': sns.light_palette("dimgray", n_colors=9),
'VAS_Mid': sns.light_palette("#046c9a", n_colors=9),
'VAS_DOPPIA_Immediate': sns.light_palette("#f2300f", n_colors=9),
'VAS_DOPPIA_Delayed': sns.light_palette("#0b775e", n_colors=9),
}
for e in experiment_list:
pl.figure()
sns.palplot(palette[e])
path = "/home/robbis/Dropbox/PhD/experiments/memory_movie/paper_2/"
full_dataset = list()
for experiment in experiment_list[:]:
print(experiment)
data = pd.read_excel(os.path.join(path, experiment+"_Recognition.xlsx"))
d = filter_dataframe(data, corresp=[1], **{'IR.ACC':[1]})
d = d.dropna()
if experiment == "VAS_DOPPIA_Delayed":
d = filter_dataframe(d, Session=[2])
if experiment == "VAS_DOPPIA_Immediate":
d = filter_dataframe(d, Session=[1])
d['experiment'] = [experiment for _ in range(d.shape[0])]
d['Experiment'] = [experiment_figure[experiment] for _ in range(d.shape[0])]
full_dataset.append(d)
ds = | pd.concat(full_dataset) | pandas.concat |
# -*- coding: utf-8 -*-
#%%
from datetime import datetime
startTime =datetime.now()
import pandas as pd
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
import time
import datetime
from datetime import timedelta
import matplotlib.gridspec as gridspec
import matplotlib.cm as mpl_cm
from scipy.stats import ttest_ind
from scipy.stats import ttest_rel
from scipy import stats
"""
import string
pd.options.display.float_format = '{:.6f}'.format
#%% READ IN DATA FILES
# Read in user data
user = pd.read_csv("user.csv",
low_memory=False)
# Read in activity data
mylist = []
for chunk in pd.read_csv("activitydf.csv",chunksize=1000,low_memory=False):
mylist.append(chunk)
active = pd.concat(mylist, axis= 0)
del mylist
#%% CLEAN USER DATA: Age and gender
# calculate age of users
# data collected in 2016
user["Age_calc"] = "2016"
# calculate age in 2016
user["Age"] = pd.to_numeric(user["Age_calc"])- pd.to_numeric(user["Year_Of_Birth"])
# remove column used to calculate age
user = user.drop("Age_calc", axis=1)
# ensure consistent age formatting
user['Age']= user['Age'].astype(float)
# consistent Gender label formatting
user["Gender"]= user["Gender"].replace({"Female":"female"})
user["Gender"]= user["Gender"].replace({"Male":"male"})
user["Gender"] =user["Gender"].fillna("Unknown")
user.head(),user.shape # n=539979
### CLEANING PseudoUserId ####
user= user.drop(user.loc[user['PseudoUserId'].duplicated()==True].index)
user.shape # n=539957
### CLEANING AGE ####
# create dataframes for known and unknown ages
# can use for later statistical comparison - is there a difference in gender between those who choose to enter their age
dem_users_unknown_age = pd.concat([user.loc[(user['Age']>100)],user.loc[(user['Age']<18)], user.loc[(user['Age'].isnull())]])
dem_users_known_age = user.loc[(user['Age']<=100) & (user['Age']>=18)]
# check add up to 539957 (145962 + 393995)
dem_users_unknown_age.shape[0]+ dem_users_known_age.shape[0]
# Number of users with unknown or excluded age: 145,962
dem_users_unknown_age.shape #145962
# Of these users (uncomment to check)
# Users with no recorded age:
# user.loc[user['Age'].isnull()].shape # 135,381
# Users older than 100:
# user.loc[(user['Age']>100)].shape #493
# Users younger than 18:
# user.loc[(user['Age']<18)].shape # 10,088
# Check add up to total number of users with unknown or excluded ages
# 135381 + 493 + 10088 # = 145962
# Number of users with known and included ages:
dem_users_known_age.shape #393995
# check no variables not in either dataframe (should return blank df)
temp =pd.concat([dem_users_known_age,dem_users_unknown_age])
user.loc[~user['PseudoUserId'].isin(temp['PseudoUserId'])].copy()
### CLEANING GENDER ####
# create dataframes for known and unknown genders
# located unknown gender
dem_users_unknown_gender = user.loc[user["Gender"]=='Unknown']
# located known gender
dem_users_known_gender = pd.concat([user.loc[user["Gender"]=='male'], user.loc[user["Gender"]=='female']])
# check add up to 539957 (102784+ 437173)
dem_users_unknown_gender.shape[0]+ dem_users_known_gender.shape[0]
### drop unwanted variables
# drop the unknown + unwanted age variables
# (only do once investigated other cleaning)
user = user.drop(pd.concat([user.loc[(user['Age']>100)],user.loc[(user['Age']<18)], user.loc[(user['Age'].isnull())]]).index)
# user.loc[user["Gender"]=='Unknown'].shape # 3138
# drop the unknown gender variables
# (only do once investigated other cleaning)
user=user.drop(user.loc[user["Gender"]=='Unknown'].index)
#%%
### POSTCODE CLEANING ###
# get postcode sector data
sectors = pd.read_csv("postcode_sectors.csv",
low_memory=False)
sectors_nssec = pd.read_csv("sector_nssec.csv",
low_memory=False)
sectors = sectors.merge(sectors_nssec, left_on='Postdist',right_on='postdist', how='left')
sectors.to_csv("postcode_sectors_merged.csv")
# subset data
sectors['Sectlen_three'] =sectors['Postcode_nospace'].str.slice(stop=3)
sectors['SectFour'] =sectors['Postcode_nospace'].str.slice(stop=4)
print('Number of unique postcode districts:', sectors['Postdist'].nunique()) # 2960 unique postcode districts
#%%
# create temporary copy of users dataframe
df_user_temp = user
# remove users with no postcode
df_user_temp = df_user_temp.drop(df_user_temp.loc[df_user_temp['Postcode'].isnull()].index)
# number of users with non-null postcode
df_user_temp.shape[0] # n= 359311
# remove letters and white space from the end of the postcode
df_user_temp['Postcode_stripletter'] = df_user_temp['Postcode'].str.rstrip(string.ascii_letters + string.whitespace)
def add_space(string, length):
return ' '.join(string[i:i+length] for i in range(0,len(string),length))
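# Example (illustrative only): the helper splits the string after every `length`
# characters, e.g. add_space("OX49", 2) -> "OX 49", add_space("RG101", 2) -> "RG 10 1"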
# add column with postcode separated by a space after first 2 characters
df_user_temp['Postcode_space2'] = df_user_temp['Postcode'].apply(lambda x : add_space(x,2))
# remove letters and white space from the end of the postcode
df_user_temp['Postcode_space2'] = df_user_temp['Postcode_space2'].str.rstrip(string.ascii_letters + string.whitespace)
# Apply series of postcode length matching, there are several postal district formats
# L: Letter
# N: Number
# e.g. LLNN: Letter Letter Number Number, e.g. OX10, SO22, LS12
# LLNN postal district format
LLNN = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isalpha() ==True) & (df_user_temp.Postcode.str[2].str.isnumeric() ==True)& (df_user_temp.Postcode.str[3].str.isnumeric() ==True)]
LLNN.shape[0] # n=310137
# LLNL postal district format
LLNL = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isalpha() ==True) & (df_user_temp.Postcode.str[2].str.isnumeric() ==True)& (df_user_temp.Postcode.str[3].str.isalpha() ==True)]
LLNL.shape[0] # n=1868
# LNL postal district format
LNL = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isnumeric() ==True) & (df_user_temp.Postcode.str[2].str.isalpha() ==True)]
LNL['LNL']= LNL['Postcode'].str.slice(stop=3)
LNL.shape[0] # n=2020
# LN postal district format
LN = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isnumeric() ==True) & (df_user_temp.Postcode.str[2].str.isnumeric() ==True)& (df_user_temp.Postcode.str[3].str.isalpha() ==True)]
LN.shape[0] # n= 10772
# LNN postal district format
LNN = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isnumeric() ==True) & (df_user_temp.Postcode.str[2].str.isnumeric() ==True)& (df_user_temp.Postcode.str[3].str.isnumeric() ==True)]
LNN['LNN'] = LNN['Postcode'].str.slice(stop=3)
# LNN_L postal district format
LNN_L = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isnumeric() ==True) & (df_user_temp.Postcode.str[2].str.isnumeric() ==True)& (df_user_temp.Postcode.str[3].str.isalpha() ==True)]
LNN_L['Postcode_strip2']= LNN_L['Postcode_stripletter'].str.slice(stop=2)
LNN_L.shape[0] # n= 10772
# Get postcodes 3 characters long to avoid missing matches
len_three= df_user_temp.loc[df_user_temp['Postcode'].apply(lambda x: len(x) ==3) ]
len_three.shape[0] # n=603
# Get postcodes 2 characters long to avoid missing matches
len_two= df_user_temp.loc[df_user_temp['Postcode'].apply(lambda x: len(x) ==2) ]
len_two.shape[0] # n=252
LLNN = df_user_temp.loc[(df_user_temp.Postcode.str[0].str.isalpha() ==True) & (df_user_temp.Postcode.str[1].str.isalpha() ==True) & (df_user_temp.Postcode.str[2].str.isnumeric() ==True)& (df_user_temp.Postcode.str[3].str.isnumeric() ==True)]
LLNN.shape[0] # n= 310137
#%% Subset reference sector data in the same way
# get sectors with the LLNL postal district format
sectors_LLNL = sectors.loc[(sectors.Postdist.str[0].str.isalpha() ==True) & (sectors.Postdist.str[1].str.isalpha() ==True) & (sectors.Postdist.str[2].str.isnumeric() ==True)& (sectors.Postdist.str[3].str.isalpha() ==True)].drop_duplicates(subset=['Postdist'])
sectors_LLNL.shape[0] # 51 unique postal districts with the LLNL format
# get sectors with the LNL postal district format
sectors_LNL = sectors.loc[(sectors.Postdist.str[0].str.isalpha() ==True) & (sectors.Postdist.str[1].str.isnumeric() ==True) & (sectors.Postdist.str[2].str.isalpha() ==True)].drop_duplicates(subset=['Postdist'])
sectors_LNL.shape[0] # 15 unique postal districts with the LNL format
# get sectors with the LN postal district format
sectors_LN = sectors.loc[(sectors.Postcode_nospace.str[0].str.isalpha() ==True) & (sectors.Postcode_nospace.str[1].str.isnumeric() ==True) & (sectors.Postcode_nospace.str[2].str.isnumeric() ==True)].drop_duplicates(subset=['Postdist'])
sectors_LN.shape[0] # 336 unique postal districts with the LN format
# get sectors with the LNN postal district format
sectors_LNN = sectors.loc[(sectors.Postdist.str[0].str.isalpha() ==True) & (sectors.Postdist.str[1].str.isnumeric() ==True) & (sectors.Postdist.str[2].str.isnumeric() ==True)].drop_duplicates(subset=['Postdist'])
sectors_LNN.shape[0] # 268 unique postal districts with the LNN format
# get sectors with the LLN postal district format
sectors_LLN = sectors.loc[sectors['Postcode_nospace'].str.len()==4]
sectors_LLN.shape[0] # 5261 unique postal districts with the LLN format
# get sectors with the LLNN postal district format
sectors_LLNN = sectors.loc[sectors['Postcode_nospace'].str.len()==5]
sectors_LLNN.shape[0] # 5481 unique postal districts with the LLNN format
#%% Join Bounts user postal districts to the sector postal district data to get cleaned postal districts across the different district formats.
# get Bounts users with LLNL postal district format
LLNL_clean = LLNL.merge(sectors_LLNL, left_on='Postcode', right_on='Postdist', how='left').dropna(subset=['ns_sec_1'])
print(LLNL_clean.shape[0]) # n= 1597 users with LLNL postal district format
# get Bounts users with LNL postal district format
LNL_clean = LNL.merge(sectors_LNL, left_on='LNL', right_on='Postdist', how='left').dropna(subset=['ns_sec_1'])
print(LNL_clean.shape[0]) # n= 857 users with LNL postal district format
# get Bounts users with LN postal district format
LN_clean = LN.merge(sectors_LN, left_on='Postcode_space2', right_on='Postcode').dropna(subset=['ns_sec_1'])
print(LN_clean.shape[0]) # n= 2038 users with LN postal district format
LNN_L_clean = LNN_L.merge(sectors.drop_duplicates(subset=['Postdist']), left_on='Postcode_stripletter',right_on='Postdist', how='left').dropna(subset=['ns_sec_1'])
print(LNN_L_clean.shape[0])
LNN_L_clean2 = LNN_L.merge(sectors.drop_duplicates(subset=['Postdist']), left_on='Postcode_strip2',right_on='Postdist', how='left').dropna(subset=['ns_sec_1'])
print(LNN_L_clean2.shape[0])
len_three_clean =len_three.merge(sectors.drop_duplicates(subset=['Postdist']), left_on='Postcode',right_on='Postdist').dropna(subset=['ns_sec_1'])
print(len_three_clean.shape[0])
len_two_clean = len_two.merge(sectors.drop_duplicates(subset=['Postdist']), left_on='Postcode',right_on='Postdist').dropna(subset=['ns_sec_1'])
print(len_two_clean.shape[0])
LNN_clean = LNN.merge(sectors_LNN, left_on='LNN', right_on='Postdist', how='left').dropna(subset=['ns_sec_1'])
print(LNN_clean.shape[0])
LLNN_temp_1 = LLNN.merge(sectors_LLNN, left_on='Postcode', right_on='Postdist', how='left').dropna(subset=['ns_sec_1'])
# 11723
LLNN_temp_2= LLNN.merge(sectors_LLN, left_on='Postcode', right_on='Postcode_nospace', how='left').dropna(subset=['ns_sec_1'])
# define user postal districts that could be LLN or LLNN
# can be used for exploratory analysis
LLN_LLNN = LLNN_temp_1.loc[LLNN_temp_1['PseudoUserId'].isin(LLNN_temp_2['PseudoUserId'])==True].drop_duplicates(subset=['PseudoUserId','Postdist'])
# LLN clean is in LLN_sectors but not LLNN sectors
LLN_clean = LLNN_temp_2.loc[LLNN_temp_2['PseudoUserId'].isin(LLNN_temp_1['PseudoUserId'])==False].drop_duplicates(subset=['PseudoUserId','Postdist'])
# LLNN clean is in LLNN_sectors but not LLN sectors
LLNN_clean = LLNN_temp_1.loc[LLNN_temp_1['PseudoUserId'].isin(LLNN_temp_2['PseudoUserId'])==False].drop_duplicates(subset=['PseudoUserId','Postdist'])
d1 = LLNL_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d2 = LNL_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d3 = LN_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d4 = LNN_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d5 = LLNN_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d6 = LLN_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d7 = len_two_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d8 = len_three_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d9 = LNN_L_clean[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
d10 = LNN_L_clean2[['PseudoUserId', 'Gender', 'Postcode_x', 'Year_Of_Birth',\
'Age', 'Postdist','Latitude', 'Longitude', 'Easting',
'Northing', 'Grid Ref', 'Postcodes', 'Active postcodes', 'Population',
'Households', 'Built up area', 'postdist', 'ns_sec_1', 'ns_sec_1_per',
'ns_sec_2', 'ns_sec_2_per', 'ns_sec_3', 'ns_sec_3_per', 'ns_sec_4',
'ns_sec_4_per', 'ns_sec_5', 'ns_sec_5_per', 'ns_sec_6', 'ns_sec_6_per',
'ns_sec_7', 'ns_sec_7_per', 'ns_sec_8', 'ns_sec_8_per',
'ns_sec_notclass', 'ns_sec_notclass_per']]
df_user_postcode = | pd.concat([d1,d2,d3,d4,d5,d6,d7,d8,d9,d10]) | pandas.concat |
"""
This is used for visualizing -- not really needed otherwise
"""
import os
os.environ["DISPLAY"] = ""
import argparse
import h5py
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import json
import pandas as pd
from matplotlib.animation import FuncAnimation
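# Fixed RGB palette (float values in [0, 1]); reshaped below into one color per row.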
COLORS = np.array(
[0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188,
0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000,
0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500,
0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500,
1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000,
0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000,
0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000,
0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286, 0.429, 0.429, 0.429,
0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
]
).astype(np.float32).reshape((-1, 3))
parser = argparse.ArgumentParser(description='Visualize Trajectories')
parser.add_argument(
'-fn',
dest='fn',
type=str,
help='fn to use'
)
args = parser.parse_args()
data_h5 = h5py.File(args.fn, 'r')
meta_info = json.loads(data_h5['meta_info'][()].decode('utf-8'))
meta_info['task_name'] = meta_info['task_name'].strip('_')
output_actions = json.loads(data_h5['output_actions'][()].decode('utf-8'))
output_actions.append({'action': 'Done'})
aliases = json.loads(data_h5['alias_object_id_to_old_object_id'][()].decode('utf-8'))
object_id_to_states = json.loads(data_h5['object_id_to_states'][()].decode('utf-8'))
# Last action / next action
def action_txt(action_t):
action_str = [action_t['action']]
if 'objectId' in action_t:
action_str.append(action_t['objectId'].split('|')[0])
if 'receptacleObjectId' in action_t:
action_str.append(action_t['receptacleObjectId'].split('|')[0])
return '< {} >'.format(' , '.join(action_str))
action_txt = [action_txt(action_t) for action_t in output_actions]
goal_txt = '{}:\n{}'.format(meta_info['task_name'], meta_info['text'])
for i, mid in enumerate(meta_info['main_object_ids']):
goal_txt = goal_txt.replace(f'${i+1}', '[{}]'.format(mid.split('|')[0]))
# Cheap latex style alignment
goal_txt = goal_txt.replace('. ', '.\n')
for s2 in goal_txt.split('\n'):
if len(s2) < 30:
continue
for s1 in s2.split(', ')[:-1]:
goal_txt = goal_txt.replace(s1 + ', ', s1 + ',\n')
ims = []
num_frames = data_h5['frames'].shape[0]
frames = np.array(data_h5['frames'], dtype=np.int32)
IM_SIZE = (384, 640)
DPI = 128
fig = plt.figure(frameon=False)
fig.set_size_inches(IM_SIZE[1] / DPI, IM_SIZE[0] / DPI)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
# Compute interesting state changes across aliases
def _column_interestingness_score(col):
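    # Shannon entropy of the column's empirical value distribution:
    # 0 when the value never changes, larger when states vary more across frames.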
vcs = col.value_counts().values.astype(np.float32)
vcs_p = vcs / vcs.sum()
return -np.sum(vcs_p * np.log(vcs_p))
dfs = {}
for oid in meta_info['main_object_ids']:
df = []
oid_and_aliases = [oid] + [k for k, v in aliases.items() if v == oid]
for t in range(num_frames):
# Add a dummy None which will get overwritten
df.append(None)
for oid_k in oid_and_aliases:
if str(t) in object_id_to_states[oid_k]:
df[t] = object_id_to_states[oid_k][str(t)]
if df[t] is None:
assert t > 0
df[t] = df[t-1]
df = | pd.DataFrame(df) | pandas.DataFrame |
from sklearn.metrics import pairwise_distances
import pandas as pd
import geopandas as gpd
import lib.helpers as helpers
def zone_distances(zones):
"""
:param zones
GeoDataFrame [*index, zone, geometry]
Must be in a CRS of unit: metre
"""
for ax in zones.crs.axis_info:
assert ax.unit_name == 'metre'
print("Calculating distances between zones...")
distances_meters = pairwise_distances(
list(zip(
zones.geometry.centroid.x.to_list(),
zones.geometry.centroid.y.to_list(),
))
)
distances = pd.DataFrame(
distances_meters / 1000,
columns=zones.zone,
index=zones.zone,
).stack()
return distances
def distance_quantiles(zone_dist):
print("Calculating quantiles...")
quantiles, bins = pd.qcut(zone_dist, q=100, retbins=True)
qgrps = quantiles.groupby(quantiles)
return qgrps, bins
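# Illustrative usage sketch (file name and CRS are assumptions; any metric CRS works):
# zones = gpd.read_file("zones.shp")[["zone", "geometry"]].to_crs(epsg=3006)
# dist = zone_distances(zones)              # pd.Series indexed by (zone, zone), in km
# qgrps, bins = distance_quantiles(dist)    # percentile groups and bin edges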
def crs_convert_visits(visits, zones):
print("Convering visits to zone CRS")
visits = gpd.GeoDataFrame(
visits,
crs="EPSG:4326",
geometry=gpd.points_from_xy(visits.longitude, visits.latitude),
)
return visits.to_crs(zones.crs)
def align_visits_to_zones(visits, zones):
print("Aligning region-visits to zones...")
regional_visits = visits[visits.kind == 'region']
n_regional_visits_before = regional_visits.shape[0]
user_regions = regional_visits.groupby(['userid', 'region']).head(1)
user_zones = gpd.sjoin(user_regions, zones, op='intersects')[['region', 'zone']]
regional_visits = user_zones.merge(regional_visits, on=['userid', 'region'])
print("removed", n_regional_visits_before - regional_visits.shape[0], "region-visits due to missing zone geom")
print("Aligning point-visits to zones...")
point_visits = visits[visits.kind == 'point']
if point_visits.shape[0] > 0:
n_point_visits_before = point_visits.shape[0]
point_visits = gpd.sjoin(point_visits, zones, op='intersects')
print("removed", n_point_visits_before - point_visits.shape[0], "point-visits due to missing zone geom")
else:
point_visits = point_visits.assign(zone='0')
# Recombine
columns = ['day', 'timeslot', 'zone']
# Special handling of created at in order to have baseline and baseline 24h time threshold.
if 'createdat' in visits:
columns.append('createdat')
visits = pd.concat([
regional_visits[columns],
point_visits[columns]
])
print(visits.shape[0], "visits left after alignment")
# Re-sort to chronological order
visits = visits \
.reset_index().set_index(['userid', 'day', 'timeslot']).sort_index() \
.reset_index().set_index('userid')
return visits
def align_raw_visits_to_zones(visits, zones):
print("Aligning visits to zones...")
regional_visits = visits.copy()
n_regional_visits_before = regional_visits.shape[0]
user_regions = regional_visits.groupby(['userid', 'region']).head(1)
user_zones = gpd.sjoin(user_regions, zones, op='intersects')[['region', 'zone']]
regional_visits = user_zones.merge(regional_visits, on=['userid', 'region'])
print("removed", n_regional_visits_before - regional_visits.shape[0], "region-visits due to missing zone geom")
# Recombine
columns = ['zone', 'createdat']
visits = regional_visits[columns]
print(visits.shape[0], "visits left after alignment")
# Re-sort to chronological order
visits = visits \
.reset_index().set_index(['userid', 'createdat']).sort_index() \
.reset_index().set_index('userid')
return visits
def aligned_visits_to_odm(visits, multiindex, timethreshold_hours=None):
print("Creating odm...")
gaps_columns = ['zone']
if "createdat" in visits:
gaps_columns.append("createdat")
if "weight" in visits:
gaps_columns.append("weight")
gaps = helpers.visit_gaps(visits[gaps_columns])
if timethreshold_hours is not None:
print("Applying timethreshold to gaps [{} hours]...".format(timethreshold_hours))
gaps = gaps.assign(duration=gaps.createdat_destination - gaps.createdat_origin)
        gaps = gaps[gaps.duration < pd.Timedelta(timethreshold_hours, "hours")]
__version__ = '0.1.3'
__maintainer__ = '<NAME> 31.12.2019'
__contributors__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>'
__birthdate__ = '31.12.2019'
__status__ = 'dev' # options are: dev, test, prod
#----- imports & packages ------
if __package__ is None or __package__ == '':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(__file__))))
import pprint
import pandas as pd
import numpy as np
import warnings
from pathlib import Path
from zipfile import ZipFile
class DataParser:
def __init__(self, configDict: dict, datasetID: str, loadEncrypted=False):
"""
Basic class for parsing a mobility survey trip data set. Currently the both German travel surveys MiD 2008 and
MiD 2017 are pre-configured and one of the two can be given (default: MiD 2017).
The data set can be provided from an encrypted file on a server in which case the link to the ZIP-file as well
as a link to the file within the ZIP-file have to be supplied in the globalConfig and a password has to be
supplied in the parseConfig.
Columns relevant for the EV simulation are selected from the entirety of the data and renamed to VencoPy
internal variable names given in the dictionary parseConfig['dataVariables'] for the respective survey data set.
Manually configured exclude, include, greaterThan and smallerThan filters are applied as they are specified in
parseConfig. For some columns, raw data is transferred to human readable strings and respective columns are
added. Pandas timestamp columns are synthesized from the given trip start and trip end time information.
:param configDict: A dictionary containing multiple yaml config files
:param datasetID: Currently, MiD08 and MiD17 are implemented as travel survey data sets
:param loadEncrypted: If True, load an encrypted ZIP file as specified in parseConfig
"""
self.parseConfig = configDict['parseConfig']
self.globalConfig = configDict['globalConfig']
self.localPathConfig = configDict['localPathConfig']
self.datasetID = self.checkDatasetID(datasetID, self.parseConfig)
self.rawDataPath = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['tripsDataRaw']
self.subDict = {}
self.rawData = None
self.data = None
self.__filterDict = {}
self.columns = self.compileVariableList()
self.filterDictNameList = ['include', 'exclude', 'greaterThan', 'smallerThan']
self.updateFilterDict()
print('Parsing properties set up')
if loadEncrypted:
print(f"Starting to retrieve encrypted data file from "
f"{self.globalConfig['pathAbsolute']['encryptedZipfile']}")
self.loadEncryptedData(pathToZip=Path(self.globalConfig['pathAbsolute']['encryptedZipfile']) /
self.globalConfig['files'][self.datasetID]['encryptedZipFileB2'],
pathInZip=self.globalConfig['files'][self.datasetID]['tripDataZipFileRaw'])
else:
print(f"Starting to retrieve local data file from {self.rawDataPath}")
self.loadData()
def updateFilterDict(self) -> None:
"""
Internal function to parse the filter dictionary of a specified data set from parseConfig.yaml
:return: None
"""
self.__filterDict[self.datasetID] = self.parseConfig['filterDicts'][self.datasetID]
self.__filterDict[self.datasetID] = {iKey: iVal for iKey, iVal in self.__filterDict[self.datasetID].items() if self.__filterDict[self.datasetID][iKey] is not
None}
def checkDatasetID(self, datasetID: str, parseConfig: dict) -> str:
"""
General check if data set ID is defined in parseConfig.yaml
:param datasetID: list of strings declaring the datasetIDs to be read in
:param parseConfig: A yaml config file holding a dictionary with the keys 'pathRelative' and 'pathAbsolute'
        :return: Returns the validated dataset ID string
"""
availableDatasetIDs = parseConfig['dataVariables']['datasetID']
assert datasetID in availableDatasetIDs, \
f'Defined datasetID {datasetID} not specified under dataVariables in parseConfig. Specified datasetIDs ' \
f'are {availableDatasetIDs}'
return datasetID
def compileVariableList(self) -> list:
"""
Clean up the replacement dictionary of raw data file variable (column) names. This has to be done because some
variables that may be relevant for the analysis later on are only contained in one raw data set while not
        contained in another one. E.g. whether a trip was an intermodal trip was only assessed in the MiD 2017 while it wasn't
in the MiD 2008. This has to be mirrored by the filter dict for the respective data set.
:return: List of variables
"""
listIndex = self.parseConfig['dataVariables']['datasetID'].index(self.datasetID)
variables = [val[listIndex] if not val[listIndex] == 'NA' else 'NA' for key, val in
self.parseConfig['dataVariables'].items()]
variables.remove(self.datasetID)
self.removeNA(variables)
return variables
def removeNA(self, variables: list):
"""
Removes all strings that can be capitalized to 'NA' from the list of variables
:param variables: List of variables of the mobility dataset
:return: Returns a list with non NA values
"""
vars = [iVar.upper() for iVar in variables]
counter = 0
for idx, iVar in enumerate(vars):
if iVar == 'NA':
del variables[idx - counter]
counter += 1
def loadData(self):
"""
        Loads data specified in self.rawDataPath and stores it in self.rawData. Raises an exception if an invalid suffix
is specified in self.rawDataPath. READ IN OF CSV HAS NOT BEEN EXTENSIVELY TESTED BEFORE BETA RELEASE.
:return: None
"""
# Future releases: Are potential error messages (.dta not being a stata file even as the ending matches)
# readable for the user? Should we have a manual error treatment here?
if self.rawDataPath.suffix == '.dta':
self.rawData = pd.read_stata(self.rawDataPath, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
# This has not been tested before the beta release
elif self.rawDataPath.suffix == '.csv':
self.rawData = pd.read_csv(self.rawDataPath)
else:
Exception(f"Data type {self.rawDataPath.suffix} not yet specified. Available types so far are .dta and "
f".csv")
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def loadEncryptedData(self, pathToZip, pathInZip):
"""
Since the MiD data sets are only accessible by an extensive data security contract, VencoPy provides the
possibility to access encrypted zip files. An encryption password has to be given in parseConfig.yaml in order
to access the encrypted file. Loaded data is stored in self.rawData
:param pathToZip: path from current working directory to the zip file or absolute path to zipfile
:param pathInZip: Path to trip data file within the encrypted zipfile
:return: None
"""
with ZipFile(pathToZip) as myzip:
if '.dta' in pathInZip:
self.rawData = pd.read_stata(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')),
convert_categoricals=False, convert_dates=False, preserve_dtypes=False)
else: # if '.csv' in pathInZip:
self.rawData = pd.read_csv(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')), sep=';', decimal=',')
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def selectColumns(self):
"""
Function to filter the rawData for only relevant columns as specified by parseConfig and cleaned in
self.compileVariablesList(). Stores the subset of data in self.data
:return: None
"""
self.data = self.rawData.loc[:, self.columns]
def harmonizeVariables(self):
"""
Harmonizes the input data variables to match internal VencoPy names given as specified in the mapping in
parseConfig['dataVariables']. So far mappings for MiD08 and MiD17 are given. Since the MiD08 doesn't provide
a combined household and person unique identifier, it is synthesized of the both IDs.
:return: None
"""
replacementDict = self.createReplacementDict(self.datasetID, self.parseConfig['dataVariables'])
dataRenamed = self.data.rename(columns=replacementDict)
if self.datasetID == 'MiD08':
dataRenamed['hhPersonID'] = (dataRenamed['hhID'].astype('string') +
dataRenamed['personID'].astype('string')).astype('int')
self.data = dataRenamed
print('Finished harmonization of variables')
def createReplacementDict(self, datasetID: str, dictRaw: dict) -> dict:
"""
Creates the mapping dictionary from raw data variable names to VencoPy internal variable names as specified
in parseConfig.yaml for the specified data set.
:param datasetID: list of strings declaring the datasetIDs to be read in
:param dictRaw: Contains dictionary of the raw data
:return: Dictionary with internal names as keys and raw data column names as values.
"""
if datasetID in dictRaw['datasetID']:
listIndex = dictRaw['datasetID'].index(datasetID)
return {val[listIndex]: key for (key, val) in dictRaw.items()}
else:
raise ValueError(f'Data set {datasetID} not specified in parseConfig variable dictionary.')
def convertTypes(self):
"""
Convert raw column types to predefined python types as specified in parseConfig['inputDTypes'][datasetID]. This is mainly
done for performance reasons. But also in order to avoid index values that are of type int to be cast to float.
The function operates only on self.data and writes back changes to self.data
:return: None
"""
# Filter for dataset specific columns
conversionDict = self.parseConfig['inputDTypes'][self.datasetID]
keys = {iCol for iCol in conversionDict.keys() if iCol in self.data.columns}
self.subDict = {key: conversionDict[key] for key in conversionDict.keys() & keys}
self.data = self.data.astype(self.subDict)
def returnDictBottomValues(self, baseDict: dict, lst: list = []) -> list:
"""
Returns a list of all dictionary values of the last dictionary level (the bottom) of baseDict. The parameter
lst is used as an interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, is used as interface to next recursion
:return: Returns a list with all the bottom dictionary values
"""
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomValues(iVal, lst)
else:
if iVal is not None:
lst.append(iVal)
return lst
def checkFilterDict(self):
"""
        Checking if all values of filter dictionaries are of type list. Currently only the list type itself is checked;
        the list elements are not type-checked (e.g. via all(map(self.__checkStr, val))). Triggers an assert otherwise.
:return: None
"""
assert all(isinstance(val, list) for val in self.returnDictBottomValues(self.__filterDict[self.datasetID])), \
f'All values in filter dictionaries have to be lists, but are not'
def returnDictBottomKeys(self, baseDict: dict, lst: list = None) -> list:
"""
Returns the lowest level keys of baseDict and returns all of them as a list. The parameter lst is used as
interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, used as interface between recursion levels
:return: Returns a list with all the bottom level dictionary keys
"""
if lst is None:
lst = []
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomKeys(iVal, lst)
else:
if iVal is not None:
lst.append(iKey)
return lst
def filter(self):
"""
Wrapper function to carry out filtering for the four filter logics of including, excluding, greaterThan and
smallerThan. If a filterDict is defined with a different key, a warning is thrown. The function operates on
self.data class-internally.
:return: None
"""
print(f'Starting filtering, applying {len(self.returnDictBottomKeys(self.__filterDict[self.datasetID]))} filters.')
ret = pd.DataFrame(index=self.data.index)
# Future releases: as discussed before we could indeed work here with a plug and pray approach.
# we would need to introduce a filter manager and a folder structure where to look for filters.
# this is very similar code than the one from ioproc. If we want to go down this route we should
# take inspiration from the code there. It was not easy to get it right in the first place. This
# might be easy to code but hard to implement correctly.
for iKey, iVal in self.__filterDict[self.datasetID].items():
if iKey == 'include':
ret = ret.join(self.setIncludeFilter(iVal, self.data.index))
elif iKey == 'exclude':
ret = ret.join(self.setExcludeFilter(iVal, self.data.index))
elif iKey == 'greaterThan':
ret = ret.join(self.setGreaterThanFilter(iVal, self.data.index))
elif iKey == 'smallerThan':
ret = ret.join(self.setSmallerThanFilter(iVal, self.data.index))
else:
warnings.warn(f'A filter dictionary was defined in the parseConfig with an unknown filtering key. '
f'Current filtering keys comprise include, exclude, smallerThan and greaterThan.'
f'Continuing with ignoring the dictionary {iKey}')
self.data = self.data[ret.all(axis='columns')]
self.filterAnalysis(ret)
def setIncludeFilter(self, includeFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for include filter dict from parseConfig.yaml
:param includeFilterDict: Dictionary of include filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a data frame with individuals using car as a mode of transport
"""
incFilterCols = pd.DataFrame(index=dataIndex, columns=includeFilterDict.keys())
for incCol, incElements in includeFilterDict.items():
incFilterCols[incCol] = self.data[incCol].isin(incElements)
return incFilterCols
def setExcludeFilter(self, excludeFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for exclude filter dict from parseConfig.yaml
:param excludeFilterDict: Dictionary of exclude filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a filtered data frame with exclude filters
"""
exclFilterCols = pd.DataFrame(index=dataIndex, columns=excludeFilterDict.keys())
for excCol, excElements in excludeFilterDict.items():
exclFilterCols[excCol] = ~self.data[excCol].isin(excElements)
return exclFilterCols
def setGreaterThanFilter(self, greaterThanFilterDict: dict, dataIndex):
"""
Read-in function for greaterThan filter dict from parseConfig.yaml
:param greaterThanFilterDict: Dictionary of greater than filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return:
"""
greaterThanFilterCols = pd.DataFrame(index=dataIndex, columns=greaterThanFilterDict.keys())
for greaterCol, greaterElements in greaterThanFilterDict.items():
greaterThanFilterCols[greaterCol] = self.data[greaterCol] >= greaterElements.pop()
if len(greaterElements) > 0:
warnings.warn(f'You specified more than one value as lower limit for filtering column {greaterCol}.'
f'Only considering the last element given in the parseConfig.')
return greaterThanFilterCols
def setSmallerThanFilter(self, smallerThanFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for smallerThan filter dict from parseConfig.yaml
:param smallerThanFilterDict: Dictionary of smaller than filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a data frame of trips covering a distance of less than 1000 km
"""
smallerThanFilterCols = pd.DataFrame(index=dataIndex, columns=smallerThanFilterDict.keys())
for smallerCol, smallerElements in smallerThanFilterDict.items():
smallerThanFilterCols[smallerCol] = self.data[smallerCol] <= smallerElements.pop()
if len(smallerElements) > 0:
warnings.warn(f'You specified more than one value as upper limit for filtering column {smallerCol}.'
f'Only considering the last element given in the parseConfig.')
return smallerThanFilterCols
def filterAnalysis(self, filterData: pd.DataFrame):
"""
Function supplies some aggregate info of the data after filtering to the user Function does not change any
class attributes
:param filterData:
:return: None
"""
lenData = sum(filterData.all(axis='columns'))
boolDict = {iCol: sum(filterData[iCol]) for iCol in filterData}
print(f'The following values were taken into account after filtering:')
pprint.pprint(boolDict)
print(f"All filters combined yielded a total of {lenData} was taken into account")
print(f'This corresponds to {lenData / len(filterData)* 100} percent of the original data')
def filterConsistentHours(self):
"""
Filtering out records where starting hour is after end hour but trip takes place on the same day.
These observations are data errors.
:return: No returns, operates only on the class instance
"""
if self.datasetID == 'MiD17' or self.datasetID == 'MiD08':
dat = self.data
self.data = dat.loc[(dat['tripStartClock'] <= dat['tripEndClock']) | (dat['tripEndNextDay'] == 1), :]
# If we want to get rid of tripStartClock and tripEndClock (they are redundant variables)
# self.data = dat.loc[pd.to_datetime(dat.loc[:, 'tripStartHour']) <= pd.to_datetime(dat.loc[:, 'tripEndHour']) |
# (dat['tripEndNextDay'] == 1), :]
def addStrColumnFromVariable(self, colName: str, varName: str):
"""
        Replaces each occurrence of a MiD/KiD variable e.g. 1,2,...,7 for weekdays with an explicitly mapped string e.g.
'MON', 'TUE',...,'SUN'.
:param colName: Name of the column in self.data where the explicit string info is stored
:param varName: Name of the VencoPy internal variable given in config/parseConfig['dataVariables']
:return: None
"""
self.data.loc[:, colName] \
= self.data.loc[:, varName].replace(self.parseConfig['Replacements'][self.datasetID][varName])
def addStrColumns(self, weekday=True, purpose=True):
"""
Adds string columns for either weekday or purpose.
:param weekday: Boolean identifier if weekday string info should be added in a separate column
:param purpose: Boolean identifier if purpose string info should be added in a separate column
:return: None
"""
if weekday:
self.addStrColumnFromVariable(colName='weekdayStr', varName='tripStartWeekday')
if purpose:
self.addStrColumnFromVariable(colName='purposeStr', varName='tripPurpose')
def composeTimestamp(self, data: pd.DataFrame = None,
colYear: str = 'tripStartYear',
colWeek: str = 'tripStartWeek',
colDay: str = 'tripStartWeekday',
colHour: str = 'tripStartHour',
colMin: str = 'tripStartMinute',
colName: str = 'timestampStart') -> np.datetime64:
"""
:param data: a data frame
:param colYear: year of start of a particular trip
:param colWeek: week of start of a particular trip
:param colDay: weekday of start of a particular trip
:param colHour: hour of start of a particular trip
:param colMin: minute of start of a particular trip
:param colName:
:return: Returns a detailed time stamp
"""
data[colName] = pd.to_datetime(data.loc[:, colYear], format='%Y') + \
pd.to_timedelta(data.loc[:, colWeek] * 7, unit='days') + \
pd.to_timedelta(data.loc[:, colDay], unit='days') + \
pd.to_timedelta(data.loc[:, colHour], unit='hour') + \
pd.to_timedelta(data.loc[:, colMin], unit='minute')
# return data
def composeStartAndEndTimestamps(self):
"""
:return: Returns start and end time of a trip
"""
self.composeTimestamp(data=self.data) # Starting timestamp
self.composeTimestamp(data=self.data, # Ending timestamps
colHour='tripEndHour',
colMin='tripEndMinute',
colName='timestampEnd')
def updateEndTimestamp(self):
"""
        Adds one day to 'timestampEnd' for trips flagged as ending on the following day (tripEndNextDay == 1).
        :return: None
"""
endsFollowingDay = self.data['tripEndNextDay'] == 1
self.data.loc[endsFollowingDay, 'timestampEnd'] = self.data.loc[endsFollowingDay,
'timestampEnd'] + pd.offsets.Day(1)
    def updateEndTimestamps(self) -> None:
"""
        :return: None
"""
self.updateEndTimestamp()
def harmonizeVariablesGenericIdNames(self):
"""
"""
self.data['genericID'] = self.data[str(self.parseConfig['IDVariablesNames'][self.datasetID])]
print('Finished harmonization of ID variables')
def process(self):
"""
Wrapper function for harmonising and filtering the dataset.
"""
self.selectColumns()
self.harmonizeVariables()
self.convertTypes()
self.checkFilterDict()
self.filter()
self.filterConsistentHours()
self.addStrColumns()
self.composeStartAndEndTimestamps()
self.updateEndTimestamps()
self.harmonizeVariablesGenericIdNames()
print('Parsing completed')
class ParseMiD(DataParser):
# Inherited data class to differentiate between abstract interfaces such as vencopy internal
# variable namings and data set specific functions such as filters etc. Currently not used (06/14/2021)
pass
class ParseKiD(DataParser):
# Inherited data class to differentiate between abstract interfaces such as vencopy internal
# variable namings and data set specific functions such as filters etc.
def __init__(self, configDict: dict, datasetID: str):
super().__init__(configDict, datasetID)
def loadData(self):
        rawDataPathTrips = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['tripsDataRaw']
        rawDataPathVehicles = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['vehiclesDataRaw']
rawDataTrips = pd.read_stata(rawDataPathTrips, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
rawDataVehicles = pd.read_stata(rawDataPathVehicles, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
rawDataVehicles.set_index('k00', inplace=True)
rawData = rawDataTrips.join(rawDataVehicles, on='k00')
self.rawData = rawData
print(f'Finished loading {len(self.rawData)} rows of raw data of type .dta')
def addStrColumns(self, weekday=True, purpose=True):
"""
Adds string columns for either weekday or purpose.
:param weekday: Boolean identifier if weekday string info should be added in a separate column
:param purpose: Boolean identifier if purpose string info should be added in a separate column
:return: None
"""
# from tripStartDate retrieve tripStartWeekday, tripStartWeek, tripStartYear, tripStartMonth, tripStartDay
# from tripStartClock retrieve tripStartHour, tripStartMinute
# from tripEndClock retrieve tripEndHour, tripEndMinute
self.data['tripStartDate'] = pd.to_datetime(self.data['tripStartDate'], format='%d.%m.%Y')
self.data['tripStartYear'] = self.data['tripStartDate'].dt.year
self.data['tripStartMonth'] = self.data['tripStartDate'].dt.month
self.data['tripStartDay'] = self.data['tripStartDate'].dt.day
self.data['tripStartWeekday'] = self.data['tripStartDate'].dt.weekday
self.data['tripStartWeek'] = self.data['tripStartDate'].dt.isocalendar().week
self.data['tripStartHour'] = pd.to_datetime(self.data['tripStartClock'], format='%H:%M').dt.hour
self.data['tripStartMinute'] = pd.to_datetime(self.data['tripStartClock'], format='%H:%M').dt.minute
self.data['tripEndHour'] = pd.to_datetime(self.data['tripEndClock'], format='%H:%M').dt.hour
self.data['tripEndMinute'] = | pd.to_datetime(self.data['tripEndClock'], format='%H:%M') | pandas.to_datetime |
import pandas as pd
from datetime import datetime as dt
# Here we should fetch our data from the Twitter API but since now we have to
# apply for getting API's credentials we pass this step for the sake of the tutorial.
# We use data.csv as source of tweets.
LOCAL_DIR = '/tmp/'
def main():
# Create the dataframe from data.csv
tweets = | pd.read_csv('~/airflow/dags/data/data.csv', encoding='latin1') | pandas.read_csv |
import pandas as pd
import numpy as np
index = | pd.date_range('1/1/2000', periods=8) | pandas.date_range |
import os
from pathlib import Path
import json
import pandas as pd
from google.cloud import bigquery
from datetime import datetime, timedelta, timezone
JST = timezone(timedelta(hours=+9), 'JST')
class Database:
def __init__(self):
super().__init__()
self._usr_table = pd.DataFrame()
self._ch_table = pd.DataFrame()
self._msg_table = pd.DataFrame()
self._JST = timezone(timedelta(hours=+9), 'JST')
# property
@property
    def usr_table(self) -> pd.DataFrame:
return self._usr_table
@usr_table.setter
def usr_table(self, value: pd.DataFrame):
self._usr_table = value # setter
@property
    def ch_table(self) -> pd.DataFrame:
return self._ch_table
@ch_table.setter
def ch_table(self, value: pd.DataFrame):
self._ch_table = value # setter
@property
    def msg_table(self) -> pd.DataFrame:
return self._msg_table
@msg_table.setter
def msg_table(self, value: pd.DataFrame):
self._msg_table = value # setter
# method
def _mk_usr_table(self, usr_dict: dict) -> bool:
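        # Build parallel uid / display-name lists from the Slack users payload, skipping
        # deleted accounts, then store them as the user lookup table.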
uid_list = []
uname_list = []
for usr_ditem in usr_dict:
if usr_ditem['deleted'] is True:
continue
uid_list.append(usr_ditem['id'])
uname_list.append(usr_ditem['profile']['real_name_normalized'])
self._usr_table = | pd.DataFrame({'uid': uid_list, 'uname': uname_list}) | pandas.DataFrame |
import warnings
from typing import Union
import re
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn import metrics
from feature_engine.imputation import (
CategoricalImputer,
AddMissingIndicator,
MeanMedianImputer)
from feature_engine.encoding import OneHotEncoder, RareLabelEncoder
from feature_engine.selection import DropConstantFeatures, DropFeatures
from xgboost.sklearn import XGBClassifier
from utils import str_cleaner_df
def create_pipeline(params : dict = None):
"""
Create sklearn.pipeline.Pipeline
Parameters
----------
params : dict
dictionary of parameters for the pipeline
Returns
-------
sklearn.pipeline.Pipeline
"""
# pipeline for numeric variables
p_num = Pipeline([
("num_nan_ind",AddMissingIndicator(missing_only = True)),
("rmmean",MeanMedianImputer()),
("drop_quasi_constant",DropConstantFeatures(tol=0.97))
])
# pipeline for categorical variables
p_cat = Pipeline([
("fill_cat_nas",CategoricalImputer(fill_value = 'MISSING')),
("rlc",RareLabelEncoder()),
("one_hot_encoder", OneHotEncoder())
])
# list of pipelines to combine
transformers = [
("num",p_num,make_column_selector(dtype_include = np.number)),
("cat",p_cat,make_column_selector(dtype_include = object))
]
# combine pipelines and add XGBClassifier
col_transforms = ColumnTransformer(transformers)
p = Pipeline([
("col_transformers",col_transforms),
("xgb", XGBClassifier(min_child_weight=1, gamma=0, objective= 'binary:logistic',
nthread=4, scale_pos_weight=1, seed=1, gpu_id=0, tree_method = 'gpu_hist'))
])
if params:
p.set_params(**params)
return p
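# Illustrative usage sketch (names are assumptions; note the XGBClassifier above is configured
# for GPU training via tree_method='gpu_hist', so a CUDA-capable device is expected):
# pipe = create_pipeline(params={"xgb__n_estimators": 300, "xgb__max_depth": 4})
# pipe.fit(X_train, y_train)
# val_auc = metrics.roc_auc_score(y_val, pipe.predict_proba(X_val)[:, 1])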
def name_tracker(p, X):
"""
Track names through pipeline. This function is
specific to the architecture of the given pipeline.
If the architecture of the pipeline changes, this
function will need to change.
TODO: Figure out if this can be made
pipeline-architecture independent
Parameters
----------
p : sklearn.pipeline.Pipeline
must have already been fit
X : pandas.DataFrame
the input to the pipeline
Returns
-------
pandas.DataFrame
contains feature importances
"""
cols_in = X.columns.tolist()
df = pd.DataFrame({"cols":cols_in,"cols_in":cols_in})
# Indicators for Missing Numeric Columns
nni = p['col_transformers'].transformers_[0][1]['num_nan_ind']
try:
nan_num_ind = pd.DataFrame({
"cols":[i+"_na" for i in nni.variables_],
"cols_in":nni.variables_})
df = | pd.concat([df, nan_num_ind]) | pandas.concat |
# -*- coding:utf-8 -*-
##############################################################
# Created Date: Wednesday, September 2nd 2020
# Contact Info: <EMAIL>
# Author/Copyright: Mr. <NAME>
##############################################################
import random, urllib3, json, requests, math, plotly
import pandas as pd
import datetime as dt
import numpy as np
import plotly.figure_factory as ff
from time import sleep
from plotly import io as pio
from deap import algorithms
from datetime import datetime
import random,datetime,plotly,math
from itertools import combinations
from deap import base, creator, tools
import random, numpy,sys
import time
def generate3tables():
Table_SchedulingInfo_New = pd.DataFrame()
Table_Changeovers_New = pd.DataFrame()
Table_ShiftCalendar_New = pd.DataFrame()
Table_SchedulingInfo_New_Extended = pd.DataFrame()
LenOfSimulatedData = 100
DataRange_StartOfStart_EndOfStart = 10
DataDiff_StartDate_EndDate = 7
ProcessingTime_Max = 300
ProcessingTime_Min = 1
ChangeOverTime_Max = 30
ChangeOverTime_Min = 5
ProductionLine_Count = 4
Family_Count = 6
Priority_Count = 3
Families_List = []
ProductionLines_List = []
Priorities_List = []
start_date = []
end_date = []
processing_time = []
family_type = []
ProductionLine = []
workorder_num = []
changeover_time = []
Priority = []
Families_List = [np.append(Families_List,"Family_"+str(i+1)).tolist() for i in range (Family_Count )]
ProductionLines_List = [np.append(ProductionLines_List, int(i+1)).tolist() for i in range (ProductionLine_Count)]
Priorities_List = [np.append(Priorities_List, str(i+1)).tolist() for i in range (Priority_Count)]
newFamily_List = []
for fly in Families_List:
newFamily_List = np.append(newFamily_List, fly)
Families_List = newFamily_List
# Generate the Lists of Families_List and Production Lines
start = datetime.datetime.strptime(datetime.datetime.today().strftime("%Y-%m-%d"), "%Y-%m-%d")
date_list = [(start + datetime.timedelta(days=x)).strftime("%Y-%m-%d") for x in range(0, DataRange_StartOfStart_EndOfStart)]
for i in range(LenOfSimulatedData):
start_date = np.append(start_date, random.choice(date_list))
end_date = np.append(end_date,(datetime.datetime.strptime(random.choice(date_list), '%Y-%m-%d')+
datetime.timedelta(days=DataDiff_StartDate_EndDate)).strftime("%Y-%m-%d"))
processing_time = np.append(processing_time,random.randint(ProcessingTime_Min,ProcessingTime_Max))
family_type = np.append(family_type, random.choice(Families_List))
ProductionLine = np.append(ProductionLine, random.choice(ProductionLines_List))
Priority = np.append(Priority, random.choice(Priorities_List))
workorder_num = np.append(workorder_num, i)
for j in range(Family_Count):
changeover_time = np.append(changeover_time, random.randint(ChangeOverTime_Min,ChangeOverTime_Max))
Table_SchedulingInfo_New["Start_date"] = start_date
Table_SchedulingInfo_New["Due_date"] = end_date
Table_SchedulingInfo_New["Processing_time"] = processing_time
Table_SchedulingInfo_New["Family"] = family_type
Table_SchedulingInfo_New["ProductionLine"] = ProductionLine
Table_SchedulingInfo_New["Priority"] = Priority
Table_SchedulingInfo_New["ChangeoverSort"] = Table_SchedulingInfo_New["Family"]
Table_SchedulingInfo_New["WorkOrderNum"] = workorder_num
Lines = [i+1 for i in range(ProductionLine_Count)]
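    # All subsets of the production lines (powerset); the empty subset is removed below so that
    # every work order gets at least one eligible line drawn at random.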
Possible_Com_Of_Lines = sum([list(map(list, combinations(Lines, i))) for i in range(len(Lines) + 1)], [])
del Possible_Com_Of_Lines[0]
WO_Num = 0
for index, row in Table_SchedulingInfo_New.iterrows():
OpLines = random.choice(Possible_Com_Of_Lines)
Option_Lines_Len = len(OpLines)
for i in range(Option_Lines_Len):
            Table_SchedulingInfo_New_Extended = Table_SchedulingInfo_New_Extended.append(
                {'OptionalLine': OpLines[i],
                 'BasicStartDate': row.Start_date,
                 'DeliveryDate': row.Due_date,
                 'ProcessingTimeMins': row.Processing_time + i,
                 'FamilyName': row.Family,
                 'ProductionLine': row.ProductionLine,
                 'WorkOrderNum': row.WorkOrderNum,
                 'MaterialPriority': row.ProductionLine,
                 'ChangeoverSort': row.ChangeoverSort,
                 'Priority': row.Priority}, ignore_index=True)
WO_Num += 1
Table_SchedulingInfo_New["Resource"] = Table_SchedulingInfo_New["Family"]
Table_SchedulingInfo_New["Task"] = Table_SchedulingInfo_New["ProductionLine"]
Table_SchedulingInfo_New["Start"] = Table_SchedulingInfo_New["Start_date"]
Table_SchedulingInfo_New["Finish"] = Table_SchedulingInfo_New["Due_date"]
color_dict = dict(zip(Table_SchedulingInfo_New.Resource.unique(),['rgb({},{},{})'.format(i[0],i[1],i[2])
for i in list(np.random.randint(255, size = (len(Table_SchedulingInfo_New.Resource.unique()),3)))]))
fig = ff.create_gantt(Table_SchedulingInfo_New.to_dict('records'), colors = color_dict, index_col='Resource', show_colorbar=True, group_tasks=True)
Table_Changeovers_New['ToChangeOver'] = Families_List
Table_Changeovers_New['MaxChangeOverTimeMin'] = changeover_time
# print(Table_Changeovers_New.ToChangeOver)
for date in date_list:
Table_ShiftCalendar_New = Table_ShiftCalendar_New.append({"ProductionDate":date,"ShiftAStart": "05:15:00", "ShiftAEnd":"15:20:00", "ShiftBStart":"15:30:00","ShiftBEnd":"01:35:00"}, ignore_index=True)
# Table_SchedulingInfo_New_Extended.to_csv('Table_SchedulingInfo_New.csv',index=False)
# Table_Changeovers_New.to_csv('Table_Changeovers_New.csv',index=False)
# Table_ShiftCalendar_New.to_csv('Table_ShiftCalendar_New.csv',index=False)
return Table_SchedulingInfo_New_Extended,Table_Changeovers_New,Table_ShiftCalendar_New
class ClosedLoopScheduling:
def __init__(self):
# plotly.io.orca.config.executable = '/Users/roche/anaconda3/pkgs/plotly-orca-1.3.1-1/orca.cmd'
# plotly.io.orca.config.save()
self.__Import_And_CleanData() # This line would get the intial solution, and optional lines to chose for each WO
        self.HardConstraintPenalty = 150
def __len__(self):
return self.WO_SIZE
def __Import_And_CleanData(self):
JobID = random.randint(1,1000)
self.Table_SchedulingInfo,self.Table_ChangeOverInfo,self.Table_CalendarInfo = generate3tables()
self.Table_SchedulingInfo['ProductionLineCode_Cap'] = self.Table_SchedulingInfo.OptionalLine#.apply(lambda x: x.split('')[-1])
self.Table_CalendarInfo["ProductionDate_ShiftA"] = pd.to_datetime(self.Table_CalendarInfo["ProductionDate"] +' '+ self.Table_CalendarInfo["ShiftAStart"])
self.Table_CalendarInfo['ShiftAStart'] = pd.to_datetime(self.Table_CalendarInfo['ShiftAStart'] )
self.Table_CalendarInfo['ShiftAEnd'] = pd.to_datetime(self.Table_CalendarInfo['ShiftAEnd'] )
self.Table_CalendarInfo["ShiftA_deltaT_minutes"] = ((self.Table_CalendarInfo["ShiftAEnd"] - self.Table_CalendarInfo["ShiftAStart"]).dt.total_seconds())/60
self.Table_CalendarInfo["ShiftB_deltaT_minutes"] = self.Table_CalendarInfo["ShiftA_deltaT_minutes"]
FamilyGp_DupCount = self.Table_SchedulingInfo.groupby('FamilyName').size().sort_values(ascending=False).to_frame('DuplicateCount') # save the results in a dataframe [FamilyName, DuplicateCount]
self.Schedule_Info =pd.DataFrame()
for FamulyNameGroupItem, _ in FamilyGp_DupCount.iterrows():
df_grouped = self.Table_SchedulingInfo.loc[self.Table_SchedulingInfo.FamilyName == FamulyNameGroupItem] # df_grouped.loc[~df_grouped.ChangeoverSort.isin(self.Table_ChangeOverInfo.ToChangeOver.tolist()), 'ChangeoverSort'] = '1020Other'
self.Schedule_Info = self.Schedule_Info.append(df_grouped, ignore_index = True)
self.Schedule_Info.assign(MaxChangeOverTimeMin="")
        # 5.1. Add Maximum ChangeOver Time of each family to the self.Schedule_Info table
for i, val in enumerate(self.Table_ChangeOverInfo.ToChangeOver.tolist()):
self.Schedule_Info.loc[self.Schedule_Info.ChangeoverSort == val, 'MaxChangeOverTimeMin'] = self.Table_ChangeOverInfo.MaxChangeOverTimeMin.iloc[i]
# ----------------------- 6. Create a completely new table to save the scheduled work (!!! 6.1. Sort WPT based on Family Group ) - #
self.minor_ChangeOver_Mins = 2.53
## print('#---------------- 6.1. Sort WPT based on Family Group ---------------#')
self.Schedule_Info["Optional_Lines"] = self.Schedule_Info.ProductionLineCode_Cap
# # ==================================== Objective Function Construction ======================================= #
self.Unique_WO_Array = self.Schedule_Info['WorkOrderNum'].unique()
self.WO_SIZE = len(self.Unique_WO_Array)
Unique_WO_Df = pd.DataFrame({'WorkOrderNum':self.Unique_WO_Array})
## print("# 2D Matrics for each work order:O_Lines,P_Times,WP_Times ")
O_Lines = [list((self.Schedule_Info['Optional_Lines'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()]
P_Times = [list((self.Schedule_Info['ProcessingTimeMins'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()]
## print("# 2D-1. Zaro paCurrent_Starting to make sure all optionals lines have same number of lines")
self.O_Lines_Array = np.array([i + [0]*(len(max(O_Lines, key=len))-len(i)) for i in O_Lines])
self.P_Times_Array = np.array([i + [0]*(len(max(P_Times, key=len))-len(i)) for i in P_Times])
## print("# 2D-2. If an element equalt to 0, relace with previous value in the row")
for idx, item in np.ndenumerate(self.O_Lines_Array):
if item == 0:
self.O_Lines_Array[(idx[0],idx[1])] = self.O_Lines_Array[(idx[0],idx[1]-1)]
self.P_Times_Array[(idx[0],idx[1])] = self.P_Times_Array[(idx[0],idx[1]-1)]
## print("# 1D Matrics for each work order:self.CV_Times, self.CV_Sorts, self.Fmily_T, self.BStart_Dates, self.Del_Dates")
self.Schedule_Info['BasicEndDate'] = self.Schedule_Info['DeliveryDate']
self.CV_Times = np.array([list(set(self.Schedule_Info['MaxChangeOverTimeMin'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
self.CV_Sorts = np.array([list(set(self.Schedule_Info['ChangeoverSort'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
self.Fmily_T = np.array([list(set(self.Schedule_Info['FamilyName'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
self.BStart_Dates = np.array([list(set(self.Schedule_Info['BasicStartDate'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
def Run_ToGetAllLines_Objectives(self,Chromosome_Solution):
Performances_df = pd.DataFrame()
line_viol = self.CandidateViolation(Chromosome_Solution)
for Line_Code in range(1,5):
self.__Objectives_Of_Each_Line(Line_Code,Chromosome_Solution)
Performances_df = Performances_df.append(pd.DataFrame({'LineName':'Line '+str(Line_Code),
'MakeSpanTime':(self.Line_Total_PT+self.Line_Total_CV_Times)/(60)},
index=[0]), ignore_index = True)
MkSpan_Dif = round(Performances_df['MakeSpanTime'].max() - Performances_df['MakeSpanTime'].min(),2)
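        # Weighted single objective: each infeasible line assignment is penalized (weight 150), plus the
        # makespan imbalance between the longest and shortest lines, converted from hours to days via /24 (weight 50)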
return 150*line_viol+50*(MkSpan_Dif/24)
def Final_Run(self,Chromosome_Solution):
print(Chromosome_Solution)
Performances_df = pd.DataFrame()
PT_Mini = 0
line_viol = self.CandidateViolation(Chromosome_Solution)
self.OutputData_Of_Lines =pd.DataFrame()
OutputData =pd.DataFrame()
Unique_Lines = self.Schedule_Info.Optional_Lines.nunique()
for Line_Code in range(1,Unique_Lines+1):
Schedule_Of_The_Line,Start,Finish = self.__FinalRun_Obj_Of_Each_Line_SaveData(Line_Code,Chromosome_Solution)
self.OutputData_Of_Lines = self.OutputData_Of_Lines.append(Schedule_Of_The_Line)
Schedule_Of_The_Line.Start = Start
Schedule_Of_The_Line.Finish = Finish
OutputData = OutputData.append(Schedule_Of_The_Line)
Performances_df = Performances_df.append(pd.DataFrame({'LineName':'Line '+str(Line_Code),
'FamilyAtDayStartFactor':1,
'ProcessingTime':self.Line_Total_PT/(60),
'ChangeoverTime':self.Line_Total_CV_Times/(60),
'MakeSpanTime':(self.Line_Total_PT+self.Line_Total_CV_Times)/(60)},
index=[0]), ignore_index = True)
MkSpan_Dif = round(Performances_df['MakeSpanTime'].max() - Performances_df['MakeSpanTime'].min(),2)
PT_Mini =round(Performances_df['ProcessingTime'].sum(),2)
CVTimes_Mini =round(Performances_df['ChangeoverTime'].sum(),2)
Performances_df = Performances_df.append(pd.DataFrame({'LineName':'AllLines',
'FamilyAtDayStartFactor':1,
'ProcessingTime':PT_Mini,
'ChangeoverTime':CVTimes_Mini,
'MakeSpanTime':(PT_Mini+CVTimes_Mini)},
index=[0]), ignore_index = True)
single_Objective = 150*line_viol+50*(MkSpan_Dif/24)
OutputData = OutputData.rename(columns={"Task": "LineName", "Start":"BasicStartDate", "Finish":"BasicEndDate", "Resource":"Family"})
def Plot_Gantt_Chart(self):
# ------------------------------- Ploting the results by using Plotly Gantt Chart ---------------------------------------------- #
print(f'{"The shape of the OutputData_Of_Lines: "}{self.OutputData_Of_Lines.shape[0]}')
color_dict = dict(zip(self.OutputData_Of_Lines.Resource.unique(),['rgb({},{},{})'.format(i[0],i[1],i[2]) for i in list(np.random.randint(255, size = (len(self.OutputData_Of_Lines.Resource.unique()),3)))]))
fig = ff.create_gantt(self.OutputData_Of_Lines.to_dict(orient = 'records'),
colors = color_dict,
index_col = "Resource",
title = "Genetic Algorithm based Optimization",
show_colorbar = True,
bar_width = 0.3,
showgrid_x = False,
showgrid_y = True,
show_hover_fill=True)
fig_html = pio.to_html(fig)
# fig.show()
# print(fig_html)
# fig.write_image(r"CLS_GanttChart.png")
return fig_html
def CandidateViolation(self,Chromosome_Solution):
line_viol = 0
for idx, item in np.ndenumerate(Chromosome_Solution):
if item not in self.O_Lines_Array[idx[0]]:
line_viol += 1
return line_viol
def __Objectives_Of_Each_Line(self,Line_Code,Chromosome_Solution):
## print('#---------------- 6.2. Reset the initial start and end -------------#')
Line_Curr_End = self.Table_CalendarInfo.ProductionDate_ShiftA + pd.to_timedelta(self.minor_ChangeOver_Mins*60, unit='s')
Line_Curr_End = Line_Curr_End[0]
Shift_AandB_Period = self.Table_CalendarInfo.ShiftA_deltaT_minutes.values[0] + self.Table_CalendarInfo.ShiftB_deltaT_minutes.values[0]
ShiftBAPeriod = 220 # in mins (3 hours 40 minutes)
self.Line_CumMakeSpan = 0
self.Line_Total_PT = 0
self.Line_Total_CV_Times = 0
Line_WO_Idxes = []
Chromosome_Solution = np.array(Chromosome_Solution)
for ii, item in np.ndenumerate(np.array(np.where(Chromosome_Solution == Line_Code))):
Line_WO_Idxes.append(item)
        # 2D Matrices
Line_CV_Sorts = self.CV_Sorts[(Line_WO_Idxes),0]
self.Line_Families = self.Fmily_T[(Line_WO_Idxes),0]
Line_CV_Times = self.CV_Times[(Line_WO_Idxes),0]
## print("6.2. Use all lines WorkOrder indexes to find total CV time and Family Sort Change ")
Previous_CV = Line_CV_Sorts[0]
self.CV_Times_Each_Order = []
for CV_idx, CV_ele in np.ndenumerate(Line_CV_Sorts):
if CV_ele != Previous_CV:
self.Line_Total_CV_Times += Line_CV_Times[CV_idx]
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,Line_CV_Times[CV_idx])
else:
self.Line_Total_CV_Times += self.minor_ChangeOver_Mins
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,self.minor_ChangeOver_Mins)
Previous_CV = CV_ele
## print("6.3.=========== Use all lines WorkOrder indexes to find the total processing time ================= ")
self.P_Times_Each_Order = []
self.Line_Late_Falg = []
curr_line_idx = 0
for _, WO_idx in np.ndenumerate(Line_WO_Idxes):
for i in np.where(self.O_Lines_Array[WO_idx]==Line_Code):
for j in i:
curr_line_idx=j
self.Line_Total_PT += self.P_Times_Array[(WO_idx,curr_line_idx)]
self.P_Times_Each_Order = np.append(self.P_Times_Each_Order,self.P_Times_Array[(WO_idx,curr_line_idx)])
def __FinalRun_Obj_Of_Each_Line_SaveData(self,Line_Code,Chromosome_Solution):
## print('#---------------- 6.2. Reset the initial start and end -------------#')
Line_Curr_End = self.Table_CalendarInfo.ProductionDate_ShiftA + pd.to_timedelta(self.minor_ChangeOver_Mins*60, unit='s')
Line_Curr_End = Line_Curr_End[0]
self.PlanStartTime = Line_Curr_End
intial_start_date = Line_Curr_End
Shift_AandB_Period = self.Table_CalendarInfo.ShiftA_deltaT_minutes.values[0] + self.Table_CalendarInfo.ShiftB_deltaT_minutes.values[0]
Shift_A_Period = self.Table_CalendarInfo.ShiftA_deltaT_minutes.values[0]
ShiftBAPeriod = 220 # in mins (3 hours 40 minutes)
self.Line_CumMakeSpan = 0
self.Line_Total_PT = 0
self.Line_Total_CV_Times = 0
## print("4. Obtain the line work order indexes for the line by determine at what positon line 1/2/3/4 is used")
Line_WO_Idxes = []
Chromosome_Solution = np.array(Chromosome_Solution)
for ii, item in np.ndenumerate(np.array(np.where(Chromosome_Solution == Line_Code))):
Line_WO_Idxes.append(item)
## print("5. Find the processing time, setup time, CV sorts, families, line calendar start time and delivery time for each line")
Line_WO_Num = self.Unique_WO_Array[(Line_WO_Idxes)]
        # 2D Matrices
Line_CV_Sorts = self.CV_Sorts[(Line_WO_Idxes),0]
self.Line_Families = self.Fmily_T[(Line_WO_Idxes),0]
Line_CV_Times = self.CV_Times[(Line_WO_Idxes),0]
Previous_CV = Line_CV_Sorts[0]
self.CV_Times_Each_Order = []
for CV_idx, CV_ele in np.ndenumerate(Line_CV_Sorts):
if CV_ele != Previous_CV:
self.Line_Total_CV_Times += Line_CV_Times[CV_idx]
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,Line_CV_Times[CV_idx])
else:
self.Line_Total_CV_Times += self.minor_ChangeOver_Mins
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,self.minor_ChangeOver_Mins)
Previous_CV = CV_ele
## print("6.3. Use all lines WorkOrder indexes to find the total processing time ")
self.P_Times_Each_Order = []
curr_line_idx = 0
for _, WO_idx in np.ndenumerate(Line_WO_Idxes):
for i in np.where(self.O_Lines_Array[WO_idx]==Line_Code):
for j in i:
curr_line_idx=j
self.Line_Total_PT += self.P_Times_Array[(WO_idx,curr_line_idx)]
self.P_Times_Each_Order = np.append(self.P_Times_Each_Order,self.P_Times_Array[(WO_idx,curr_line_idx)])
## print("# 6.3.1. Define a dataframe to save all results for plotting")
LineSequence = 0
Schedule_Of_The_Line =pd.DataFrame()
for P_Time_idx, P_Time_ele in np.ndenumerate(self.P_Times_Each_Order):
Line_Remainder = self.Line_CumMakeSpan%(Shift_AandB_Period)
Line_Curr_CV_Time = self.CV_Times_Each_Order[P_Time_idx]
Line_Curr_P_Time = self.P_Times_Each_Order[P_Time_idx]
self.Line_CumMakeSpan += Line_Curr_P_Time + Line_Curr_CV_Time
if (Line_Remainder + Line_Curr_P_Time) > Shift_AandB_Period:
SecondP_of_P_Time = Line_Remainder + Line_Curr_P_Time-Shift_AandB_Period
FirstP_of_P_Time = Line_Curr_P_Time - SecondP_of_P_Time
Line_Curr_Start = Line_Curr_End + pd.to_timedelta((Line_Curr_CV_Time)*60, unit='s')
Line_Curr_End = Line_Curr_Start + pd.to_timedelta((FirstP_of_P_Time*60), unit='s')
Schedule_Of_The_Line = Schedule_Of_The_Line.append(pd.DataFrame({'Task':'Line '+str(Line_Code),
'Start':Line_Curr_Start,
'Finish':Line_Curr_End,
'ProcessingTimeInMins':FirstP_of_P_Time,
'WorkOrderNum':str(Line_WO_Num[P_Time_idx]) ,
'Resource': self.Line_Families[P_Time_idx],
'ChangeoverTimeInMins':Line_Curr_CV_Time,
'WorkOrderSplitCounter':0,
},
index=[0]), ignore_index = True)
Line_Curr_Start_b = Line_Curr_End + pd.to_timedelta((ShiftBAPeriod)*60, unit='s')
Line_Curr_End = Line_Curr_Start_b + pd.to_timedelta(SecondP_of_P_Time*60, unit='s')
Schedule_Of_The_Line = Schedule_Of_The_Line.append(pd.DataFrame({'Task':'Line '+str(Line_Code),
'Start':Line_Curr_Start_b,
'Finish':Line_Curr_End,
'ProcessingTimeInMins':Line_Curr_P_Time,
'WorkOrderNum':str(Line_WO_Num[P_Time_idx]) ,
'Resource': self.Line_Families[P_Time_idx],
'ChangeoverTimeInMins':0,
'WorkOrderSplitCounter':0,
},
index=[LineSequence]), ignore_index = True)
else:
                Line_Curr_Start = Line_Curr_End + pd.to_timedelta(Line_Curr_CV_Time*60, unit='s')
import csv
import math
from absl import app, flags
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import numpy as np
from pylot_utils import ProfileEvent, ProfileEvents, fix_pylot_profile
from utils import setup_plot
import pandas as pd
import seaborn as sns
from utils import *
from pylot_color_utils import get_colors
FLAGS = flags.FLAGS
flags.DEFINE_string('base_dir', '', 'Path to the base dir where the logs are')
flags.DEFINE_bool('small_paper_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('stretched', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('paper_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('slide_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('poster_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_string('file_format', 'png', 'File type of the output plot.')
flags.DEFINE_integer('ignore_first_sim_time_ms', 0,
'Ignore data from the first simulation time ms.')
flags.DEFINE_bool('verbose', False, 'Enables verbose logging.')
flags.DEFINE_integer('start_route', 1, 'Id of the first completed route.')
flags.DEFINE_integer('end_route', 9, 'Id of the last completed route.')
flags.DEFINE_integer('num_reps', 5, 'Number of experiment repetitions.')
flags.DEFINE_list('towns', ['1'], 'List of towns.')
flags.DEFINE_bool('plot_histogram', False,
'Plot a single histogram of runtimes.')
flags.DEFINE_bool('plot_multi_histograms', False,
'Plot configs in different subplots.')
flags.DEFINE_bool('plot_violin', False,
                  'Plot config runtimes as violin plots.')
def read_challenge_runtimes(csv_file_path):
csv_file = open(csv_file_path)
csv_reader = csv.reader(csv_file)
sensor_send_runtime = {}
print("WARNING: End-to-end runtime includes sensor send time")
sim_times = []
e2e_runtimes = []
for row in csv_reader:
sim_time = int(row[1])
event_name = row[2]
if event_name == 'e2e_runtime':
e2e_runtime = float(row[3])
if e2e_runtime > 600:
                # Ignoring outlier entries because the policy experiments
# didn't have a deadline exception handler for detection.
# Therefore each run has 1-2 outlier runtimes when a new
# detection model is loaded.
print("WARNING: Ignoring entry {}".format(row))
continue
e2e_runtimes.append(e2e_runtime - sensor_send_runtime[sim_time])
#e2e_runtimes.append(e2e_runtime)
sim_times.append(sim_time)
elif event_name == 'sensor_send_runtime':
sensor_send_runtime[sim_time] = float(row[3])
return sim_times, e2e_runtimes
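# Illustrative note (added, hypothetical values): based on how rows are indexed above, each
# challenge.csv line is assumed to be of the form
#   <unused>,<sim_time_ms>,<event_name>,<runtime_ms>
# e.g.
#   x,5150,sensor_send_runtime,12.4
#   x,5150,e2e_runtime,182.7
# for which sim time 5150 would contribute 182.7 - 12.4 = 170.3 ms to e2e_runtimes.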
def read_data(log_dir_base, town, route, detector, num_reps, segmentation_name,
segmentation_value):
e2e_runtimes = []
for run in range(1, num_reps + 1):
log_dir = '{}_run_{}/'.format(log_dir_base, run)
csv_file = log_dir + 'challenge.csv'
# Get the end-to-end runtimes.
(sim_times, run_e2e) = read_challenge_runtimes(csv_file)
e2e_runtimes = e2e_runtimes + run_e2e
entries = len(e2e_runtimes)
runtimes_df = pd.DataFrame({
'town': [town] * entries,
'route': [route] * entries,
'detector': [detector] * entries,
segmentation_name: [segmentation_value] * entries,
'e2eruntime': e2e_runtimes,
})
return runtimes_df
def main(argv):
matplotlib.rc('font', family='serif', size=7)
matplotlib.rc('text.latex', preamble=r'\usepackage{times,mathptmx}')
matplotlib.rc('text', usetex=True)
matplotlib.rc('legend', fontsize=6)
matplotlib.rc('figure', figsize=(3.33, 2.22))
# matplotlib.rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95)
matplotlib.rc('axes', linewidth=0.5)
matplotlib.rc('lines', linewidth=0.5)
plt.figure(figsize=(3.3, 0.85))
colors = get_colors(['No-Constraints', 'Deadlines', 'Policy'])
towns = [int(town) for town in FLAGS.towns]
detector = 4
runtimes_dfs = []
for town in towns:
for route in range(FLAGS.start_route, FLAGS.end_route + 1):
# log_dir_base = FLAGS.base_dir + \
# 'logs_town_{}_route_{}_timely_True_edet_{}'.format(
# town, route, detector)
# runtimes_df = read_data(log_dir_base, town, route, detector,
# FLAGS.num_reps, 'configuration',
# 'no-deadlines')
# runtimes_dfs.append(runtimes_df)
log_dir_base = FLAGS.base_dir + \
'logs_deadlines_town_{}_route_{}_timely_True_edet_{}'.format(
town, route, detector)
runtimes_df = read_data(log_dir_base, town, route, detector,
FLAGS.num_reps, 'configuration',
'deadlines')
runtimes_dfs.append(runtimes_df)
# log_dir_base = FLAGS.base_dir + \
# 'logs_nopolicy_town_{}_route_{}_timely_True_edet_{}'.format(
# town, route, detector)
# runtimes_df = read_data(
# log_dir_base, town, route, detector, FLAGS.num_reps,
# 'configuration', 'no-policy')
# runtimes_dfs.append(runtimes_df)
log_dir_base = FLAGS.base_dir + \
'logs_policy_town_{}_route_{}_timely_True'.format(
town, route)
runtimes_df = read_data(log_dir_base, town, route, detector,
FLAGS.num_reps, 'configuration', 'policy')
runtimes_dfs.append(runtimes_df)
    runtime_data = pd.concat(runtimes_dfs)
# Created on 2020/7/16
# This module is for functions generating random time series.
# Standard library imports
from datetime import datetime
from typing import Union
# Third party imports
import numpy as np
import pandas as pd
from typeguard import typechecked
# Local application imports
from .. import timeseries as ts
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
### TIME SERIES MODELS ###
# Simple models
@typechecked
def constant(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
cst: float=0.,
sigma: float=0.,
tz=None, unit=None, name=""
) -> ts.TimeSeries:
"""
Defines a time series with constant numerical value
and eventually add a noise to it.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
cst : int or float
The constant to build the time series from.
sigma : float
Standard deviation for the Gaussian noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
The constant time series with eventual Gaussian noise.
Raises
------
None
Notes
-----
White noise is Gaussian here.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate data
if float(sigma) != 0.:
rand_val = np.random.normal(loc=0., scale=sigma, size=T)
data_vals = [cst] * T + rand_val
else:
data_vals = [cst] * T
# Make time series
df = pd.DataFrame(index=data_index, data=data_vals)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
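# Illustrative usage sketch (added; kept as a comment to avoid side effects at import time, and
# assuming this module is imported from within its package so ts.TimeSeries is available):
#
#   noisy_flat = constant(start_date="2021-01-01", end_date="2021-12-31", frequency='D',
#                         cst=10., sigma=0.5, unit='USD', name="flat-with-noise")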
# These models describe the evolution of time series.
@typechecked
def auto_regressive(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
start_values: list,
cst: float,
order: int,
coeffs: list,
sigma: float,
tz: str=None,
unit: str=None,
name: str="",
verbose: bool=False
) -> ts.TimeSeries:
"""
Generates a time series from the Auto-Regressive (AR) model of arbitrary order P.
The model is of the form:
x_t = cst + coeffs[0] * x_{t-1} + ... + coeffs[P-1] * x_{t-P} + a_t
where a_t is the white noise with standard deviation sigma.
Initial values for {x_0, ..., x_P} are imposed from the values in start_values.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
start_values : list
Initial values of the process (P of them).
cst : float
Constant value of the process.
order : int
Order of the process (i.e. value of P).
coeffs : list
Coefficients of the process.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
verbose : bool
Verbose option.
Returns
-------
TimeSeries
The time series resulting from the Auto-Regressive process.
Raises
------
None
Notes
-----
White noise is Gaussian here.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(coeffs)==order)
assert(len(start_values)==order)
P = len(start_values)
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the white noise (Note: p first values are not used)
a = np.random.normal(loc=0., scale=sigma, size=T)
# Generate the random series
x = [0.] * T
for t_ini in range(P):
x[t_ini] = start_values[t_ini]
for t in range(P,T,1):
x[t] = cst + a[t]
for p in range(P):
x[t] += coeffs[p] * x[t-p-1]
# Compute theoretical expectation value
if verbose:
E = cst / (1 - sum(coeffs))
print(f"Under stationarity assumption, the expected value for this AR({str(P)}) model is: {str(E)} \n")
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=x)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
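# Illustrative usage sketch (added): a stationary AR(2) whose coefficients sum to 0.7 < 1, so the
# expected value printed in verbose mode is cst / (1 - 0.7).
#
#   ar2 = auto_regressive(start_date="2021-01-01", end_date="2021-12-31", frequency='D',
#                         start_values=[0.5, 0.6], cst=0.1, order=2, coeffs=[0.4, 0.3],
#                         sigma=0.2, name="AR(2) demo", verbose=True)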
@typechecked
def random_walk(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
start_value: float,
sigma: float,
tz: str=None,
unit: str=None,
name: str=""
) -> ts.TimeSeries:
"""
Generates a time series from the Random Walk process,
i.e. an AR(1) model with {cst = 0, coeff[0] = 1}.
The model is of the form:
x_t = x_{t-1} + a_t
where a_t is the white noise with standard deviation sigma.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
start_value : float
Initial value of the process.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
The time series resulting from the Random Walk process.
Raises
------
None
Notes
-----
White noise is Gaussian here.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the white noise (Note: first value is not used)
a = np.random.normal(loc=0., scale=sigma, size=T)
# Generate the random series
x = [0.] * T
x[0] = start_value
for t in range(1,T,1):
x[t] = x[t-1] + a[t]
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=x)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
@typechecked
def drift_random_walk(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
start_value: float,
drift: float,
sigma: float,
tz: str=None,
unit: str=None,
name: str=""
) -> ts.TimeSeries:
"""
Generates a time series from the Random Walk with Drift process,
i.e. an AR(1) model with {cst != 0, coeffs[0] = 1}.
The model is of the form:
x_t = drift + x_{t-1} + a_t
where a_t is the white noise with standard deviation sigma.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
start_value : float
Initial value of the process.
drift : float
Value of the drift.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
The time series resulting from the Random Walk process with drift.
Raises
------
None
Notes
-----
White noise is Gaussian here.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the white noise (Note: first value is not used)
a = np.random.normal(loc=0., scale=sigma, size=T)
# Generate the random series
x = [0.] * T
x[0] = start_value
for t in range(1,T,1):
x[t] = drift + x[t-1] + a[t]
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=x)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
@typechecked
def moving_average(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
cst: float,
order: int,
coeffs: list,
sigma: float,
tz: str=None,
unit: str=None,
name: str="",
verbose: bool=False
) -> ts.TimeSeries:
"""
Generates a time series from the Moving Average (MA) model of arbitrary order Q.
The model is of the form:
        x_t = cst + a_t - coeffs[0] * a_{t-1} - ... - coeffs[Q-1] * a_{t-Q}
where {a_t} is the white noise series with standard deviation sigma.
We don't need to impose any initial values for {x_t}, they are imposed directly from {a_t}.
To be clear, the initial steps of the process are:
x_0 = cst + a_0
        x_1 = cst + a_1 - coeffs[0] * a_0
        x_2 = cst + a_2 - coeffs[0] * a_1 - coeffs[1] * a_0
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
cst : float
Constant value of the process.
order : int
Order of the process (i.e. value of Q).
coeffs : list
List of coefficients.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
verbose : bool
Verbose option.
Returns
-------
TimeSeries
The time series resulting from the Moving Average process.
Raises
------
None
Notes
-----
White noise is Gaussian here.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(coeffs)==order)
Q = order
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the white noise
a = np.random.normal(loc=0., scale=sigma, size=T)
# Generate the random series
x = [0.] * T
for t in range(T):
x[t] = cst + a[t]
for q in range(Q):
if t-q > 0:
x[t] -= coeffs[q] * a[t-q-1]
# Compute theoretical values
if verbose:
V = 1.
for q in range(Q):
V += coeffs[q]**2
V *= sigma**2
print(f"The expected value for this MA({str(Q)}) model is: {str(cst)}")
print(f"The estimation of the variance for this MA({str(Q)}) model is: {str(V)}" + \
f" , i.e. a standard deviation of: {str(np.sqrt(V))} \n")
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=x)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
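# Illustrative usage sketch (added): an MA(1) whose theoretical variance printed in verbose mode
# is (1 + 0.6**2) * sigma**2 = 1.36.
#
#   ma1 = moving_average(start_date="2021-01-01", end_date="2021-06-30", frequency='D',
#                        cst=0.5, order=1, coeffs=[0.6], sigma=1., name="MA(1) demo",
#                        verbose=True)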
@typechecked
def arma(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
start_values: list,
cst: float,
ARorder: int,
ARcoeffs: list,
MAorder: int,
MAcoeffs: list,
sigma: float,
tz: str=None,
unit: str=None,
name: str=""
) -> ts.TimeSeries:
"""
Function generating a time series from the Auto-Regressive Moving Average (ARMA)
model of orders (P,Q).
The model is of the form:
        x_t = cst + Sum_{i=0}^{P-1} ARcoeffs[i] * x_{t-i-1}
            + a_t - Sum_{j=0}^{Q-1} MAcoeffs[j] * a_{t-j-1}
where {a_t} is the white noise series with standard deviation sigma.
Initial values for {x_0, ..., x_P} are imposed from the values in start_values.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
start_values : list
Initial values of the process (P of them).
cst : float
Constant value of the process.
ARorder : int
Order of the AR part of the process (i.e. value of P).
ARcoeffs : list
List of coefficients for the AR part of the process.
MAorder : int
Order of the MA part of the process (i.e. value of Q).
MAcoeffs : list
List of coefficients for the MA part of the process.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
The time series resulting from the ARMA process.
Raises
------
None
Notes
-----
White noise is Gaussian here.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(ARcoeffs)==ARorder)
assert(len(MAcoeffs)==MAorder)
assert(len(start_values)==ARorder)
P = ARorder
Q = MAorder
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the white noise
a = np.random.normal(loc=0., scale=sigma, size=T)
# Generate the random series
x = [0.] * T
# Taking care of {x_0, x_1, ..., x_P}
for t_ini in range(P):
x[t_ini] = start_values[t_ini]
# Taking care of the rest
for t in range(P,T,1):
x[t] = cst + a[t]
for p in range(P):
            x[t] += ARcoeffs[p] * x[t-p-1]
for q in range(Q):
if t-q > 0:
                x[t] -= MAcoeffs[q] * a[t-q-1]
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=x)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
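# Illustrative usage sketch (added): an ARMA(1,1); start_values must contain ARorder entries.
#
#   arma11 = arma(start_date="2021-01-01", end_date="2021-06-30", frequency='D',
#                 start_values=[0.2], cst=0.1, ARorder=1, ARcoeffs=[0.5],
#                 MAorder=1, MAcoeffs=[0.3], sigma=0.2, name="ARMA(1,1) demo")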
@typechecked
def rca(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
cst: float,
order: int,
ARcoeffs: list,
cov_matrix: list,
sigma: float,
tz: str=None,
unit: str=None,
name: str=""
) -> ts.TimeSeries:
"""
Function generating a time series from the Random Coefficient Auto-Regressive (RCA)
model of order M.
The model is of the form:
x_t = cst + Sum_{m=0}^{M-1} (ARcoeffs[m] + coeffs[m]) * x_{t-m-1} + a_t.
Here {a_t} is a Gaussian white noise with standard deviation sigma
and coeffs_t are randomly generated from the covariance matrix cov_matrix.
In addition, we have some imposed coefficients of the Auto-Regressive type in ARcoeffs.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
cst : float
Constant value of the process.
order : int
Order of the process (i.e. value of M).
ARcoeffs : list
List of coefficients for the AR part of the process.
cov_matrix: list of lists
Covariance matrix for the random part of the process.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
The time series resulting from the RCA process.
Raises
------
None
Notes
-----
We assume coeffs_t follow a multivariate Gaussian distribution.
Also cov_matrix should be a non-negative definite matrix.
Here we do not have an argument called start_value,
compared with randomseries.auto_regressive().
This choice is made as there are already random coefficients involved.
There is no real use in imposing the first values of the time series
other than just ARcoeffs and the generated coeffs.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(ARcoeffs)==order)
assert(len(cov_matrix)==order and len(cov_matrix[0])==order)
for row in cov_matrix:
for x in row:
assert(x>=0)
M = order
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the white noise
a = np.random.normal(loc=0., scale=sigma, size=T)
# Generate the random series
x = [0.] * T
for t in range(T):
x[t] = cst + a[t]
# Generate the list of coefficients
coeffs = np.random.multivariate_normal(mean=[0.] * M, cov=cov_matrix, size=1)[0]
for m in range(M):
if t-m > 0:
                x[t] += (ARcoeffs[m] + coeffs[m]) * x[t-m-1]
# Combine them into a time series
    df = pd.DataFrame(index=data_index, data=x)  # use the generated series x, not the raw noise a
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
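# Illustrative usage sketch (added): an RCA(2) with a diagonal (hence element-wise non-negative)
# covariance matrix for the random coefficients.
#
#   rca2 = rca(start_date="2021-01-01", end_date="2021-06-30", frequency='D', cst=0.05,
#              order=2, ARcoeffs=[0.3, 0.2], cov_matrix=[[0.02, 0.], [0., 0.02]],
#              sigma=0.1, name="RCA(2) demo")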
### HETEROSCEDASTIC MODELS ###
# These models describe the volatility of a time series.
@typechecked
def arch(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
cst: float,
order: int,
coeffs: list,
tz: str=None,
unit: str=None,
name: str="",
verbose: bool=False
) -> ts.TimeSeries:
"""
Function generating a volatility series from the
Auto-Regressive Conditional Heteroscedastic (ARCH) model of order M.
The model is of the form:
a_t = sig_t * eps_t
with sig_t^2 = cst + coeffs[0] * a_{t-1}^2 + ... + coeffs[M-1] * a_{t-M}^2.
    Here {eps_t} is a sequence of iid random variables with mean zero and unit variance,
i.e. a white noise with unit variance.
The coefficients cst and coeffs[i] are assumed to be positive
and must be such that a_t is finite with positive variance.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
cst : float
Constant value of the process.
order : int
Order of the process (i.e. value of M).
coeffs : list
List of coefficients of the process.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
verbose : bool
Verbose option.
Returns
-------
TimeSeries
The time series resulting from the ARCH process.
Raises
------
None
Notes
-----
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(coeffs)==order)
# Non-negativity
if(cst<=0):
print("cst must be strictly positive.")
assert(cst>0)
for x in coeffs:
if (x<0):
print("coefficients are not allowed to take negative values.")
assert(x>=0)
# Sum less than unity
if(sum(coeffs)>=1):
print("Sum of coefficients must be < 1 in order to have positive variance.")
assert(sum(coeffs)<1)
M = order
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the "unit" white noise
eps = np.random.normal(loc=0., scale=1, size=T)
# Generate the random series
a = [0.] * T
for t in range(T):
sig_square = cst
for m in range(M):
if t-m > 0:
sig_square += coeffs[m] * a[t-m-1]**2
sig = np.sqrt(sig_square)
a[t] = sig * eps[t]
# Compute theoretical values
if verbose:
print(f"The expected value for this ARCH({str(M)}) model is 0, like any other ARCH model," \
+ f" and the estimated value is: {str(np.mean(a))}")
        V = cst / (1 - sum(coeffs))  # unconditional variance
        print(f"The theoretical standard deviation for this ARCH({str(M)}) model is: {str(np.sqrt(V))}")
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=a)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
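# Illustrative usage sketch (added): an ARCH(1); cst must be > 0 and the coefficients must sum to
# less than 1, so the unconditional variance is 0.1 / (1 - 0.4).
#
#   arch1 = arch(start_date="2021-01-01", end_date="2021-06-30", frequency='D',
#                cst=0.1, order=1, coeffs=[0.4], name="ARCH(1) demo", verbose=True)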
@typechecked
def garch(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
cst: float,
order_a: int,
coeffs_a: list,
order_sig: int,
coeffs_sig: list,
tz: str=None,
unit: str=None,
name: str="",
verbose: bool=False
) -> ts.TimeSeries:
"""
Function generating a volatility series from the
Generalized ARCH (GARCH) model of order M.
The model is of the form:
a_t = sig_t * eps_t
with sig_t^2 = cst + Sum_{i=0}^{M-1} coeffs_a[i] * a_{t-i-1}^2
+ Sum_{j=0}^{S-1} coeffs_sig[j] * sig_{t-j-1}^2.
    Here {eps_t} is a sequence of iid random variables with mean zero and unit variance,
i.e. a white noise with unit variance.
The coefficients cst and coeffs[i] are assumed to be positive
and must be such that a_t is finite with positive variance.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
cst : float
Constant value of the process.
order_a : int
Order of the a_t part of the process (i.e. value of M).
coeffs_a : list
List of coefficients of the a_t part of the process.
order_sig : int
Order of the sig_t part of the process (i.e. value of S).
coeffs_sig : list
List of coefficients of the sig_t part of the process.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
verbose : bool
Verbose option.
Returns
-------
TimeSeries
The time series resulting from the GARCH process.
Raises
------
None
Notes
-----
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(coeffs_a)==order_a)
assert(len(coeffs_sig)==order_sig)
# Non-negativity
if(cst<=0):
print("cst must be strictly positive.")
assert(cst>0)
for x in coeffs_a + coeffs_sig:
if (x<0):
print("coefficients are not allowed to take negative values.")
assert(x>=0)
# Sum less than unity
if(sum(coeffs_a) + sum(coeffs_sig) >= 1):
print("Sum of coefficients must be less than one in order to have positive variance.")
assert(sum(coeffs_a) + sum(coeffs_sig) < 1)
M = order_a
S = order_sig
# Generate index
data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
T = len(data_index)
# Generate the "unit" white noise
eps = np.random.normal(loc=0., scale=1, size=T)
# Generate the random series
a = [0.] * T
sig = [0.] * T
for t in range(T):
sig_square = cst
for m in range(M):
if t-m > 0:
sig_square += coeffs_a[m] * a[t-m-1]**2
for s in range(S):
if t-s > 0:
sig_square += coeffs_sig[s] * sig[t-s-1]**2
sig[t] = np.sqrt(sig_square)
a[t] = sig[t] * eps[t]
# Compute theoretical values
if verbose:
V = cst / (1 - sum(coeffs_a) - sum(coeffs_sig))
print(f"The theoretical standard deviation for this GARCH({str(M)}, {str(S)}) model is: {str(np.sqrt(V))}")
# Combine them into a time series
df = pd.DataFrame(index=data_index, data=a)
rs = ts.TimeSeries(df, tz=tz, unit=unit, name=name)
return rs
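# Illustrative usage sketch (added): a GARCH(1,1) with coeffs_a[0] + coeffs_sig[0] = 0.95 < 1,
# giving an unconditional variance of 0.1 / (1 - 0.95).
#
#   garch11 = garch(start_date="2021-01-01", end_date="2021-06-30", frequency='D', cst=0.1,
#                   order_a=1, coeffs_a=[0.15], order_sig=1, coeffs_sig=[0.8],
#                   name="GARCH(1,1) demo", verbose=True)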
@typechecked
def charma(start_date: Union[str, datetime.date],
end_date: Union[str, datetime.date],
frequency: str,
order: int,
cov_matrix: list,
sigma: float,
tz: str=None,
unit: str=None,
name: str=""
) -> ts.TimeSeries:
"""
Function generating a volatility series from the
    Conditional Heteroscedastic ARMA (CHARMA) model of order M.
The model is of the form:
a_t = Sum_{m=0}^{M-1} coeffs[m] * a_{t-m-1} + eta_t.
Here {eta_t} is a Gaussian white noise with standard deviation sigma
and coeffs_t are generated from the covariance matrix cov_matrix.
Parameters
----------
start_date : str or datetime
Starting date of the time series.
end_date : str or datetime
Ending date of the time series.
frequency : str or DateOffset
Indicates the frequency of data as an offset alias (e.g. 'D' for days, 'M' for months, etc.).
order : int
Order of the process (i.e. value of M).
cov_matrix: list of lists
Covariance matrix for the random part of the process.
sigma : float
Standard deviation of the Gaussian white noise.
tz : str
Timezone name.
unit : str or None
Unit of the time series values.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
The time series resulting from the CHARMA process.
Raises
------
None
Notes
-----
White noise is Gaussian here.
We assume coeffs_t follow a multivariate Gaussian distribution.
Also cov_matrix should be a non-negative definite matrix.
For offset aliases available see:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
Examples
--------
None
"""
# Checks
assert(len(cov_matrix)==order and len(cov_matrix[0])==order)
for row in cov_matrix:
for x in row:
assert(x>=0)
M = order
# Generate index
    data_index = pd.date_range(start=start_date, end=end_date, freq=frequency)
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from tqdm import tqdm
import yaml
import os
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from joblib import dump, load
from category_encoders import OrdinalEncoder
from src.data.spdat import get_spdat_data
def load_df(path):
'''
Load a Pandas dataframe from a CSV file
:param path: The file path of the CSV file
:return: A Pandas dataframe
'''
# Read HIFIS data into a Pandas dataframe
df = pd.read_csv(path, encoding="ISO-8859-1", low_memory=False)
return df
def classify_cat_features(df, cat_features):
'''
Classify categorical features as either single- or multi-valued.
:param df: Pandas dataframe
:param cat_features: List of categorical features
:return: list of single-valued categorical features, list of multi-valued categorical features
'''
def classify_features(client_df):
'''
Helper function for categorical feature classification, distributed across clients.
:param client_df: Dataframe with 1 client's records
:return List of single-valued categorical features, list of multi-valued categorical features
'''
for feature in cat_features:
# If this feature takes more than 1 value per client, move it to the list of multi-valued features
            if client_df[feature].nunique() > 1 and feature in sv_cat_features:  # avoid double-removal across clients
sv_cat_features.remove(feature)
mv_cat_features.append(feature)
return
    sv_cat_features = cat_features.copy() # First, assume all categorical features are single-valued
mv_cat_features = []
df.groupby('ClientID').progress_apply(classify_features)
return sv_cat_features, mv_cat_features
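# Illustrative sketch (added, hypothetical data): with the toy frame below, 'Gender' keeps a single
# value per client while 'ServiceType' varies for client 1, so it is expected to be reclassified as
# multi-valued. Note that progress_apply() requires tqdm.pandas() to have been registered beforehand.
#
#   toy_df = pd.DataFrame({'ClientID': [1, 1, 2],
#                          'Gender': ['F', 'F', 'M'],
#                          'ServiceType': ['Stay', 'Case Management', 'Stay']})
#   sv, mv = classify_cat_features(toy_df, ['Gender', 'ServiceType'])
#   # expected: sv == ['Gender'], mv == ['ServiceType']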
def get_mv_cat_feature_names(df, mv_cat_features):
'''
Build list of possible multi-valued categorical features
:param df: DataFrame containing HIFIS data
:param mv_cat_features: List of multi-valued categorical features
:return: List of all individual multi-valued categorical features
'''
mv_vec_cat_features = []
for f in mv_cat_features:
mv_vec_cat_features += [(f + '_' + v) for v in list(df[f].unique()) if type(v) == str]
return mv_vec_cat_features
def vec_multi_value_cat_features(df, mv_cat_features, cfg, load_ct=False, categories=None):
'''
Converts multi-valued categorical features to vectorized format and appends to the dataframe
:param df: A Pandas dataframe
:param mv_categorical_features: The names of the categorical features to vectorize
:param cfg: project config
:param load_ct: Flag indicating whether to load a saved column transformer
:param categories: List of columns containing all possible values to encode
:return: dataframe containing vectorized features, list of vectorized feature names
'''
orig_col_names = df.columns
if categories is None:
categories = 'auto'
# One hot encode the multi-valued categorical features
mv_cat_feature_idxs = [df.columns.get_loc(c) for c in mv_cat_features if c in df] # List of categorical column indices
if load_ct:
col_trans_ohe = load(cfg['PATHS']['OHE_COL_TRANSFORMER_MV'])
df_ohe = pd.DataFrame(col_trans_ohe.transform(df), index=df.index.copy())
else:
col_trans_ohe = ColumnTransformer(
transformers=[('col_trans_mv_ohe', OneHotEncoder(categories=categories, sparse=False, handle_unknown='ignore', dtype=int), mv_cat_feature_idxs)],
remainder='passthrough'
)
df_ohe = pd.DataFrame(col_trans_ohe.fit_transform(df), index=df.index.copy())
dump(col_trans_ohe, cfg['PATHS']['OHE_COL_TRANSFORMER_MV'], compress=True) # Save the column transformer
# Build list of feature names for the new DataFrame
mv_vec_cat_features = []
for i in range(len(mv_cat_features)):
feat_names = list(col_trans_ohe.transformers_[0][1].categories_[i])
for j in range(len(feat_names)):
mv_vec_cat_features.append(mv_cat_features[i] + '_' + feat_names[j])
ohe_feat_names = mv_vec_cat_features.copy()
for feat in orig_col_names:
if feat not in mv_cat_features:
ohe_feat_names.append(feat)
df_ohe.columns = ohe_feat_names
return df_ohe, mv_vec_cat_features
def vec_single_value_cat_features(df, sv_cat_features, cfg, load_ct=False):
'''
Converts single-valued categorical features to one-hot encoded format (i.e. vectorization) and appends to the dataframe.
Keeps track of a mapping from feature indices to categorical values, for interpretability purposes.
:param df: A Pandas dataframe
:param sv_cat_features: The names of the categorical features to encode
:param cfg: project config dict
:param load_ct: Flag indicating whether to load saved column transformers
:return: dataframe containing one-hot encoded features, list of one-hot encoded feature names
'''
# Convert single-valued categorical features to numeric data
cat_feature_idxs = [df.columns.get_loc(c) for c in sv_cat_features if c in df] # List of categorical column indices
cat_value_names = {} # Dictionary of categorical feature indices and corresponding names of feature values
if load_ct:
col_trans_ordinal = load(cfg['PATHS']['ORDINAL_COL_TRANSFORMER'])
df[sv_cat_features] = col_trans_ordinal.transform(df)
else:
col_trans_ordinal = ColumnTransformer(transformers=[('col_trans_ordinal', OrdinalEncoder(handle_unknown='value'), sv_cat_features)])
df[sv_cat_features] = col_trans_ordinal.fit_transform(df) # Want integer representation of features to start at 0
dump(col_trans_ordinal, cfg['PATHS']['ORDINAL_COL_TRANSFORMER'], compress=True) # Save the column transformer
# Preserve named values of each categorical feature
for i in range(len(sv_cat_features)):
cat_value_names[cat_feature_idxs[i]] = []
for j in range(len(col_trans_ordinal.transformers_[0][1].category_mapping[i])):
# Last one is nan; we don't want that
cat_value_names[cat_feature_idxs[i]] = col_trans_ordinal.transformers_[0][1].category_mapping[i]['mapping'].index.tolist()[:-1]
# One hot encode the single-valued categorical features
if load_ct:
col_trans_ohe = load(cfg['PATHS']['OHE_COL_TRANSFORMER_SV'])
df_ohe = pd.DataFrame(col_trans_ohe.transform(df), index=df.index.copy())
else:
col_trans_ohe = ColumnTransformer(
transformers=[('col_trans_ohe', OneHotEncoder(sparse=False, handle_unknown='ignore'), cat_feature_idxs)],
remainder='passthrough'
)
df_ohe = pd.DataFrame(col_trans_ohe.fit_transform(df), index=df.index.copy())
dump(col_trans_ohe, cfg['PATHS']['OHE_COL_TRANSFORMER_SV'], compress=True) # Save the column transformer
# Build list of feature names for OHE dataset
ohe_feat_names = []
for i in range(len(sv_cat_features)):
for value in cat_value_names[cat_feature_idxs[i]]:
ohe_feat_names.append(sv_cat_features[i] + '_' + str(value))
vec_sv_cat_features = ohe_feat_names.copy()
for feat in df.columns:
if feat not in sv_cat_features:
ohe_feat_names.append(feat)
df_ohe.columns = ohe_feat_names
cat_feat_info = {} # To store info for later use in LIME
cat_feat_info['SV_CAT_FEATURES'] = sv_cat_features
cat_feat_info['VEC_SV_CAT_FEATURES'] = vec_sv_cat_features
cat_feat_info['SV_CAT_FEATURE_IDXS'] = cat_feature_idxs
    # To use sparse matrices in LIME, ordinal encoded values must start at 1. Add a dummy value to the SV categorical feature value lists.
for i in range(len(sv_cat_features)):
cat_value_names[cat_feature_idxs[i]].insert(0, 'DUMMY_VAL')
cat_feat_info['SV_CAT_VALUES'] = cat_value_names
return df, df_ohe, cat_feat_info
def process_timestamps(df):
'''
Convert timestamps in raw date to datetimes
:param df: A Pandas dataframe
:return: The dataframe with its datetime fields updated accordingly
'''
features_list = list(df) # Get a list of features
for feature in features_list:
if ('Date' in feature) or ('Start' in feature) or ('End' in feature) or (feature == 'DOB'):
df[feature] = pd.to_datetime(df[feature], errors='coerce')
return df
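# Illustrative sketch (added, hypothetical data): any column whose name contains 'Date', 'Start' or
# 'End' (or equals 'DOB') is parsed to datetime, with unparseable values coerced to NaT; other
# columns are left untouched.
#
#   demo = pd.DataFrame({'DateStart': ['2020-01-05', 'not a date'], 'MonthlyAmount': ['100', '250']})
#   demo = process_timestamps(demo)
#   # demo['DateStart'] -> [Timestamp('2020-01-05'), NaT]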
def remove_n_weeks(df, train_end_date, dated_feats):
'''
Remove records from the dataframe that have timestamps in the n weeks leading up to the ground truth date
:param df: Pandas dataframe
:param train_end_date: the most recent date that should appear in the dataset
:param dated_feats: list of feature names with dated events
:return: updated dataframe with the relevant rows removed
'''
df = df[df['DateStart'] <= train_end_date] # Delete rows where service occurred after this date
df['DateEnd'] = df['DateEnd'].clip(upper=train_end_date) # Set end date for ongoing services to this date
# Update client age
if 'DOB' in df.columns:
df['CurrentAge'] = (train_end_date - df['DOB']).astype('<m8[Y]')
return df.copy()
def calculate_ground_truth(df, chronic_threshold, days, end_date):
'''
Iterate through dataset by client to calculate ground truth
:param df: a Pandas dataframe
:param chronic_threshold: Minimum # of days spent in shelter to be considered chronically homeless
:param days: Number of days over which to count # days spent in shelter
:param end_date: The last date of the time period to consider
:return: a DataSeries mapping ClientID to ground truth
'''
def client_gt(client_df):
'''
Helper function ground truth calculation.
:param client_df: A dataframe containing all rows for a client
:return: the client dataframe ground truth calculated correctly
'''
client_df.sort_values(by=['DateStart'], inplace=True) # Sort records by service start date
gt_stays = 0 # Keep track of total stays, as well as # stays during ground truth time range
last_stay_end = pd.to_datetime(0)
last_stay_start = pd.to_datetime(0)
# Iterate over all of client's records. Note itertuples() is faster than iterrows().
for row in client_df.itertuples():
stay_start = getattr(row, 'DateStart')
stay_end = min(getattr(row, 'DateEnd'), end_date) # If stay is ongoing through end_date, set end of stay as end_date
service_type = getattr(row, 'ServiceType')
if (stay_start > last_stay_start) and (stay_end > last_stay_end) and (service_type == 'Stay'):
if (stay_start.date() >= start_date.date()) or (stay_end.date() >= start_date.date()):
# Account for cases where stay start earlier than start of range, or stays overlapping from previous stay
stay_start = max(start_date, stay_start, last_stay_end)
if (stay_end - stay_start).total_seconds() >= min_stay_seconds:
gt_stays += (stay_end.date() - stay_start.date()).days + (stay_start.date() != last_stay_end.date())
last_stay_end = stay_end
last_stay_start = stay_start
# Determine if client meets ground truth threshold
if gt_stays >= chronic_threshold:
client_df['GroundTruth'] = 1
return client_df
start_date = end_date - timedelta(days=days) # Get start of ground truth window
min_stay_seconds = 60 * 15 # Stays must be at least 15 minutes
df_temp = df[['ClientID', 'ServiceType', 'DateStart', 'DateEnd']]
df_temp['GroundTruth'] = 0
df_temp = df_temp.groupby('ClientID').progress_apply(client_gt)
if df_temp.shape[0] == 0:
return None
if 'ClientID' not in df_temp.index:
df_temp.set_index(['ClientID'], append=True, inplace=True)
df_gt = df_temp['GroundTruth']
df_gt = df_gt.groupby(['ClientID']).agg({'GroundTruth': 'max'})
return df_gt
def calculate_client_features(df, end_date, noncat_feats, counted_services, timed_services, start_date=None):
'''
Iterate through dataset by client to calculate numerical features from services received by a client
:param df: a Pandas dataframe
:param end_date: The latest date of the time period to consider
:param noncat_feats: List of noncategorical features
:param counted_services: Service features for which we wish to count occurrences and create a feature for
:param timed_services: Service features for which we wish to count total time received and create a feature for
:param start_date: The earliest date of the time period to consider
:return: the dataframe with the new service features included, updated list of noncategorical features
'''
def client_features(client_df):
'''
Helper function for total stay, total income and ground truth calculation.
To be used on a subset of the dataframe
:param client_df: A dataframe containing all rows for a client
:return: the client dataframe with total stays and ground truth columns appended
'''
if start_date is not None:
client_df = client_df[client_df['DateEnd'] >= start_date]
client_df['DateStart'].clip(lower=start_date, inplace=True)
client_df = client_df[client_df['DateStart'] <= end_date]
client_df['DateEnd'].clip(upper=end_date, inplace=True) # If ongoing through end_date, set end as end_date
client_df.sort_values(by=['DateStart'], inplace=True) # Sort records by service start date
total_services = dict.fromkeys(total_timed_service_feats, 0) # Keep track of total days of service prior to training data end date
last_service_end = dict.fromkeys(timed_services + counted_services, pd.to_datetime(0)) # Unix Epoch (1970-01-01 00:00:00)
last_service_start = dict.fromkeys(timed_services + counted_services, pd.to_datetime(0))
last_service = ''
# Iterate over all of client's records. Note: itertuples() is faster than iterrows().
for row in client_df.itertuples():
service_start = getattr(row, 'DateStart')
service_end = getattr(row, 'DateEnd')
service = getattr(row, 'ServiceType')
if (service in timed_services):
if (service_start > last_service_start[service]) and (service_end > last_service_end[service]):
service_start = max(service_start, last_service_end[service]) # Don't count any service overlapping from previous service
if (service == 'Stay') and ((service_end - service_start).total_seconds() < min_stay_seconds):
continue # Don't count a stay if it's less than 15 minutes
total_services['Total_' + service] += (service_end.date() - service_start.date()).days + \
(service_start.date() != last_service_end[service].date())
last_service_end[service] = service_end
last_service_start[service] = service_start
elif (service in counted_services) and \
((service_end != last_service_end[service]) or (getattr(row, 'ServiceType') != last_service)):
service = getattr(row, 'ServiceType')
client_df['Num_' + service] += 1 # Increment # of times this service was accessed by this client
last_service_end[service] = service_end
last_service = service
# Set total length of timed service features in client's records
for feat in total_services:
client_df[feat] = total_services[feat]
# Calculate total monthly income for client
client_income_df = client_df[['IncomeType', 'MonthlyAmount', 'DateStart', 'DateEnd']]\
.sort_values(by=['DateStart']).drop_duplicates(subset=['IncomeType'], keep='last')
client_df['IncomeTotal'] = client_income_df['MonthlyAmount'].sum()
return client_df
total_timed_service_feats = ['Total_' + s for s in timed_services]
for feat in total_timed_service_feats:
df[feat] = 0
df['IncomeTotal'] = 0
df['MonthlyAmount'] = pd.to_numeric(df['MonthlyAmount'])
min_stay_seconds = 60 * 15 # Stays must be at least 15 minutes
numerical_service_features = []
for service in counted_services:
df['Num_' + service] = 0
numerical_service_features.append('Num_' + service)
df_temp = df.copy()
df_temp = df_temp.groupby('ClientID').progress_apply(client_features)
df_temp = df_temp.droplevel('ClientID', axis='index')
df.update(df_temp) # Update all rows with corresponding stay length and total income
# Update list of noncategorical features
noncat_feats.extend(numerical_service_features)
noncat_feats.extend(total_timed_service_feats)
noncat_feats.extend(['IncomeTotal'])
return df, noncat_feats
def calculate_ts_client_features(df, end_date, timed_services, counted_services, total_timed_service_feats,
numerical_service_feats, feat_prefix, start_date=None):
'''
Iterate through dataset by client to calculate numerical features from services received by a client
:param df: a Pandas dataframe
:param end_date: The latest date of the time period to consider
:param timed_services: Service features for which we wish to count total time received and create a feature for
:param counted_services: Service features for which we wish to count occurrences and create a feature for
:param total_timed_service_feats: Names of features to represent totals for timed service features
:param numerical_service_feats: Names of features to represent totals for numerical service features
:param feat_prefix: Prefix for total or timestep features
:param start_date: The earliest date of the time period to consider
:return: the dataframe with the new service features included, updated list of noncategorical features
'''
def client_services(client_df):
'''
Helper function for total stay, total income and ground truth calculation.
To be used on a subset of the dataframe
:param client_df: A dataframe containing all rows for a client
:return: the client dataframe with total stays and ground truth columns appended
'''
if start_date is not None:
client_df = client_df[client_df['DateEnd'] >= start_date]
client_df['DateStart'].clip(lower=start_date, inplace=True)
else:
if not (client_df['ServiceType'].isin(timed_services + counted_services)).any():
return pd.DataFrame()
client_df = client_df[client_df['DateStart'] <= end_date]
client_df['DateEnd'].clip(upper=end_date, inplace=True)
client_df.sort_values(by=['DateStart'], inplace=True) # Sort records by service start date
total_services = dict.fromkeys(total_timed_service_feats, 0) # Keep track of total days of service prior to training data end date
last_service_end = dict.fromkeys(timed_services + counted_services, pd.to_datetime(0)) # Unix Epoch (1970-01-01 00:00:00)
last_service_start = dict.fromkeys(timed_services + counted_services, pd.to_datetime(0))
last_service = ''
# Iterate over all of client's records. Note: itertuples() is faster than iterrows().
for row in client_df.itertuples():
service_start = getattr(row, 'DateStart')
service_end = getattr(row, 'DateEnd')
service = getattr(row, 'ServiceType')
if (service in timed_services):
if (service_start > last_service_start[service]) and (service_end > last_service_end[service]):
service_start = max(service_start, last_service_end[service]) # Don't count any service overlapping from previous service
if (service == 'Stay') and ((service_end - service_start).total_seconds() < min_stay_seconds):
continue # Don't count a stay if it's less than 15 minutes
total_services[feat_prefix + service] += (service_end.date() - service_start.date()).days + \
(service_start.date() != last_service_end[service].date())
last_service_end[service] = service_end
last_service_start[service] = service_start
elif (service in counted_services) and \
((service_end != last_service_end[service]) or (getattr(row, 'ServiceType') != last_service)):
service = getattr(row, 'ServiceType')
client_df[feat_prefix + service] += 1 # Increment # of times this service was accessed by this client
last_service_end[service] = service_end
last_service = service
# Set total length of timed service features in client's records
for feat in total_services:
client_df[feat] = total_services[feat]
# Calculate total monthly income for client
client_income_df = client_df[['IncomeType', 'MonthlyAmount', 'DateStart', 'DateEnd']]\
.sort_values(by=['DateStart']).drop_duplicates(subset=['IncomeType'], keep='last')
client_df['IncomeTotal'] = client_income_df['MonthlyAmount'].sum()
return client_df
if df is None:
return df
df_temp = df.copy()
min_stay_seconds = 60 * 15 # Stays must be at least 15 minutes
end_date -= timedelta(seconds=1) # To make calculations easier
df_temp = df_temp.groupby('ClientID').progress_apply(client_services)
if df_temp.shape[0] == 0:
return None
df_temp = df_temp.droplevel('ClientID', axis='index')
if start_date is not None:
df_temp.drop(['DateStart', 'DateEnd'], axis=1, inplace=True) # Don't want to keep the clipped dates
return df_temp
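# Illustrative sketch (hypothetical toy data, not part of the pipeline): the helper above
# clips each service's DateStart/DateEnd into the [start_date, end_date] window before
# counting days of service. The clipping idiom on its own looks like this:
def _example_clip_service_dates():
    import pandas as pd
    services = pd.DataFrame({
        'DateStart': pd.to_datetime(['2019-12-28', '2020-01-05']),
        'DateEnd': pd.to_datetime(['2020-01-03', '2020-01-20']),
    })
    start_date = pd.to_datetime('2020-01-01')
    end_date = pd.to_datetime('2020-01-15')
    services['DateStart'] = services['DateStart'].clip(lower=start_date)  # trim starts before the window
    services['DateEnd'] = services['DateEnd'].clip(upper=end_date)  # trim ends after the window
    return services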
def assemble_time_sequences(cfg, df_clients, noncat_feats, include_gt):
'''
Appends most recent values for time series service features to data examples
:param cfg: Project config
:param df_clients: Dataframe of client data indexed by ClientID and Date
:param noncat_feats: list of noncategorical features
:param include_gt: Boolean indicating whether ground truth is included in data (False if in prediction mode)
:return: Dataframe with recent time series service features, updated list of noncategorical features
'''
def client_windows(client_ts_df):
'''
Helper function to create examples with time series features going back T_X time steps for a client's records
:param client_ts_df: A Dataframe of a client's time series service features
:return: A Dataframe with client's current and past T_X time series service features in each row
'''
client_ts_df.sort_values(by=['Date'], ascending=False, inplace=True) # Sort records by date
for i in range(1, T_X):
for f in time_series_feats:
client_ts_df['(-' + str(i) + ')' + f] = client_ts_df[f].shift(-i, axis=0)
return client_ts_df
T_X = cfg['DATA']['TIME_SERIES']['T_X']
time_series_feats = [f for f in df_clients.columns if '-Day_' in f]
df_ts_idx = list(df_clients.columns).index(time_series_feats[0]) # Get column number of first time series feature
for i in range(1, T_X):
for f in reversed(time_series_feats):
new_ts_feat = '(-' + str(i) + ')' + f
df_clients.insert(df_ts_idx, new_ts_feat, 0)
noncat_feats.append(new_ts_feat)
df_clients = df_clients.groupby('ClientID', group_keys=False).progress_apply(client_windows)
# Records at the beginning of a client's experience should have 0 for past time series feats
df_clients.fillna(0, inplace=True)
# Cut off the oldest (trailing) records, whose lagged time series features may contain false 0's
N_WEEKS = cfg['DATA']['N_WEEKS']
DAYS_PER_YEAR = 365.25
cutoff_date = (pd.to_datetime(cfg['DATA']['GROUND_TRUTH_DATE']) - timedelta(days=N_WEEKS * 7)).floor('d')
if include_gt:
cutoff_date -= timedelta(days=int(cfg['DATA']['TIME_SERIES']['YEARS_OF_DATA'] * DAYS_PER_YEAR))
df_clients = df_clients[df_clients.index.get_level_values(1) >= cutoff_date]
return df_clients, noncat_feats
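# Illustrative sketch (hypothetical feature name and values): with a client's records sorted
# by date descending, shift(-i) pulls the value from i time steps earlier into the current
# row, which is how the '(-i)' lag features above are populated; missing lags become 0.
def _example_lag_features(T_X=3):
    import pandas as pd
    df = pd.DataFrame({
        'Date': pd.to_datetime(['2020-03-01', '2020-02-01', '2020-01-01']),  # already descending
        '30-Day_Stay': [5, 2, 1],
    })
    for i in range(1, T_X):
        df['(-' + str(i) + ')30-Day_Stay'] = df['30-Day_Stay'].shift(-i)
    return df.fillna(0)  # oldest records have no earlier time steps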
def aggregate_df(df, noncat_feats, vec_mv_cat_feats, vec_sv_cat_feats):
'''
Build a dictionary of columns and arguments to feed into the aggregation function, and aggregate the dataframe
:param df: a Pandas dataframe
:param noncat_feats: list of noncategorical features
:param vec_mv_cat_feats: list of one-hot encoded multi-valued categorical features
:param vec_sv_cat_feats: list of one-hot encoded single-valued categorical features
:return: A grouped dataframe with one row for each client
'''
grouping_dictionary = {}
temp_dict = {}
if 'DateStart' in noncat_feats:
noncat_feats.remove('DateStart')
if 'ClientID' in noncat_feats:
noncat_feats.remove('ClientID')
# Create a dictionary of column names and function names to pass into the groupby function
for i in range(len(noncat_feats)):
if noncat_feats[i] in df.columns:
if noncat_feats[i] == 'ExpenseAmount':
grouping_dictionary[noncat_feats[i]] = 'sum'
elif noncat_feats[i] == 'TotalScore':
grouping_dictionary[noncat_feats[i]] = 'first'
else:
grouping_dictionary[noncat_feats[i]] = 'max'
for i in range(len(vec_sv_cat_feats)):
if vec_sv_cat_feats[i] in df.columns:
temp_dict[vec_sv_cat_feats[i]] = 'first' # Group single-valued categorical features by first occurrence
grouping_dictionary = {**grouping_dictionary, **temp_dict}
temp_dict = {}
for i in range(len(vec_mv_cat_feats)):
temp_dict[vec_mv_cat_feats[i]] = lambda x: 1 if any(x) else 0 # Group multi-valued categorical features by presence
grouping_dictionary = {**grouping_dictionary, **temp_dict}
if 'GroundTruth' in df.columns:
temp_dict = {'GroundTruth': 'max', }
grouping_dictionary = {**grouping_dictionary, **temp_dict}
# Group the data by ClientID (and Date if time series) using the dictionary created above
groupby_feats = ['ClientID']
if 'Date' in df.columns:
groupby_feats += ['Date']
df_unique_clients = df.groupby(groupby_feats).agg(grouping_dictionary)
return df_unique_clients
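# Illustrative sketch (hypothetical columns): aggregate_df() builds a column -> aggregation
# mapping and passes it to groupby().agg(); a minimal version of that pattern:
def _example_grouped_aggregation():
    import pandas as pd
    df = pd.DataFrame({
        'ClientID': [1, 1, 2],
        'ExpenseAmount': [10.0, 5.0, 7.0],
        'TotalScore': [4, 9, 6],
        'HasPet': [0, 1, 0],
    })
    grouping_dictionary = {
        'ExpenseAmount': 'sum',  # summed per client
        'TotalScore': 'first',  # keep the first recorded score
        'HasPet': lambda x: 1 if any(x) else 0,  # presence flag for one-hot style features
    }
    return df.groupby(['ClientID']).agg(grouping_dictionary)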
def calculate_gt_and_service_feats(cfg, df, categorical_feats, noncategorical_feats, gt_duration, include_gt, calculate_gt):
# Calculate ground truth and save it. Or load pre-saved ground truth.
n_weeks = cfg['DATA']['N_WEEKS']
timed_service_feats = cfg['DATA']['TIMED_SERVICE_FEATURES']
counted_service_feats = cfg['DATA']['COUNTED_SERVICE_FEATURES']
gt_end_date = pd.to_datetime(cfg['DATA']['GROUND_TRUTH_DATE'])
train_end_date = gt_end_date - timedelta(days=(n_weeks * 7)) # Maximum for training set records
if include_gt:
if calculate_gt:
print("Calculating ground truth.")
ds_gt = calculate_ground_truth(df, cfg['DATA']['CHRONIC_THRESHOLD'], gt_duration, gt_end_date)
ds_gt.to_csv(cfg['PATHS']['GROUND_TRUTH'], sep=',', header=True) # Save ground truth
else:
ds_gt = load_df(cfg['PATHS']['GROUND_TRUTH']) # Load ground truth from file
ds_gt = ds_gt.set_index('ClientID')
ds_gt.index = ds_gt.index.astype(int)
# Remove records from the database from n weeks ago and onwards
print("Removing records ", n_weeks, " weeks back. Cutting off at ", train_end_date)
df = remove_n_weeks(df, train_end_date, cfg['DATA']['TIMED_EVENTS'])
# Compute total stays, total monthly income, total # services accessed for each client.
print("Calculating total service features, monthly income total.")
df, noncategorical_feats = calculate_client_features(df, train_end_date, noncategorical_feats,
counted_service_feats, timed_service_feats)
return df, ds_gt, noncategorical_feats
def calculate_time_series(cfg, cat_feat_info, df, categorical_feats, noncategorical_feats, gt_duration, include_gt, calculate_gt, load_ct):
'''
Calculates ground truth, service time series features for client, client monthly income. Vectorizes multi-valued
categorical features. Aggregates data to be indexed by ClientID and Date.
:param cfg: Project config dict
:param cat_feat_info: Generated config with feature information
:param df: Dataframe of raw data
:param categorical_feats: List of categorical features
:param noncategorical_feats: List of noncategorical features
:param gt_duration: Length of time used to calculate chronic homelessness ground truth
:param include_gt: Boolean indicating whether to include ground truth in processed data
:param calculate_gt: Boolean indicating whether to calculate ground truth
:param load_ct: Boolean indicating whether to load serialized column transformers
:return: Aggregated client Dataframe with time series features and one-hot encoded multi-valued categorical features,
Dataframe containing client ground truth, updated list of noncategorical features, list of all multi-valued
categorical variables
'''
gt_end_date = | pd.to_datetime(cfg['DATA']['GROUND_TRUTH_DATE']) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
#get_ipython().magic('load_ext autoreload')
#get_ipython().magic('reload_ext autoreload')
import requests
import lxml.html as lh
from xml.etree import ElementTree
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from queue import Queue, Empty
from urllib.parse import urljoin, urlparse
import urllib.robotparser
import string
import json
import pickle
import re
import unicodedata
from unidecode import unidecode
from itertools import chain
from collections import Counter
from urllib.parse import unquote
import operator
from matplotlib import pyplot as plt
import math
import statistics
import WOSutilities as wosutil
from nameparser import HumanName
import name_tools
import enchant
d=enchant.Dict('en_US')
import imp
import load_data as load_data
# Project-local helper modules referenced below (assumed to be importable from this repo)
import parse_web
import match_utilities
#get_ipython().run_cell_magic('bash', '', 'jupyter nbconvert Nobel_prize_crawl.ipynb --to script')
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except (TypeError, NameError): # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
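# Quick illustrative check (hypothetical input): accented characters are decomposed and the
# combining marks dropped, e.g. 'José Ångström' -> 'Jose Angstrom'.
def _example_strip_accents():
    assert strip_accents('José Ångström') == 'Jose Angstrom'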
def copytofile(raw_html,filename):
with open(filename, 'wb') as outfile:
outfile.write(raw_html)
#retrieve nobel prize lauarates from main WIKI page
nobel_prize_page='https://en.wikipedia.org/wiki/List_of_Nobel_laureates_in_Physics'
page = requests.get(nobel_prize_page)
doc = lh.fromstring(page.content)
list_of_nobel_prize_winners=[]
tr_elements=doc.xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr')
prev=None
#print(tr_elements)
for each_tr_element in tr_elements:
winner_href=None
winner_title=None
year=None
#print(each_tr_element)
td_elements=each_tr_element.xpath('.//td')
if td_elements:
if td_elements[0].xpath('boolean(.//a[contains(@class,"image")])') is False:
year=td_elements[0].text
year=year.strip("\n")
# for shared prizes in a year
if year == '' or year == '–':
year=prev
prev=year
else:
year=prev
th_elements=each_tr_element.xpath('.//th')
if th_elements:
winner_href=th_elements[0].xpath('./a/@href')
winner_title=th_elements[0].xpath('./a/@title')
if winner_href and winner_title:
list_of_nobel_prize_winners.append([int(year),re.sub(r"\(chemist\)|\(physicist\)",'',strip_accents(winner_title[0])),winner_href[0],parse_web.urlCanonicalization(winner_href[0], base_url=nobel_prize_page)])
#creating dataframe with winners,year they were awarded and url of the winner page
nobel_prize_winners=pd.DataFrame(list_of_nobel_prize_winners,columns=['Year','Name','Url','Cannonicalized_Url'])
#retrieve all relevant information available on each winner's Wikipedia page
def update_winner_information(prize_type,prize_winners_dataframe,path_to_store_crawled_info):
winner_wiki_information={}
doc_num=0
count=0
visited_seed=set()
for index,row in prize_winners_dataframe.iterrows():
count=count+1
url=row['Cannonicalized_Url']
if url in visited_seed or not parse_web.ispolite(url):
continue
print(row['Name'])
visited_seed.add(url)
page = requests.get(url)
doc_num=doc_num+1
raw_html=page.content
doc = lh.fromstring(page.content)
path=path_to_store_crawled_info+'/'+prize_type+'-document-{0}'
copytofile(raw_html,path.format(doc_num))
winner_wiki_information.update(parse_web.get_wiki_information(prize_type,doc_num,doc))
return winner_wiki_information
nobel_winner_wiki_information=update_winner_information('nobel',nobel_prize_winners,'/home/apoorva_kasoju2712/nobel_crawled_data')
#store nobel_winner_wiki_information as pickled file
with open('/home/apoorva_kasoju2712/wos_samplecode/nobel_winner_wiki_p.pickle', 'wb') as handle:
pickle.dump(nobel_winner_wiki_information, handle, protocol=pickle.HIGHEST_PROTOCOL)
#retrieve stored nobel_winner_wiki_information
with open('/home/apoorva_kasoju2712/wos_samplecode/nobel_winner_wiki_p.pickle', 'rb') as handle:
nobel_winner_wiki_information = pickle.load(handle)
path2rawdata='/home/apoorva_kasoju2712/WOS_data'
#loading article_df
article_df=load_data.load_article_data(path2rawdata)
#converting article_df to hdf5 and storing
article_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/article_df_data.h5','article_df',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded article_df
#article_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/article_df_data.h5','article_df')
#loading author df
author_df=load_data.load_author_data(path2rawdata)
#converting author_df to hdf5 and storing
author_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/author_df_data_full.h5','author_df_full',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded author_df
author_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/author_df_data_full.h5','author_df_full')
#loading address df
address_df=load_data.load_address_data(path2rawdata)
#converting address df to hdf5 and store
address_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/address_df_data.h5','address_df',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded address_df
#address_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/address_df_data.h5','address_df')
#loading paper_address df
paper_address_df=load_data.load_paper_address_data(path2rawdata)
#converting paper_address df to hdf5 and store
paper_address_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/paper_address_df_data.h5','paper_address_df',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded paper_address_df
paper_address_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/paper_address_df_data.h5','paper_address_df')
#merge paper_address and address_df
address_df_merged=pd.merge(paper_address_df[['ArticleID','AddressOrder','Organization','SubOrganization']], address_df[['ArticleID','AuthorOrder','AddressOrder']], how='inner', on=['ArticleID','AddressOrder'])
address_df_merged["AddressOrder"]=address_df_merged["AddressOrder"].astype('int64')
address_df_merged["AuthorOrder"]=address_df_merged["AuthorOrder"].astype('int64')
address_df_merged.sort_values(by = ['AuthorOrder','AddressOrder'], inplace = True)
address_df_merged.dropna(subset=['AuthorOrder','ArticleID'], inplace=True)
#prepare author_address
author_address=pd.merge(author_df[['ArticleID','FullName', 'LastName', 'FirstName','AuthorDAIS','AuthorOrder']],address_df_merged[['ArticleID','AuthorOrder','Organization']],on=['ArticleID','AuthorOrder'], how='inner')
#or use loaded author_address
author_address=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/author_address_data.h5','author_address')
#getting relevant records for nobel prize winners matching in the WOS
nobel_author_df=match_utilities.get_wos_records(nobel_winner_wiki_information,author_df)
#storing the relevant data to hdf5
prize_type='nobel'
nobel_author_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/'+prize_type+'_author_df_data.h5',prize_type+'_author_df',mode='w',format='table',complevel=9,complib ='blosc')
#loading nobel_author_df
prize_type='nobel'
nobel_author_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/'+prize_type+'_author_df_data.h5',prize_type+'_author_df')
#retrieving co-authors for the articles in nobel_author_df
nobel_article_co_author_temp=pd.merge(author_df[['ArticleID','FullName','LastName']],nobel_author_df[['ArticleID']],how='inner',on=['ArticleID'])
nobel_article_co_author=nobel_article_co_author_temp.groupby(['ArticleID']).agg({'LastName':lambda x: list(x.unique()),'FullName':lambda x: list(x.unique())}).reset_index()
nobel_article_co_author.to_pickle("/home/apoorva_kasoju2712/wos_samplecode/nobel_article_co_author_df.pkl")
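# Illustrative sketch (hypothetical rows): the groupby().agg(lambda x: list(x.unique()))
# idiom above collapses one row per co-author into one row per article with author lists.
def _example_coauthor_lists():
    import pandas as pd
    df = pd.DataFrame({
        'ArticleID': ['A1', 'A1', 'A2'],
        'FullName': ['Curie, Marie', 'Curie, Pierre', 'Bohr, Niels'],
    })
    return df.groupby(['ArticleID']).agg({'FullName': lambda x: list(x.unique())}).reset_index()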
#loading nobel_article_co_author as pandas Dataframe
#nobel_article_co_author=pd.read_pickle("/home/apoorva_kasoju2712/wos_samplecode/nobel_article_co_author_df.pkl")
#retrieving nobel_author_df articles from article_df
nobel_author_article= | pd.merge(article_df[['ArticleID','Title']],nobel_author_df[['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName']],on=['ArticleID'],how='inner') | pandas.merge |
import toml
import pandas as pd
from pathlib import Path
def excel_mqtt_topics_to_toml(
excelFile="metadata.xlsx", tomlDestination="src/backend/.config/topics.toml"
):
df = pd.read_excel(excelFile, sheet_name="mqtt_topics")
d = df.to_dict()
topics = [d["topic"][row] for row in d["topic"]]
qos = [d["quality_of_service"][row] for row in d["quality_of_service"]]
if topics:
tomlTopics = {"top": topics, "qos": qos}
else:
print(
"WARNING! No topics defined in excel file. Will instead add wildcard '#' as subscription, "
"meaning backend will subscribe to all topics from broker. "
"If you do not wish this, either rerun the conversion from excel with topics in excel file, "
"or remove the subscroption from 'src/backend/.config/topics.toml'."
)
tomlTopics = {"top": ["#"], "qos": [1]}
for topic, qlevel in zip(topics, qos):
print(f"Adding topic '{topic}' with qos {qlevel}.")
with open(tomlDestination, "w") as f:
toml.dump(tomlTopics, f)
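# Illustrative sketch (hypothetical topic names and output path): the function above
# ultimately writes a dict of parallel lists with toml.dump, e.g.:
def _example_topics_toml(tomlDestination="topics_example.toml"):
    tomlTopics = {"top": ["site/+/temperature", "site/+/humidity"], "qos": [1, 1]}
    with open(tomlDestination, "w") as f:
        toml.dump(tomlTopics, f)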
def excel_meta_to_toml(
excelFile="metadata.xlsx", tomlDestination="src/backend/.config/metadata.toml"
):
# Process the date_range sheet
df = pd.read_excel(
excelFile,
sheet_name="date_range",
dtype={"Project_start": str, "Project_end": str},
)
d = df.to_dict()
dateDict = {
"Project_start": d["Project_start"][0],
"Project_end": d["Project_end"][0],
}
# Process the tbrs sheet
df = pd.read_excel(excelFile, sheet_name="tbrs")
d = df.to_dict()
tbrDict = {}
for row in d["tbr_serial_id"]:
tbrDict[f"tbr_{row}"] = {
"tbr_serial_id": d["tbr_serial_id"][row],
"frequency": d["frequency"][row],
"include": d["include"][row],
"cage_name": d["cage_name"][row],
}
# Process the 3D sheet
df = pd.read_excel(excelFile, sheet_name="3D")
d = df.to_dict()
dict3D = {
"include": d["include"][0],
"active_cages": list(str.split(d["active_cages"][0], ", ")),
}
# Process the 3D_cages sheet if include is set to True
if dict3D["include"]:
df = | pd.read_excel(excelFile, sheet_name="3D_cages") | pandas.read_excel |
import operator
import functools as f
import json
from pkg_resources import resource_filename
import pandas as pd
def _events_cleaning_map():
with open(resource_filename(__name__, 'events_cleaning_map.json')) as json_map:
return json.load(json_map)
def _find(element_path):
return lambda json: f.reduce(lambda a, b: a[b] if a and a == a and b in a else None, element_path.split('.'), json)
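# Illustrative check (hypothetical event): _find walks a dotted path through nested dicts
# and returns None as soon as any step is missing.
def _example_find():
    event = {'actor': {'login': 'octocat'}}
    assert _find('actor.login')(event) == 'octocat'
    assert _find('actor.id')(event) is None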
def format_events(events, details_col, paths):
details = pd.DataFrame({
json_field: events[details_col].apply(_find(path)) for json_field, path in paths.items()
})
events_without_generic_cols = events.loc[
:, (events.columns != details_col) & (events.columns != 'type')
]
return | pd.concat([events_without_generic_cols, details], axis=1) | pandas.concat |
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta, time
from unittest import TestCase
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
from odo import odo
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoDeltasWarning,
)
from zipline.pipeline.loaders.blaze.core import (
NonNumpyField,
NonPipelineField,
no_deltas_rules,
)
from zipline.utils.numpy_utils import (
float64_dtype,
int64_dtype,
repeat_last_axis,
)
from zipline.utils.test_utils import tmp_asset_finder, make_simple_equity_info
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_equity_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_equity_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
with_extra_sid = parameterized.expand(asset_infos)
def _utc_localize_index_level_0(df):
"""``tz_localize`` the first level of a multiindexed dataframe to utc.
Mutates df in place.
"""
idx = df.index
df.index = pd.MultiIndex.from_product(
(idx.levels[0].tz_localize('utc'), idx.levels[1]),
names=idx.names,
)
return df
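# Illustrative sketch (hypothetical frame): only the date level of the (date, asset)
# MultiIndex is localized to UTC, matching how the pipeline engine indexes its output.
def _example_utc_localize_level_0():
    frame = pd.DataFrame(
        {'value': [1.0, 2.0]},
        index=pd.MultiIndex.from_product(
            ([pd.Timestamp('2014-01-01')], ['A', 'B']),
        ),
    )
    return _utc_localize_index_level_0(frame)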
class BlazeToPipelineTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
dates = cls.dates.repeat(3)
cls.sids = sids = ord('A'), ord('B'), ord('C')
cls.df = df = pd.DataFrame({
'sid': sids * 3,
'value': (0., 1., 2., 1., 2., 3., 2., 3., 4.),
'int_value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': dates,
'timestamp': dates,
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
int_value: ?int64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
cls.missing_values = {'int_value': 0}
def test_tabular(self):
name = 'expr'
expr = bz.Data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertIs(ds.value.dtype, float64_dtype)
self.assertIs(ds.int_value.dtype, int64_dtype)
self.assertTrue(np.isnan(ds.value.missing_value))
self.assertEqual(ds.int_value.missing_value, 0)
invalid_type_fields = ('asof_date',)
for field in invalid_type_fields:
with self.assertRaises(AttributeError) as e:
getattr(ds, field)
self.assertIn("'%s'" % field, str(e.exception))
self.assertIn("'datetime'", str(e.exception))
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.Data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertIs(value.dtype, float64_dtype)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.Data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""
var * {
sid: ?int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_auto_deltas(self):
expr = bz.Data(
{'ds': self.df,
'ds_deltas': pd.DataFrame(columns=self.df.columns)},
dshape=var * Record((
('ds', self.dshape.measure),
('ds_deltas', self.dshape.measure),
)),
)
loader = BlazeLoader()
ds = from_blaze(
expr.ds,
loader=loader,
missing_values=self.missing_values,
)
self.assertEqual(len(loader), 1)
exprdata = loader[ds]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
def test_auto_deltas_fail_warn(self):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule=no_deltas_rules.warn,
missing_values=self.missing_values,
)
self.assertEqual(len(ws), 1)
w = ws[0].message
self.assertIsInstance(w, NoDeltasWarning)
self.assertIn(str(expr), str(w))
def test_auto_deltas_fail_raise(self):
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule=no_deltas_rules.raise_,
)
self.assertIn(str(expr), str(e.exception))
def test_non_numpy_field(self):
expr = bz.Data(
[],
dshape="""
var * {
a: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(object.__getattribute__(ds, 'a'), NonNumpyField)
def test_non_pipeline_field(self):
# NOTE: This test will fail if we ever allow string types in
# the Pipeline API. If this happens, change the dtype of the `a` field
# of expr to another type we don't allow.
expr = bz.Data(
[],
dshape="""
var * {
a: string,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule=no_deltas_rules.ignore,
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
def test_complex_expr(self):
expr = bz.Data(self.df, dshape=self.dshape)
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# Test that we can have complex expressions with no deltas
from_blaze(
expr_with_add,
deltas=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
deltas = bz.Data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
)
with self.assertRaises(TypeError):
from_blaze(
expr_with_add,
deltas=deltas,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1,
deltas=deltas,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
def _test_id(self, df, dshape, expected, finder, add):
expr = bz.Data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
)
p = Pipeline()
for a in add:
p.add(getattr(ds, a).latest, a)
dates = self.dates
with tmp_asset_finder() as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
assert_frame_equal(
result,
_utc_localize_index_level_0(expected),
check_dtype=False,
)
def test_custom_query_time_tz(self):
df = self.df.copy()
df['timestamp'] = (
pd.DatetimeIndex(df['timestamp'], tz='EST') +
timedelta(hours=8, minutes=44)
).tz_convert('utc').tz_localize(None)
df.ix[3:5, 'timestamp'] = pd.Timestamp('2014-01-01 13:45')
expr = bz.Data(df, name='expr', dshape=self.dshape)
loader = BlazeLoader(data_query_time=time(8, 45), data_query_tz='EST')
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule=no_deltas_rules.ignore,
missing_values=self.missing_values,
)
p = Pipeline()
p.add(ds.value.latest, 'value')
p.add(ds.int_value.latest, 'int_value')
dates = self.dates
with tmp_asset_finder() as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
expected = df.drop('asof_date', axis=1)
expected['timestamp'] = expected['timestamp'].dt.normalize().astype(
'datetime64[ns]',
).dt.tz_localize('utc')
expected.ix[3:5, 'timestamp'] += timedelta(days=1)
expected.set_index(['timestamp', 'sid'], inplace=True)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def test_id(self):
"""
input (self.df):
asof_date sid timestamp value
0 2014-01-01 65 2014-01-01 0
1 2014-01-01 66 2014-01-01 1
2 2014-01-01 67 2014-01-01 2
3 2014-01-02 65 2014-01-02 1
4 2014-01-02 66 2014-01-02 2
5 2014-01-02 67 2014-01-02 3
6 2014-01-03 65 2014-01-03 2
7 2014-01-03 66 2014-01-03 3
8 2014-01-03 67 2014-01-03 4
output (expected)
value
2014-01-01 Equity(65 [A]) 0
Equity(66 [B]) 1
Equity(67 [C]) 2
2014-01-02 Equity(65 [A]) 1
Equity(66 [B]) 2
Equity(67 [C]) 3
2014-01-03 Equity(65 [A]) 2
Equity(66 [B]) 3
Equity(67 [C]) 4
"""
with tmp_asset_finder() as finder:
expected = self.df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
self.df, self.dshape, expected, finder, ('int_value', 'value',)
)
def test_id_ffill_out_of_window(self):
"""
input (df):
asof_date timestamp sid other value
0 2013-12-22 2013-12-22 65 0 0
1 2013-12-22 2013-12-22 66 NaN 1
2 2013-12-22 2013-12-22 67 2 NaN
3 2013-12-23 2013-12-23 65 NaN 1
4 2013-12-23 2013-12-23 66 2 NaN
5 2013-12-23 2013-12-23 67 3 3
6 2013-12-24 2013-12-24 65 2 NaN
7 2013-12-24 2013-12-24 66 3 3
8 2013-12-24 2013-12-24 67 NaN 4
output (expected):
other value
2014-01-01 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-02 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
"""
dates = self.dates.repeat(3) - timedelta(days=10)
df = pd.DataFrame({
'sid': self.sids * 3,
'value': (0, 1, np.nan, 1, np.nan, 3, np.nan, 3, 4),
'other': (0, np.nan, 2, np.nan, 2, 3, 2, 3, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder() as finder:
expected = pd.DataFrame(
np.array([[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4]]),
columns=['other', 'value'],
index=pd.MultiIndex.from_product(
(self.dates, finder.retrieve_all(self.sids)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def test_id_multiple_columns(self):
"""
input (df):
asof_date sid timestamp value other
0 2014-01-01 65 2014-01-01 0 1
1 2014-01-01 66 2014-01-01 1 2
2 2014-01-01 67 2014-01-01 2 3
3 2014-01-02 65 2014-01-02 1 2
4 2014-01-02 66 2014-01-02 2 3
5 2014-01-02 67 2014-01-02 3 4
6 2014-01-03 65 2014-01-03 2 3
7 2014-01-03 66 2014-01-03 3 4
8 2014-01-03 67 2014-01-03 4 5
output (expected):
value other
2014-01-01 Equity(65 [A]) 0 1
Equity(66 [B]) 1 2
Equity(67 [C]) 2 3
2014-01-02 Equity(65 [A]) 1 2
Equity(66 [B]) 2 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 3
Equity(66 [B]) 3 4
Equity(67 [C]) 4 5
"""
df = self.df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder() as finder:
expected = df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
).sort_index(axis=1)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
df,
var * Record(fields),
expected,
finder,
('value', 'int_value', 'other'),
)
def test_id_macro_dataset(self):
"""
input (self.macro_df)
asof_date timestamp value
0 2014-01-01 2014-01-01 0
3 2014-01-02 2014-01-02 1
6 2014-01-03 2014-01-03 2
output (expected):
value
2014-01-01 Equity(65 [A]) 0
Equity(66 [B]) 0
Equity(67 [C]) 0
2014-01-02 Equity(65 [A]) 1
Equity(66 [B]) 1
Equity(67 [C]) 1
2014-01-03 Equity(65 [A]) 2
Equity(66 [B]) 2
Equity(67 [C]) 2
"""
asset_info = asset_infos[0][0]
nassets = len(asset_info)
with tmp_asset_finder() as finder:
expected = pd.DataFrame(
list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
index=pd.MultiIndex.from_product((
self.macro_df.timestamp,
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
self._test_id(
self.macro_df,
self.macro_dshape,
expected,
finder,
('value',),
)
def test_id_ffill_out_of_window_macro_dataset(self):
"""
input (df):
asof_date timestamp other value
0 2013-12-22 2013-12-22 NaN 0
1 2013-12-23 2013-12-23 1 NaN
2 2013-12-24 2013-12-24 NaN NaN
output (expected):
other value
2014-01-01 Equity(65 [A]) 1 0
Equity(66 [B]) 1 0
Equity(67 [C]) 1 0
2014-01-02 Equity(65 [A]) 1 0
Equity(66 [B]) 1 0
Equity(67 [C]) 1 0
2014-01-03 Equity(65 [A]) 1 0
Equity(66 [B]) 1 0
Equity(67 [C]) 1 0
"""
dates = self.dates - timedelta(days=10)
df = pd.DataFrame({
'value': (0, np.nan, np.nan),
'other': (np.nan, 1, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder() as finder:
expected = pd.DataFrame(
np.array([[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1]]),
columns=['value', 'other'],
index=pd.MultiIndex.from_product(
(self.dates, finder.retrieve_all(self.sids)),
),
).sort_index(axis=1)
self._test_id(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def test_id_macro_dataset_multiple_columns(self):
"""
input (df):
asof_date timestamp other value
0 2014-01-01 2014-01-01 1 0
3 2014-01-02 2014-01-02 2 1
6 2014-01-03 2014-01-03 3 2
output (expected):
other value
2014-01-01 Equity(65 [A]) 1 0
Equity(66 [B]) 1 0
Equity(67 [C]) 1 0
2014-01-02 Equity(65 [A]) 2 1
Equity(66 [B]) 2 1
Equity(67 [C]) 2 1
2014-01-03 Equity(65 [A]) 3 2
Equity(66 [B]) 3 2
Equity(67 [C]) 3 2
"""
df = self.macro_df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
asset_info = asset_infos[0][0]
with tmp_asset_finder(equities=asset_info) as finder:
expected = pd.DataFrame(
np.array([[0, 1],
[1, 2],
[2, 3]]).repeat(3, axis=0),
index=pd.MultiIndex.from_product((
df.timestamp,
finder.retrieve_all(asset_info.index),
)),
columns=('value', 'other'),
).sort_index(axis=1)
self._test_id(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def test_id_take_last_in_group(self):
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'sid', 'other', 'value'],
data=[
[T('2014-01-01'), T('2014-01-01 00'), 65, 0, 0],
[T('2014-01-01'), T('2014-01-01 01'), 65, 1, np.nan],
[T('2014-01-01'), T('2014-01-01 00'), 66, np.nan, np.nan],
[T('2014-01-01'), T('2014-01-01 01'), 66, np.nan, 1],
[T('2014-01-01'), T('2014-01-01 00'), 67, 2, np.nan],
[T('2014-01-01'), T('2014-01-01 01'), 67, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 65, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), 65, np.nan, 1],
[T('2014-01-02'), T('2014-01-02 00'), 66, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), 66, 2, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 67, 3, 3],
[T('2014-01-02'), T('2014-01-02 01'), 67, 3, 3],
[T('2014-01-03'), T('2014-01-03 00'), 65, 2, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 65, 2, np.nan],
[T('2014-01-03'), T('2014-01-03 00'), 66, 3, 3],
[T('2014-01-03'), T('2014-01-03 01'), 66, np.nan, np.nan],
[T('2014-01-03'), T('2014-01-03 00'), 67, np.nan, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 67, np.nan, 4],
],
)
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder() as finder:
expected = pd.DataFrame(
columns=['other', 'value'],
data=[
[1, 0], # 2014-01-01 Equity(65 [A])
[np.nan, 1], # Equity(66 [B])
[2, np.nan], # Equity(67 [C])
[1, 1], # 2014-01-02 Equity(65 [A])
[2, 1], # Equity(66 [B])
[3, 3], # Equity(67 [C])
[2, 1], # 2014-01-03 Equity(65 [A])
[3, 3], # Equity(66 [B])
[3, 3], # Equity(67 [C])
],
index=pd.MultiIndex.from_product(
(self.dates, finder.retrieve_all(self.sids)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def test_id_take_last_in_group_macro(self):
"""
output (expected):
other value
2014-01-01 Equity(65 [A]) NaN 1
Equity(66 [B]) NaN 1
Equity(67 [C]) NaN 1
2014-01-02 Equity(65 [A]) 1 2
Equity(66 [B]) 1 2
Equity(67 [C]) 1 2
2014-01-03 Equity(65 [A]) 2 2
Equity(66 [B]) 2 2
Equity(67 [C]) 2 2
"""
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
[T('2014-01-01'), T('2014-01-01 00'), np.nan, 1],
[T('2014-01-01'), T('2014-01-01 01'), np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 1, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), np.nan, 2],
[T('2014-01-03'), T('2014-01-03 00'), 2, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 3, 3],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder() as finder:
expected = pd.DataFrame(
columns=[
'other', 'value',
],
data=[
[np.nan, 1], # 2014-01-01 Equity(65 [A])
[np.nan, 1], # Equity(66 [B])
[np.nan, 1], # Equity(67 [C])
[1, 2], # 2014-01-02 Equity(65 [A])
[1, 2], # Equity(66 [B])
[1, 2], # Equity(67 [C])
[2, 2], # 2014-01-03 Equity(65 [A])
[2, 2], # Equity(66 [B])
[2, 2], # Equity(67 [C])
],
index=pd.MultiIndex.from_product(
(self.dates, finder.retrieve_all(self.sids)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def _run_pipeline(self,
expr,
deltas,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn):
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
loader=loader,
no_deltas_rule=no_deltas_rules.raise_,
missing_values=self.missing_values,
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(data, expected_views[today])
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader,
calendar,
finder,
).run_pipeline(p, start, end)
assert_frame_equal(
result,
_utc_localize_index_level_0(expected_output),
check_dtype=False,
)
@with_extra_sid
def test_deltas(self, asset_info):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
deltas = bz.Data(self.df, dshape=self.dshape)
deltas = bz.Data(
odo(
bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_deltas_only_one_delta_in_universe(self, asset_info):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
deltas = pd.DataFrame({
'sid': [65, 66],
'asof_date': [self.dates[1], self.dates[0]],
'timestamp': [self.dates[2], self.dates[1]],
'value': [10, 11],
})
deltas = bz.Data(deltas, name='deltas', dshape=self.dshape)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[0.0, 11.0, 2.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[10.0, 2.0, 3.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[2.0, 3.0, 4.0],
[2.0, 3.0, 4.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
columns=[
'value',
],
data=np.array([11, 10, 4]).repeat(len(asset_info.index)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_macro(self):
asset_info = asset_infos[0][0]
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.Data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': repeat_last_axis(np.array([10.0, 1.0]), nassets),
'2014-01-03': repeat_last_axis(np.array([11.0, 2.0]), nassets),
})
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.sids * 2,
'value': (0., 1., 2., 1., 2., 3.),
'int_value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates,
})
expr = bz.Data(baseline, name='expr', dshape=self.dshape)
deltas = bz.Data(
odo(
bz.transform(
expr,
value=expr.value + 10,
timestamp=expr.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
if len(asset_info) == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan, np.nan]],
expected_views,
)
expected_output_buffer = [10, 11, 12, np.nan, 11, 12, 13, np.nan]
else:
expected_output_buffer = [10, 11, 12, 11, 12, 13]
cal = pd.DatetimeIndex([
| pd.Timestamp('2014-01-01') | pandas.Timestamp |
from numbers import Number
from typing import List
import pandas as pd
from pandas.api.types import is_integer_dtype, is_float_dtype, is_string_dtype, is_numeric_dtype
from sklearn.base import TransformerMixin
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
from model_config import ModelConfig
from transformers import BaseTransformer
# TODO: Merge with set default feature names
def _default_transformer(col, train_df):
if is_integer_dtype(train_df[col]):
return int
if | is_float_dtype(train_df[col]) | pandas.api.types.is_float_dtype |