prompt | completion | api |
---|---|---|
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
"""
Download, transform and simulate various binary datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from io import BytesIO, StringIO
import requests
import numpy as np
import pandas as pd
from sklearn.utils import check_X_y
from imblearn.datasets import make_imbalance
from .base import Datasets, FETCH_URLS, RANDOM_STATE
class ImbalancedBinaryDatasets(Datasets):
"""Class to download, transform and save binary class imbalanced
datasets."""
MULTIPLICATION_FACTORS = [2, 3]
@staticmethod
def _calculate_ratio(multiplication_factor, y):
"""Calculate ratio based on IRs multiplication factor."""
ratio = Counter(y).copy()
ratio[1] = int(ratio[1] / multiplication_factor)
return ratio
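# Illustrative sketch (added note, not part of the original module): with a class
# balance of Counter({0: 100, 1: 30}) and multiplication_factor=2, this method
# returns {0: 100, 1: 15}; dividing the minority count by the factor multiplies
# the imbalance ratio (IR) by that factor.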
def _make_imbalance(self, data, multiplication_factor):
"""Undersample the minority class."""
X_columns = [col for col in data.columns if col != "target"]
X, y = check_X_y(data.loc[:, X_columns], data.target)
if multiplication_factor > 1.0:
sampling_strategy = self._calculate_ratio(multiplication_factor, y)
X, y = make_imbalance(
X, y, sampling_strategy=sampling_strategy, random_state=RANDOM_STATE
)
data = pd.DataFrame(np.column_stack((X, y)))
data.iloc[:, -1] = data.iloc[:, -1].astype(int)
return data
def download(self):
"""Download the datasets and append undersampled versions of them."""
super(ImbalancedBinaryDatasets, self).download()
undersampled_datasets = []
for (name, data), factor in list(
product(self.content_, self.MULTIPLICATION_FACTORS)
):
ratio = self._calculate_ratio(factor, data.target)
if ratio[1] >= 15:
data = self._make_imbalance(data, factor)
undersampled_datasets.append((f"{name} ({factor})", data))
self.content_ += undersampled_datasets
return self
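# Sketch of the resulting content (follows from the loop above): after download(),
# self.content_ holds each original dataset plus, whenever the undersampled
# minority class would keep at least 15 samples, extra entries named e.g.
# "ecoli (2)" and "ecoli (3)" with the imbalance ratio multiplied by 2 and 3.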
def fetch_breast_tissue(self):
"""Download and transform the Breast Tissue Data Set.
The minority class is identified as the `car` and `fad`
labels and the majority class as the rest of the labels.
http://archive.ics.uci.edu/ml/datasets/breast+tissue
"""
data = pd.read_excel(FETCH_URLS["breast_tissue"], sheet_name="Data")
data = data.drop(columns="Case #").rename(columns={"Class": "target"})
data["target"] = data["target"].isin(["car", "fad"]).astype(int)
return data
def fetch_ecoli(self):
"""Download and transform the Ecoli Data Set.
The minority class is identified as the `pp` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/ecoli
"""
data = pd.read_csv(FETCH_URLS["ecoli"], header=None, delim_whitespace=True)
data = data.drop(columns=0).rename(columns={8: "target"})
data["target"] = data["target"].isin(["pp"]).astype(int)
return data
def fetch_eucalyptus(self):
"""Download and transform the Eucalyptus Data Set.
The minority class is identified as the `best` label
and the majority class as the rest of the labels.
https://www.openml.org/d/188
"""
data = pd.read_csv(FETCH_URLS["eucalyptus"])
data = data.iloc[:, -9:].rename(columns={"Utility": "target"})
data = data[data != "?"].dropna()
data["target"] = data["target"].isin(["best"]).astype(int)
return data
def fetch_glass(self):
"""Download and transform the Glass Identification Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/glass+identification
"""
data =
|
pd.read_csv(FETCH_URLS["glass"], header=None)
|
pandas.read_csv
|
# -*- coding: utf-8 -*- {{{
#
# Your license here
# }}}
import os
import sys
from dateutil import parser
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os.path import dirname, abspath, join
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from fleet_request import FleetRequest
from utils import ensure_ddir
from services.reg_service.helpers.historical_signal_helper import HistoricalSignalHelper
from services.reg_service.helpers.clearing_price_helper import ClearingPriceHelper
from pdb import set_trace as bp
# Class for traditional regulation and dynamic regulation services.
class RegService():
"""
This class implements FleetInterface so that it can communicate with a fleet
"""
_fleet = None
def __init__(self, *args, **kwargs):
self._historial_signal_helper = HistoricalSignalHelper()
self._clearing_price_helper = ClearingPriceHelper()
# The "request_loop" function is the workhorse that manages hourly loops and sending requests & retrieving responses.
# It returns a 2-level dictionary; 1st level key is the starting time of each hour.
# TODO: [minor] currently, the start and end times are hardcoded. Ideally, they would be based on prompted user inputs.
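# Hedged sketch of the returned structure (inferred from the loop below, not an
# authoritative spec): request_loop() builds hourly_results, roughly
#     {datetime(2017, 8, 1, 16, 0): {'performance_score': ...,
#                                    'hourly_integrated_MW': ...,
#                                    'mileage_ratio': ...,
#                                    'Regulation_Market_Clearing_Price(RMCP)': ...,
#                                    'Reg_Clearing_Price_Credit': ...},
#      datetime(2017, 8, 1, 17, 0): {...}, ...}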
def request_loop(self, service_type="Traditional",
start_time=parser.parse("2017-08-01 16:00:00"),
end_time=parser.parse("2017-08-01 21:00:00"),
sim_step=timedelta(seconds=2),
clearing_price_filename='historical-ancillary-service-data-2017.xls',
fleet_name="PVInverterFleet"):
# Check service type compatibility.
if service_type not in ['Traditional', 'Dynamic']:
raise ValueError("service_type has to be either 'Traditional' or 'Dynamic'!")
# Generate lists of 2s request and response class objects based on regulation service type (i.e. traditional vs. dynamic).
print(' Generating traditional signal lists')
request_list_2s_trad, response_list_2s_trad = self.get_signal_lists('Traditional', start_time, end_time, sim_step)
if service_type == 'Dynamic':
print(' Generating dynamic signal lists')
request_list_2s_dynm, response_list_2s_dynm = self.get_signal_lists(service_type, start_time, end_time, sim_step)
# Assign generic names to signal lists.
request_list_2s_tot = request_list_2s_dynm
response_list_2s_tot = response_list_2s_dynm
else:
request_list_2s_tot = request_list_2s_trad
response_list_2s_tot = response_list_2s_trad
# Read and store a month's worth of hourly regulation price data, indexed by datetime.
print(' Getting price data')
clearing_price_filename = join(dirname(abspath(__file__)), clearing_price_filename)
self._clearing_price_helper.read_and_store_clearing_prices(clearing_price_filename, start_time)
# Create a dictionary to store hourly results incl. performance score, clearing price credit, etc.
hourly_results = {}
# Set time duration.
cur_time = start_time
one_hour = timedelta(hours=1)
print(' Starting hourly loop')
# Loop through each hour between "start_time" and "end_time".
while cur_time < end_time - timedelta(minutes=65):
# Generate 65 minutes' worth (1 hour plus a 5-minute buffer) of request and response arrays for calculating scores.
cur_end_time = cur_time + timedelta(minutes=65)
# Traditional regulation request and response signals are needed regardless of service type.
request_list_2s_65min_trad = [r.P_req for r in request_list_2s_trad if cur_time <= r.ts_req <= cur_end_time]
response_list_2s_65min_trad = [r.P_service for r in response_list_2s_trad if
cur_time <= r.ts <= cur_end_time]
request_array_2s_65min_trad = np.asarray(request_list_2s_65min_trad)
response_array_2s_65min_trad = np.asarray(response_list_2s_65min_trad)
# For dynamic regulation, mileage ratio calculation is as below.
if service_type == 'Dynamic':
# Chop total signals to 1 hour.
request_list_2s_65min_dynm = [r.P_req for r in request_list_2s_dynm if
cur_time <= r.ts_req <= cur_end_time]
response_list_2s_65min_dynm = [r.P_service for r in response_list_2s_dynm if
cur_time <= r.ts <= cur_end_time]
request_array_2s_65min_dynm = np.asarray(request_list_2s_65min_dynm)
response_array_2s_65min_dynm = np.asarray(response_list_2s_65min_dynm)
# The "mileage ratio" equals "1" for traditional regulation and is > 1 for dynamic regulation.
try:
Hourly_mileage_trad = self.Hourly_reg_mileage(request_array_2s_65min_trad)
Hourly_mileage_dynm = self.Hourly_reg_mileage(request_array_2s_65min_dynm)
mileage_ratio = Hourly_mileage_dynm / Hourly_mileage_trad
except Exception:
# This occurs for March 12 at 23:00 hours.
# The self.Hourly_reg_mileage() method requires an array of a specific
# length to work properly. Therefore, whenever the underlying data have missing
# values, this function breaks.
mileage_ratio = np.nan
# Assign generic names to signal lists.
request_list_2s_65min = request_list_2s_65min_dynm
response_list_2s_65min = response_list_2s_65min_dynm
else:
request_list_2s_65min = request_list_2s_65min_trad
response_list_2s_65min = response_list_2s_65min_trad
mileage_ratio = 1
# Convert lists into arrays and convert units from kW to MW.
request_array_2s = np.asarray(request_list_2s_65min) / 1000
response_array_2s = np.asarray(response_list_2s_65min) / 1000
# Slice arrays at 10 s intervals; the resulting arrays have 391 data points.
request_array_10s = request_array_2s[::5]
response_array_10s = response_array_2s[::5]
# Use if statement to ensure full array is present
# (Pandas skips over the NaN rows, so the array ends up being shorter than it should be)
if len(request_array_10s) == 391:
# Calculate performance scores for current hour and store in a dictionary keyed by starting time.
hourly_results[cur_time] = {}
hourly_results[cur_time]['performance_score'] = self.perf_score(request_array_10s, response_array_10s)
hourly_results[cur_time]['hourly_integrated_MW'] = self.Hr_int_reg_MW(request_array_2s)
hourly_results[cur_time]['mileage_ratio'] = mileage_ratio
hourly_results[cur_time]['Regulation_Market_Clearing_Price(RMCP)'] = \
self._clearing_price_helper.clearing_prices[cur_time]
hourly_results[cur_time]['Reg_Clearing_Price_Credit'] = self.Reg_clr_pr_credit(
service_type,
hourly_results[cur_time]['Regulation_Market_Clearing_Price(RMCP)'],
hourly_results[cur_time]['performance_score'][0],
hourly_results[cur_time]['hourly_integrated_MW'],
mileage_ratio)
else:  # Skip this hour: NaNs in the underlying data left request_array_10s shorter than 391 points.
pass
# Move to the next hour.
cur_time += one_hour
# Store request and response parameters in lists for plotting and printing to text files.
P_request = [r.P_req for r in request_list_2s_tot]
ts_request = [r.ts_req for r in request_list_2s_tot]
P_response = [r.P_service for r in response_list_2s_tot]
P_togrid = [r.P_togrid for r in response_list_2s_tot]
# Save the responses to a csv
results_df = pd.DataFrame({
'DateTime': ts_request,
'P_request': P_request,
'P_response': P_response,
'P_togrid': P_togrid
})
# Calculate P_base
results_df['P_base'] = results_df['P_togrid'] - results_df['P_response']
# Add SoC if battery fleet
if 'battery' in fleet_name.lower():
SOC = [r.soc for r in response_list_2s_tot]
results_df['SOC'] = SOC
results_df_dir = join(dirname(abspath(__file__)), 'results', '')
ensure_ddir(results_df_dir)
results_df_filename = datetime.now().strftime('%Y%m%d') + '_' + ts_request[0].strftime(
'%B') + '_2sec_results_' + service_type + '_' + fleet_name + '.csv'
results_df.to_csv(results_df_dir + results_df_filename)
# Generate and save plot of the normalized request and response signals for the month
print(' Plotting monthly response signal')
plot_dir = join(dirname(abspath(__file__)), 'results', 'plots', '')
ensure_ddir(plot_dir)
plot_filename = datetime.now().strftime('%Y%m%d') + '_' + \
ts_request[0].strftime('%B') + \
'_2secnormsignals_' + \
service_type + \
'_' + \
fleet_name + '.png'
plt.figure(1, figsize=(15, 8))
plt.subplot(211)
if (not (all(pd.isnull(results_df['P_request'])))):
plt.plot(ts_request, P_request, label='P_request')
if (not (all(pd.isnull(results_df['P_response'])))):
plt.plot(ts_request, P_response, label='P_response')
if (not (all(
|
pd.isnull(results_df['P_togrid'])
|
pandas.isnull
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
# import collections
from collections import Counter
import pickle
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress, zscore
import pandas as pd
import numpy as np
import datetime as dt
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes, CollectLoadPars
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
from prep_postchar import postChar
import EIS_export
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
import logging
_logger = logging.getLogger(__name__)
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
folder.mkdir(exist_ok=True, parents=True)
return folder
def filter_cols(_df, n):
if any(["startswith" in i for i in n]):
_lst = [i for i in _df.columns if i.startswith(n[-1])]
else:
_lst = [i for i in _df.columns if n[-1] in i]
return _lst
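# Illustrative examples (hypothetical patterns, not from the original):
#     filter_cols(df, ["startswith", "N2_"])  -> columns whose names start with "N2_"
#     filter_cols(df, ["Jkin"])               -> columns whose names contain "Jkin"
# Only the last element of n is used as the pattern; the other elements merely
# signal whether prefix matching should be used.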
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(
Path("Preparation-Thesis/SiO2_projects/SiO2_Me_ECdepth+LC")
)
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep=True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(
tuples_index=[tuple(i) for i in output_df[names].values]
)
if isinstance(columns, list):
output_df = output_df.assign(
tuples_columns=[tuple(i) for i in output_df[columns].values]
) # hashable
output_df = output_df.pivot(
index="tuples_index", columns="tuples_columns", values=values
)
output_df.columns = pd.MultiIndex.from_tuples(
output_df.columns, names=columns
) # reduced
else:
output_df = output_df.pivot(
index="tuples_index", columns=columns, values=values
)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
return output_df
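# Hedged usage sketch (hypothetical column names): for a frame indexed by
# ['SampleID', 'Gas'], something like
#     multiIndex_pivot(df, index=None, columns='pH', values='value')
# pivots on 'pH' while keeping a ('SampleID', 'Gas') MultiIndex on the rows,
# which a plain DataFrame.pivot call could not do (see the pandas issue 23955
# linked above).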
def get_float_cols(df):
return [key for key, val in df.dtypes.to_dict().items() if "float64" in str(val)]
def cm2inch(value):
return value / 2.54
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = func(*args, **kwargs)
# Do something after
return value
return wrapper_decorator
def read_load_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
DF_diff.columns
return DF_diff
except Exception as e:
print("reading error", e)
return pd.DataFrame()
else:
print("read error not existing", _pklpath)
return pd.DataFrame()
def save_DF_pkl(_pklstem, _DF):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
_DF.to_pickle(_pklpath)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def load_dict_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
with open(_pklpath, "rb") as file:
_dict = pickle.load(file)
return _dict
except Exception as e:
print("reading error", e)
return {}
else:
print("read error not existing", _pklpath)
return {}
def save_dict_pkl(_pklstem, _dict):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
with open(_pklpath, "wb") as file:
pickle.dump(_dict, file)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def PorphSiO2_template():
# 'SerieIDs' : ('Porph_SiO2')*5,
Series_Porph_SiO2 = {
"SampleID": ("JOS1", "JOS2", "JOS3", "JOS4", "JOS5"),
"Metal": ("Fe", "Co", "MnTPP", "FeTPP", "H2"),
"color": (2, 4, 6, 15, 3),
}
Porphyrins = {
"TMPP": {"Formula": "C48H38N4O4", "MW": 734.8382},
"TMPP-Fe(III)Cl": {"Formula": "C48H36ClFeN4O4", "MW": 824.1204},
"TMPP-Co(II)": {"Formula": "C48H36CoN4O4", "MW": 791.7556},
"TTP-Mn(III)Cl": {"Formula": "C44H28ClMnN4", "MW": 703.1098},
"TPP-Fe(III)Cl": {"Formula": "C44H28ClFeN4", "MW": 704.0168},
"TPP": {"Formula": "C44H30N4", "MW": 614.7346},
}
Porph_template = pd.DataFrame(Series_Porph_SiO2)
return Porph_template
def EC_types_grp():
# KL ['ORR_E_AppV_RHE', 'ORR_KL_E_AppV_RHE','Electrode']
_basic_EC_cond = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
_extra_EC_cond = {
"N2CV": [],
"N2": [],
"ORR": ["RPM_DAC_uni"],
"KL": ["Electrode", "ORR_E_AppV_RHE"],
"EIS": ["E_RHE"],
"HER": ["HER_RPM_post"],
"OER": [],
}
_out = {key: _basic_EC_cond + val for key, val in _extra_EC_cond.items()}
return _out
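# Example of the resulting mapping (follows directly from the dicts above):
#     EC_types_grp()['ORR'] == ['postAST_post', 'Sweep_Type', 'pH',
#                               'Loading_cm2', 'RPM_DAC_uni']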
def save_EC_index_PorphSiO2(EC_index, EC_folder):
_porph_index = EC_index.loc[EC_index.SampleID.isin(PorphSiO2_template().SampleID)]
_porph_index.to_excel(EC_folder.joinpath("EC_index_PorphSiO2.xlsx"))
# save_EC_index_PorphSiO2(EC_index, EC_folder)
class EC_PorphSiO2:
folder = FindExpFolder("PorphSiO2").compare
Porph_template = PorphSiO2_template()
# globals EC_index
# ['Model(Singh2015_RQRQ)', 'Model(Singh2015_RQRQR)', 'Model(Bandarenka_2011_RQRQR)',
# 'Model(Singh2015_RQRWR)', 'Model(Randles_RQRQ)', 'Model(Singh2015_R3RQ)']
# model_select = EC_PorphSiO2.EIS_models[1]
# self = EC_PorphSiO2()
def __init__(self):
# self.index, self.AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
self.select_EC_ASTexps_from_ECindex()
# self.pars = EC_PorphSiO2.mergedEC()
# self.par_export = EC_OHC.to_excel(self.folder.joinpath('EC_ORR_HPRR.xlsx'))
def select_EC_ASTexps_from_ECindex(self):
EC_idx_PorphSiO2_samples = EC_index.loc[
EC_index.SampleID.isin(self.Porph_template.SampleID.unique())
]
# pd.read_excel(list(EC_folder.rglob('*EC_index*'))[0])
EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(np.datetime_as_string(np.datetime64(i, "D")))
for i in EC_idx_PorphSiO2_samples.PAR_date.to_numpy()
]
}
)
self.EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples
self.get_AST_days()
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
EC_idx_PorphSiO2_AST = EC_idx_PorphSiO2_samples.loc[
EC_idx_PorphSiO2_samples.PAR_date_day_dt.isin(
[i for a in self.AST_days.to_numpy() for i in a]
)
]
# AST_days = EC_PorphSiO2.get_AST_days()
# EC_idx_PorphSiO2_AST.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
self.EC_idx_PorphSiO2 = EC_idx_PorphSiO2_AST
# if LC_idx_fp.exists():
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
def get_AST_days(self):
gr_idx = self.EC_idx_PorphSiO2_samples.groupby("PAR_date_day_dt")
AST_days = []
for n, gr in gr_idx:
# n,gr
exps = gr.PAR_exp.unique()
# gr.PAR_date_day.unique()[0]
if any(["AST" in i for i in exps]):
# print(n,exps)
# AST_days.append(n)
if n + dt.timedelta(1) in gr_idx.groups.keys():
_post = gr_idx.get_group(n + dt.timedelta(1))
# print(n + dt.timedelta(1), gr_idx.get_group(n + dt.timedelta(1)))
AST_days.append((n, n + dt.timedelta(1)))
else:
AST_days.append((n, n))
print(n + dt.timedelta(1), "grp missing")
# (AST_days[-1][0], AST_days[0][1])
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,25)))
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,26)))
_extra_AST_days = [
(dt.date(2019, 5, 6), dt.date(2019, 1, 25)),
(dt.date(2019, 5, 6), dt.date(2019, 1, 26)),
]
AST_days += _extra_AST_days
AST_days = pd.DataFrame(
AST_days, columns=["PAR_date_day_dt_pre", "PAR_date_day_dt_post"]
)
AST_days = AST_days.assign(
**{
"PAR_date_day_dt_diff": AST_days.PAR_date_day_dt_pre
- AST_days.PAR_date_day_dt_post
}
)
self.AST_days = AST_days
# def select_ECexps(EC_folder):
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
# AST_days = EC_PorphSiO2.get_AST_days()
# if LC_idx_fp.exists():
# LC_fls = EC_PorphSiO2.EC_idx_PorphSiO2.loc[EC_PorphSiO2.EC_idx_PorphSiO2.PAR_date_day_dt.isin([i for a in AST_days.to_numpy() for i in a])]
# LC_fls.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
# def repr_index(self):
# PAR_exp_uniq = {grn : len(grp) for grn,grp in self.index.groupby("PAR_exp")}
# print(f'Len({len(self.index)},\n{PAR_exp_uniq}')
def _testing_():
tt = EC_prepare_EC_merged(reload_AST=True, reload_merged=True, reload_pars=True)
self = tt
N2CV = self.N2cv(reload=False, use_daily=True)
#%% == EC_prepare_EC_merged == testing
class EC_prepare_EC_merged:
EIS_models = EIS_export.EIS_selection.mod_select
# ['Model(EEC_Randles_RWpCPE)', 'Model(EEC_2CPE)', 'Model(EEC_2CPEpW)',
# 'Model(EEC_RQ_RQ_RW)', 'Model(EEC_RQ_RQ_RQ)', 'Model(Randles_RQRQ)']
ORR_reload = dict(reload=True, use_daily=False)
ORR_no_reload = dict(reload=False, use_daily=True)
use_daily = True
# global ParsColl
# ParsColl = ParsColl
mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file"]] + [
"Sweep_Type"
]
_pkl_EC_merged = "EC_merged_dict"
def __init__(self, reload_AST=False, reload_merged=False, reload_pars=True):
self.reload_AST = reload_AST
self.reload_merged = reload_merged
self.reload_pars = reload_pars
self.set_pars_collection()
self.reload_pars_kws = dict(reload=reload_pars, use_daily=self.use_daily)
self.EC_merged_dict = {}
self.load_EC_PorphSiO2()
self.load_merged_EC()
def set_pars_collection(self):
if "ParsColl" in globals().keys():
self.ParsColl = ParsColl
else:
Pars_Collection = CollectLoadPars(load_type="fast")
# globals()['Pars_Collection'] = Pars_Collection
ParsColl = Pars_Collection.pars_collection
self.ParsColl = ParsColl
def load_EC_PorphSiO2(self):
self.EC_PorphSiO2 = EC_PorphSiO2()
self.AST_days = self.EC_PorphSiO2.AST_days
self.EC_idx_PorphSiO2 = self.EC_PorphSiO2.EC_idx_PorphSiO2
def load_merged_EC(self):
if self.reload_merged:
self.reload_merged_EC()
if not self.EC_merged_dict:
_load_EC_merge = load_dict_pkl(self._pkl_EC_merged)
if _load_EC_merge:
self.EC_merged_dict = _load_EC_merge
def reload_merged_EC(self):
try:
self.load_N2CV()
self.load_ORR()
self.load_KL()
self.load_EIS()
self.load_HER()
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
except Exception as e:
_logger.warning(f"EC_prepare_EC_merged, reload_merged_EC failure: {e}")
def get_AST_matches(self, DF, _verbose=False):
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# DF = ORR.drop_duplicates()
# DF = N2CV.drop_duplicates()
# DF = EIS.drop_duplicates()
# DF = HER.drop_duplicates()
# DF = ttpars
if "PAR_date_day_dt" not in DF.columns:
DF = DF.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(
np.datetime_as_string(np.datetime64(i, "D"))
)
for i in DF.PAR_date.to_numpy()
]
}
)
DF.PAR_date_day_dt = pd.to_datetime(DF.PAR_date_day_dt, unit="D")
# list((set(DF.columns).intersection(set(LC_fls.columns))).intersection(set(mcols) ))
# DF = pd.merge(DF,LC_fls,on=)
_compare_cols = [
i for i in ["SampleID", "pH", "Gas", "Loading_cm2"] if i in DF.columns
]
_swp_rpm = [
"Sweep_Type",
"RPM_DAC_uni" if "RPM_DAC_uni" in DF.columns else "RPM_DAC",
]
_coll = []
# AST_days_run_lst = [i for i in AST_days if len(i) == 2][-1:]
for n, r in self.AST_days.iterrows():
# if len(_dates) == 2:
# _pre,_post = _dates
# elif (len_dates) == 1:
_pre, _post = r.PAR_date_day_dt_pre, r.PAR_date_day_dt_post
_preslice = DF.loc[
(DF.PAR_date_day == _pre.strftime("%Y-%m-%d")) & (DF.postAST == "no")
]
pre = _preslice.groupby(_compare_cols)
_postslice = DF.loc[
(DF.PAR_date_day == _post.strftime("%Y-%m-%d")) & (DF.postAST != "no")
]
post = _postslice.groupby(_compare_cols)
_res = {}
_res = {
"pre_PAR_date_day_dt": _pre,
"post_PAR_date_day_dt": _post,
"AST_days_n": n,
}
# print(_res,[_preslice.postAST.unique()[0], _postslice.postAST.unique()[0]])
union = set(pre.groups.keys()).union(set(post.groups.keys()))
matches = set(pre.groups.keys()).intersection(set(post.groups.keys()))
_difference_pre = set(pre.groups.keys()).difference(set(post.groups.keys()))
_difference_post = set(post.groups.keys()).difference(
set(pre.groups.keys())
)
# _diffr.append((_pre,_post,_difference_pre, _difference_post))
if not _preslice.empty and not _postslice.empty:
for match in union:
_res.update(dict(zip(_compare_cols, match)))
_mgrpcols = ["PAR_file", "dupli_num", "postAST"]
if match in matches:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
elif match in _difference_pre:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = pre.get_group(match).groupby(_mgrpcols)
elif match in _difference_post:
_mpre = post.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
# print(_mpost.groups)
for (_prePF, npr, _preAST), prgrp in _mpre:
_res.update(
{
"pre_dupli_num": npr,
"pre_PAR_file": _prePF,
"pre_postAST": _preAST,
}
)
for (_poPF, npo, _postAST), pogrp in _mpost:
_res.update(
{
"post_dupli_num": npo,
"post_PAR_file": _poPF,
"post_postAST": _postAST,
"dupli_num_combo": f"{npr}, {npo}",
}
)
if _postAST in "postAST_sHA|postAST_LC" and _verbose:
print(_res)
_pr1 = prgrp.groupby(_swp_rpm)
_po1 = pogrp.groupby(_swp_rpm)
_rpmswp_matches = set(_pr1.groups.keys()).intersection(
set(_po1.groups.keys())
)
for _m in _rpmswp_matches:
_res.update(dict(zip(_swp_rpm, _m)))
# print(_res)
_coll.append(_res.copy())
AST_matches = pd.DataFrame(_coll)
return AST_matches
# prgrp.groupby(['Sweep_Type','RPM_DAC']).groups
# prgrp['ORR_Jkin_min_700']-pogrp['ORR_Jkin_min_700']
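# Hedged sketch (inferred from get_AST_matches above): AST_matches is a DataFrame
# with one row per matched pre/post experiment pair, carrying columns such as
# 'pre_PAR_date_day_dt', 'post_PAR_date_day_dt', 'AST_days_n', the _compare_cols
# (e.g. 'SampleID', 'pH', 'Gas', 'Loading_cm2'), 'pre_PAR_file'/'post_PAR_file',
# 'pre_postAST'/'post_postAST', 'dupli_num_combo' and the sweep/RPM columns.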
def load_N2CV(self):
N2CV = self.edit_pars_N2cv(**self.reload_pars_kws)
# N2_pltqry = EC_merged_dict.get('N2CV')
N2_AST = self.get_AST_matches(N2CV)
N2_AST_diff = self.compare_AST_pars(N2CV, N2_AST, reload=self.reload_AST)
# _DFtype = EC_PorphSiO2.sense_DF_type(N2CV)
# EC_merged_dict.update({'N2CV' : N2_AST_diff})
self.EC_merged_dict.update(
{"N2CV": {"PARS": N2CV, "AST_matches": N2_AST, "AST_diff": N2_AST_diff}}
)
def load_ORR(self, _testing=False):
ORR = self.edit_pars_ORR()
ORR_AST = self.get_AST_matches(ORR)
ORR_AST_diff = self.compare_AST_pars(ORR, ORR_AST, reload=self.reload_AST)
if _testing:
ttpars = ORR.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_AST = self.get_AST_matches(ttpars)
tt = ORR_AST.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_diff = self.compare_AST_pars(ORR, tt, reload=self.reload_AST, save_pkl=False)
# ttpfs = ORR.loc[ORR.ORR_Jkin_max_700 > 0].PAR_file.unique()
# ttpfs = ORR.query('Sweep_Type == "mean"').loc[ORR.ORR_E_onset > 0.85].PAR_file.unique()
# ORR.loc[(ORR.ORR_E_onset > 0.85) & (ORR.Sweep_Type == 'cathodic')].PAR_file.unique()
# EC_merged_dict.update({'ORR' : ORR_AST_diff})
self.EC_merged_dict.update(
{"ORR": {"PARS": ORR, "AST_matches": ORR_AST, "AST_diff": ORR_AST_diff}}
)
def load_KL(self):
KL = self.edit_pars_KL()
KL = KL.assign(**{"RPM_DAC": 1500})
KL_AST = self.get_AST_matches(KL)
KL_AST_diff = self.compare_AST_pars(KL, KL_AST, reload=self.reload_AST)
# EC_merged_dict.update({'KL' : KL_AST_diff})
self.EC_merged_dict.update(
{"KL": {"PARS": KL, "AST_matches": KL_AST, "AST_diff": KL_AST_diff}}
)
def load_EIS(self):
EIS = self.edit_pars_EIS()
EIS_AST = self.get_AST_matches(EIS)
EIS_AST_diff = self.compare_AST_pars(EIS, EIS_AST, reload=self.reload_AST)
# EC_merged_dict.update({'EIS' : EIS_AST_diff})
self.EC_merged_dict.update(
{"EIS": {"PARS": EIS, "AST_matches": EIS_AST, "AST_diff": EIS_AST_diff}}
)
def load_HER(self):
HER = self.edit_pars_HER()
HER_type_grp = HER.groupby("HER_type")
HER.HER_at_E_slice = HER.HER_at_E_slice.round(3)
HER_AST = self.get_AST_matches(HER)
for Htype, Hgrp in HER_type_grp:
# Htype, Hgrp = 'E_slice', HER.loc[HER.groupby('HER_type').groups['E_slice']]
HER_AST_diff = self.compare_AST_pars(
Hgrp, HER_AST, reload=self.reload_AST, extra=Htype
)
try:
if not HER_AST_diff.empty:
self.EC_merged_dict.update(
{
f"HER_{Htype}": {
"PARS": Hgrp,
"AST_matches": HER_AST,
"AST_diff": HER_AST_diff,
}
}
)
except Exception as e:
print(f"HER {Htype} fail, {e}")
# EC_merged_dict.update({f'HER_{Htype}' : HER_AST_diff})
def finish_EC_merged(self):
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
# EC_merged_dict = load_dict_pkl(_pkl_EC_merged)
def add_filter_selection_of_EC_merged(self):
_drop_AST_row_pre = [
"2019-01-25;N2_20cls_300_100_10_JOS5_256;no;0",
"2019-01-25;N2_20cls_300_100_10_JOS4_256;no;0",
]
_check_cols = [
"SampleID",
"AST_row",
"PAR_date_day_dt_pre",
"PAR_date_day_dt_post",
"postAST_post",
]
_srt2 = ["postAST_post", "SampleID"]
_ch_match = [
"SampleID",
"pre_PAR_date_day_dt",
"post_PAR_date_day_dt",
"post_postAST",
"pre_postAST",
]
_sortcols = ["SampleID", "post_postAST"][::-1]
|
pd.set_option("display.max_columns", 6)
|
pandas.set_option
|
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng =
|
date_range("1/1/2000", "1/1/2001")
|
pandas.date_range
|
import pandas as pd
import numpy as np
import sys
from config import TARGET_WEEKS_INTO_FUTURE, RAW_DATA_PATH, WEBSCRAPE_DATA_PATH
def pick_team(out, budget, position_column, points_column, name_column, price_column, formation=[4, 4, 2]):
output_df = out.copy()
goalkeeper = output_df.where(output_df[position_column] == 1).sort_values(points_column, ascending=False).head(1)
defenders = output_df.where(output_df[position_column] == 2).sort_values(points_column, ascending=False).head(
formation[0])
midfielders = output_df.where(output_df[position_column] == 3).sort_values(points_column, ascending=False).head(
formation[1])
strikers = output_df.where(output_df[position_column] == 4).sort_values(points_column, ascending=False).head(
formation[2])
team = goalkeeper.append(defenders).append(midfielders).append(strikers)
print("")
print("IDEAL TEAM:")
print("")
print(team.head(11))
print("")
output_df = pd.concat([output_df, team, team]).drop_duplicates(keep=False)
team_value = team[price_column].sum()
print("Starting value= " + str(team[price_column].sum()))
print("Starting points= " + str(team[points_column].sum() + team[points_column].max()))
print("")
while team_value >= budget + 0.001:
potential_replacements = pd.DataFrame(
columns=['init_name', 'init_price', 'init_points', 'new_name', 'new_price', 'new_points', 'pos'])
# Find any players in the same position with a lower price, sorted descending
for index, row in team.iterrows():
pos = row[position_column]
price = row[price_column]
potential_replacement = output_df.where(
(output_df[position_column] == pos) & (output_df[price_column] < price)).sort_values(points_column,
ascending=False).head(
1)
potential_replacements = potential_replacements.append({
'init_name': row[name_column],
'init_price': price,
'init_points': row[points_column],
'new_name': potential_replacement.iloc[0][name_column],
'new_price': potential_replacement.iloc[0][price_column],
'new_points': potential_replacement.iloc[0][points_column],
'pos': pos
}, ignore_index=True)
potential_replacements['points_hit'] = potential_replacements['init_points'] - potential_replacements[
'new_points']
potential_replacements['price_boost'] = potential_replacements['init_price'] - potential_replacements[
'new_price']
potential_replacements['points_price_ratio'] = potential_replacements['price_boost'] / potential_replacements[
'points_hit']
# potential_replacements = potential_replacements.sort_values(by=['price_boost'], ascending = False)
# potential_replacements = potential_replacements.sort_values(by=['points_hit'], ascending = True)
potential_replacements = potential_replacements.sort_values(by=['points_price_ratio'], ascending=False)
replacement = potential_replacements.iloc[0]
team_replacement = output_df[output_df['player'] == replacement['new_name']]
print(
"Unfortunate " + replacement['init_name'] + " replaced with " + replacement['new_name'] + ", saving " + str(
replacement['price_boost']) + " but losing " + str(replacement['points_hit']) + " points\n")
team = team.where(team[name_column] != replacement['init_name']).dropna()
team = team.append(team_replacement, ignore_index=True)
output_df =
|
pd.concat([output_df, team, team])
|
pandas.concat
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Dataset handling within binet.
Copyright © 2013-2015 <NAME>.
Licensed under GPL, version 2 or a later (see LICENSE.rst)
binet stores datasets as HDF5 files. A dataset is comprised of 6 matrices:
trainx, trainy, validx, validy, testx, testy
These are usually stored as float-values, with the labels (y-values) in a
one-hot encoding.
NOTE: This file can be executed. It then converts datasets from their original
format into HDF5 files.
Usage: datasets.py (mnist | norb | cifar10) [directory]
'''
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import sys
if sys.version_info < (3,):
range = xrange
import cPickle as pickle
else:
import pickle
import struct
import gzip
import zipfile
import os
import scipy
import platform
import logging
import gc
import numpy as np
import pandas as pd
from scipy import io
try:
import h5py
except ImportError:
import warnings
warnings.warn("h5py unavailable")
# some machine specific paths for bioinf@jku machines
__datadir = {'tomlap': '/media/scratch/data',
'blucomp': '/media/scratch/data'}
_DATA_DIRECTORY = __datadir.get(platform.node(), os.path.expanduser("~/data"))
def load_dataset(dataset_name, return_testset=False,
dtype=np.float32, revert_scaling=False):
'''Loads a dataset, given the filename of the HDF5 file.
Returns a 4-tuple (X, y, Xvalid, yvalid), or a 6-tuple including (Xtest, ytest) when return_testset=True.
'''
if not dataset_name.endswith(".hdf5"):
fname = os.path.join(_DATA_DIRECTORY, dataset_name + ".hdf5")
else:
fname = os.path.join(_DATA_DIRECTORY, dataset_name)
# try to create a standard dataset if it doesn't exist yet
if not os.path.exists(fname):
createfuncs = {
'mnist': _create_mnist,
'norb': _create_norb,
'cifar10': _create_cifar10_flat,
'cifar10_img': _create_cifar10_img,
'mnist_basic': _create_mnist_basic,
'mnist_bgimg': _create_mnist_bgimg,
'mnist_bgrand': _create_mnist_bgrand,
'mnist_rot': _create_mnist_rot,
'rectangles': _create_rectangles,
'convex': _create_convex,
'covertype': _create_covertype,
'enwik8': _create_enwik8,
'tox21': _create_tox21}
cf = createfuncs.get(dataset_name, None)
if cf is not None:
l = logging.getLogger(__name__)
l.warning("%s does not exist, trying to create it" % fname)
cf(_DATA_DIRECTORY)
if not os.path.exists(fname):
raise RuntimeError("File %s does not exist" % fname)
with h5py.File(fname, "r") as dataset:
if dataset_name == "enwik8":
ds_keys = ['train', 'valid', 'test']
else:
ds_keys = ['trainx', 'trainy', 'validx', 'validy']
if return_testset:
ds_keys.extend(['testx', 'testy'])
data = []
s = dataset['scale'][:] if 'scale' in dataset else 1.0
c = dataset['center'][:] if 'center' in dataset else 0.0
for k in ds_keys:
if k.endswith('x') and revert_scaling:
data.append(((dataset[k][:] * s)+c).astype(dtype))
else:
data.append(dataset[k][:].astype(dtype))
gc.collect()
return data
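# Hedged usage sketch (assumes the named HDF5 file exists in _DATA_DIRECTORY or
# can be created by one of the createfuncs above):
#     trainx, trainy, validx, validy = load_dataset("mnist")
#     trainx, trainy, validx, validy, testx, testy = load_dataset(
#         "mnist", return_testset=True)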
def _download_file(urlbase, fname, destination_dir):
if not os.path.exists(destination_dir):
os.mkdir(destination_dir)
if sys.version_info < (3,):
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
url = urlbase + fname
dst = os.path.join(destination_dir, fname)
if not os.path.exists(dst):
logging.getLogger(__name__).info("downloading %s to %s" % (url, dst))
urlretrieve(url, dst)
return dst
def _to_one_hot_encoding(labels, dtype=np.float64):
'''Creates a one-hot encoding of the labels.'''
labels = labels.reshape((labels.shape[0], 1))
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(dtype=dtype)
return enc.fit_transform(labels).toarray()
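# Worked example (illustrative): labels np.array([0, 2, 1]) are first reshaped
# to a column vector and then encoded as
#     [[1, 0, 0],
#      [0, 0, 1],
#      [0, 1, 0]]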
def _shuffle(data, labels):
''' Shuffles the data and the labels.'''
np.random.seed(42) # Make sure the same file is produced on each machine
idx = np.array(range(data.shape[0]))
np.random.shuffle(idx)
data = data[idx, :]
labels = labels[idx, :]
return data, labels, idx
def _read_mnist_image(filename):
with gzip.open(filename) as f:
buf = f.read(16)
magic, n_items, xsize, ysize = struct.unpack(">iiii", buf)
assert(magic == 2051) # magic number
n_features = xsize*ysize
data = np.zeros((n_items, n_features), dtype=np.uint8)
for i in range(n_items):
buf = f.read(n_features)
x = struct.unpack("B"*n_features, buf)
data[i, :] = x
return data
def _read_mnist_label(filename):
with gzip.open(filename) as f:
buf = f.read(8)
magic, n_items = struct.unpack(">ii", buf)
assert(magic == 2049) # magic number
data = np.zeros(n_items, dtype=np.uint8)
buf = f.read(n_items)
data[:] = struct.unpack("B"*n_items, buf)
return data.reshape(-1, 1)
def _read_norb_data(filename):
with gzip.open(filename) as f:
buf = f.read(8)
magic, ndims = struct.unpack("<ii", buf)
if magic == 0x1e3d4c55:
dt = np.dtype(np.uint8)
elif magic == 0x1e3d4c54:
dt = np.dtype(np.uint32)
else:
raise ValueError("unknown NORB magic number: %s" % hex(magic))
n = max(ndims, 3)
buf = f.read(n * 4)
dims = struct.unpack('<' + ('i'*n) , buf)
nitems = dims[0]
nfeatures = int(np.prod(dims[1:]))
data = np.empty((nitems, nfeatures), dtype=dt.type)
# we have to iterate here, as doing it all at once might cause a MemoryError
for i in range(nitems):
buf = f.read(nfeatures*dt.itemsize)
data[i] = struct.unpack(dt.char*nfeatures, buf)
return data
def _store(data, filename, other=None):
#
# Note: deactivating compression got a MASSIVE boost in read-speed.
# Our only compression-choice was gzip, as rhdf5 (R implementation)
# could not handle LZO.
# without compression, CIFAR10 can be read in <1 second in R (filesize ~750MB)
# with GZIP, no matter what compression level, the times were ~40s.
# (even though GZIP with compression_opts = 0 resulted in a file of 750MB)
# (compression_opts = 9 reached ~250 MB)
#
logging.getLogger(__name__).info("saving into %s ..." % filename)
with h5py.File(filename, "w") as f:
for i in range(len(data)):
f.create_dataset(data[i][0] + "x", data=data[i][1])
f.create_dataset(data[i][0] + "y", data=data[i][2])#, compression="gzip", compression_opts = 0)
if other:
for k in other:
f.create_dataset(k, data=other[k])
def _process_and_store(data, filename, other=None, rescale=False, dtype=np.float32):
'''Shuffles, converts and stores the data.
Shuffles training and testset, converts the data to np.float64 and stores it.
`other` can be dictionary of additional data to store.
data is expected to be a list of datasets, where each dataset is a list of
[name, data, labels]. I.e. a normal train/testset split would be
data = [ ['train', traindata, trainlabels], ['test', testdata, testlabels]]
'''
logger = logging.getLogger(__name__)
logger.info("shuffling...")
for i in range(len(data)):
data[i][1], data[i][2], _ = _shuffle(data[i][1], data[i][2])
logger.info("converting...")
for i in range(len(data)):
data[i][1] = data[i][1].astype(dtype)
data[i][2] = _to_one_hot_encoding(data[i][2], dtype=dtype)
if rescale:
s = data[0][1].max() # scale based on training set
for i in range(len(data)):
data[i][1] /= s
if other is None:
other = {}
other['scale'] = s*np.ones(data[0][1].shape[1])
_store(data, filename, other)
gc.collect()
def _split_dataset(data, labels, fraction):
""" Splits a dataset into two set, with the first part
obtaining fraction % of the data."""
n = int(data.shape[0] * fraction + 0.5)
idx = np.random.choice(range(data.shape[0]), n, replace=False)
return (data[idx, ], labels[idx],
np.delete(data, idx, 0), np.delete(labels, idx, 0))
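# Worked example (illustrative): for 60000 MNIST training rows and
# fraction=5/6.0, the first returned pair holds 50000 randomly chosen rows and
# the second pair holds the remaining 10000, matching the train/valid split
# used by the _create_* functions below.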
def _create_mnist(directory):
''' MNIST dataset from yann.lecun.com/exdb/mnist/ '''
from os.path import join
logger = logging.getLogger(__name__)
logger.info("reading data...")
urlbase = 'http://yann.lecun.com/exdb/mnist/'
files = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
destdir = join(directory, "raw")
for fname in files:
_download_file(urlbase, fname, destdir)
trainx = _read_mnist_image(join(destdir, "train-images-idx3-ubyte.gz"))
trainy = _read_mnist_label(join(destdir, "train-labels-idx1-ubyte.gz"))
testx = _read_mnist_image(join(destdir, "t10k-images-idx3-ubyte.gz"))
testy = _read_mnist_label(join(destdir, "t10k-labels-idx1-ubyte.gz"))
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, join(directory, "mnist.hdf5"), rescale=True)
def _create_norb(directory):
'''Small NORB dataset from www.cs.nyu.edu/~ylclab/data/norb-v1.0-small/ '''
urlbase = "http://www.cs.nyu.edu/~ylclab/data/norb-v1.0-small/"
dst = os.path.join(directory, "raw")
trainx = _read_norb_data(_download_file(urlbase,
'smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat.gz', dst))
trainy = _read_norb_data(_download_file(urlbase,
'smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat.gz', dst))
traini = _read_norb_data(_download_file(urlbase,
'smallnorb-5x46789x9x18x6x2x96x96-training-info.mat.gz', dst))
testx = _read_norb_data(_download_file(urlbase,
'smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat.gz', dst))
testy = _read_norb_data(_download_file(urlbase,
'smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat.gz', dst))
# instead of assigning the validation set randomly, we pick one of the
# "instances" of the training set. This is much better than doing it randomly!
fold = traini[:, 0].ravel()
vi = (fold == 4) # let's make instance 4 the validation-instance
#print vi.sum()
validx, trainx = trainx[vi], trainx[~vi]
validy, trainy = trainy[vi], trainy[~vi]
#print validx.shape, trainx.shape
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "norb.hdf5"), rescale=True)
def _create_norb_downsampled(directory):
if not os.path.exists(os.path.join(directory, "norb.hdf5")):
_create_norb(directory)
def downsample(X):
Xd = np.empty((X.shape[0], 2048))
for i, x in enumerate(X):
y = scipy.misc.imresize(x.reshape(96*2, 96), 1.0 / 3.0, "bicubic")
Xd[i] = y.ravel()
return Xd
tmp = h5py.File(os.path.join(directory, "norb.hdf5"))
trainx = downsample(tmp['trainx'])
validx = downsample(tmp['validx'])
testx = downsample(tmp['testx'])
data = [['train', trainx, tmp['trainy']],
['valid', validx, tmp['validy']],
['test', testx, tmp['testy']]]
_store(data, os.path.join(directory, "norb_downsampled.hdf5"))
def _load_cifar10(directory):
logger = logging.getLogger(__name__)
logger.info("reading CIFAR10 data...")
fname = _download_file("http://www.cs.toronto.edu/~kriz/",
"cifar-10-binary.tar.gz",
os.path.join(directory, "raw"))
import tarfile
with tarfile.open(fname) as tf:
filemembers = tf.getmembers()
trainx = np.zeros((0, 3072))
trainy = np.zeros((0,), dtype=np.uint8)
files = [f.name for f in filemembers if "data_batch" in f.name]
files.sort()
def _read_file(fn):
f = tf.extractfile(fn)
tmp = np.frombuffer(f.read(), np.uint8).reshape(-1, 3073)
return tmp[:, 0].reshape(-1, 1), tmp[:, 1:].reshape(-1, 3*32*32)
# save last batch as validation
traindata = [_read_file(fn) for fn in files[0:len(files)-1]]
y_tr = np.vstack([t[0] for t in traindata])
x_tr = np.vstack([t[1] for t in traindata])
y_va, x_va = _read_file(files[-1])
y_te, x_te = _read_file('cifar-10-batches-bin/test_batch.bin')
return x_tr, y_tr, x_va, y_va, x_te, y_te
def _create_cifar10_flat(directory):
''' CIFAR-10, from www.cs.toronto.edu/~kriz/cifar.html.'''
x_tr, y_tr, x_va, y_va, x_te, y_te = _load_cifar10(directory)
data = [['train', x_tr, y_tr],
['valid', x_va, y_va],
['test', x_te, y_te]]
dst = os.path.join(directory, "cifar10.hdf5")
_process_and_store(data, dst, rescale=True)
#imshow(np.rot90(traindata[882, ].reshape((3, 32, 32)).T), origin="lower")
def _create_cifar10_img(directory):
''' CIFAR-10 in nbatches x width x height x channels format
from www.cs.toronto.edu/~kriz/cifar.html.'''
x_tr, y_tr, x_va, y_va, x_te, y_te = _load_cifar10(directory)
x_tr, x_va, x_te = [x.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
for x in (x_tr, x_va, x_te)]
data = [['train', x_tr, y_tr],
['valid', x_va, y_va],
['test', x_te, y_te]]
dst = os.path.join(directory, "cifar10_img.hdf5")
_store(data, dst)
#imshow(np.rot90(traindata[882, ].reshape((3, 32, 32)).T), origin="lower")
def _handle_larochelle_icml2007(directory, fn, train_data_file, test_data_file,
rotate_images=True):
'''Basic procedure to load the datasets from Larochelle et al., ICML 2007.
Unfortunately the structure of the datasets differs sometimes,
so we need this abstraction.
fn = name of the zip file (w/o extension)
train_data_file: name of the training set file within the archive
test_data_file: name of the test set file within the archive
rotate_images: rotate images (needed if file is in column-major format)
'''
import zipfile
urlbase = "http://www.iro.umontreal.ca/~lisa/icml2007data/"
dst = os.path.join(directory, "raw")
f = _download_file(urlbase, '%s.zip' % fn, dst)
with zipfile.ZipFile(f) as zf:
tmp = np.loadtxt(zf.open(train_data_file))
trainx, trainy = tmp[:, :-1].copy(), tmp[:, -1].copy()
tmp = np.loadtxt(zf.open(test_data_file))
testx, testy = tmp[:, :-1].copy(), tmp[:, -1].copy()
trainy = trainy.reshape((-1, 1))
testy = testy.reshape((-1, 1))
if rotate_images:
n = int(np.sqrt(trainx.shape[1]))
trainx = np.rollaxis(trainx.reshape(trainx.shape[0], n, n), 2, 1)
trainx = trainx.reshape(-1, n*n)
testx = np.rollaxis(testx.reshape(testx.shape[0], n, n), 2, 1)
testx = testx.reshape(-1, n*n)
return trainx, trainy, testx, testy
def _create_mnist_basic(directory):
trainx, trainy, testx, testy = _handle_larochelle_icml2007(directory, "mnist",
'mnist_train.amat', 'mnist_test.amat', rotate_images=False)
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "mnist_basic.hdf5"), rescale=True)
def _create_mnist_bgimg(directory):
trainx, trainy, testx, testy = _handle_larochelle_icml2007(directory,
"mnist_background_images",
'mnist_background_images_train.amat',
'mnist_background_images_test.amat')
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "mnist_bgimg.hdf5"), rescale=True)
def _create_mnist_bgrand(directory):
trainx, trainy, testx, testy = _handle_larochelle_icml2007(directory,
"mnist_background_random",
'mnist_background_random_train.amat',
'mnist_background_random_test.amat')
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "mnist_bgrand.hdf5"), rescale=True)
def _create_mnist_rot(directory):
trainx, trainy, testx, testy = _handle_larochelle_icml2007(directory,
"mnist_rotation_new",
'mnist_all_rotation_normalized_float_train_valid.amat',
'mnist_all_rotation_normalized_float_test.amat')
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "mnist_rot.hdf5"), rescale=True)
def _create_rectangles(directory):
trainx, trainy, testx, testy = _handle_larochelle_icml2007(directory,
"rectangles",
'rectangles_train.amat',
'rectangles_test.amat')
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "rectangles.hdf5"), rescale=True)
def _create_convex(directory):
trainx, trainy, testx, testy = _handle_larochelle_icml2007(directory,
"convex",
'convex_train.amat',
'50k/convex_test.amat')
trainx, trainy, validx, validy = _split_dataset(trainx, trainy, 5/6.0)
data = [['train', trainx, trainy],
['valid', validx, validy],
['test', testx, testy]]
_process_and_store(data, os.path.join(directory, "convex.hdf5"), rescale=True)
def _create_covertype(directory):
urlbase = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/'
destdir = os.path.join(directory, "raw")
fn = _download_file(urlbase, 'covtype.data.gz', destdir)
with gzip.open(fn, "rb") as gzfile:
X = pd.read_csv(gzfile, header=None).values
X, y = X[:, :-1].astype(np.float64), X[:, -1]
y -= 1 # make classes 0-based
# split into test- and validationset
idx = range(X.shape[0])
from sklearn.cross_validation import train_test_split
X, Xtest, y, ytest = train_test_split(X, y, test_size=0.1)
X, Xval, y, yval = train_test_split(X, y, test_size=0.25)
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
y = lb.fit_transform(y)
yval = lb.transform(yval)
ytest = lb.transform(ytest)
# Most values are binary, except for these, so let's standardize them
quant_idx = [0, 1, 2, 3, 4, 5, 9] # real numbers
int_idx = [6, 7, 8] # integers from [0, 255)
from sklearn.preprocessing import StandardScaler as Scaler
scaler = Scaler()
X[:, quant_idx + int_idx] = scaler.fit_transform(X[:, quant_idx+int_idx])
Xval[:, quant_idx + int_idx] = scaler.transform(Xval[:, quant_idx + int_idx])
Xtest[:, quant_idx + int_idx] = scaler.transform(Xtest[:, quant_idx + int_idx])
data = [['train', X, y],
['valid', Xval, yval],
['test', Xtest, ytest]]
m = np.zeros(X.shape[1])
m[quant_idx+int_idx] = scaler.mean_
s = np.ones(X.shape[1])
s[quant_idx+int_idx] = scaler.std_
other = {'center': m, "scale": s}
_store(data, os.path.join(directory, "covertype.hdf5"), other)
def _create_enwik8(directory):
'''Prepares the enwik8/hutter prize data: an extract from wikipedia.'''
urlbase = 'http://mattmahoney.net/dc/'
destdir = os.path.join(directory, "raw")
fn = _download_file(urlbase, 'enwik8.zip', destdir)
# we first read the text as UTF-8, and then map each present character
# to a number, instead of using UTF-8 bytes directly
with zipfile.ZipFile(fn, "r") as zf:
with zf.open("enwik8") as z:
text_train = z.read(96*10**6).decode("utf8")
text_valid = z.read(2*10**6).decode("utf8")
text_test = z.read(2*10**6).decode("utf8")
assert(len(z.read()) == 0) # make sure we read everything
# ignore "uncommon" characters.
# In "Generating Sequences With Recurrent Neural Networks"
# <NAME> says that there are 205 distinct single-byte characters.
# However the following will only yield 196. No idea where Alex
# got the rest of them ?-)
data_tr = np.array([ord(c) for c in text_train if ord(c) < 256], dtype=np.uint8)
data_va = np.array([ord(c) for c in text_valid if ord(c) < 256], dtype=np.uint8)
data_te = np.array([ord(c) for c in text_test if ord(c) < 256], dtype=np.uint8)
cnt = pd.value_counts(data_tr)
del(text_train, text_valid, text_test)
import gc
gc.collect()
# remove characters with <=10 occurrences (there are 16 of those)
# (we use a lookup table, otherwise it takes forever)
count_lookup = np.zeros(256, np.int64)
count_lookup[cnt.index.values] = cnt.values
occ = count_lookup[data_tr]
data_tr = data_tr[occ > 10]
data_va = data_va[count_lookup[data_va] > 10]
data_te = data_te[count_lookup[data_te] > 10]
decode_lookup = 255 * np.ones(256, np.uint8)
u = np.unique(data_tr)
decode_lookup[:len(u)] = u
encode_lookup = np.iinfo(np.uint16).max * np.ones(256, np.uint16)
for c, e in enumerate(u):
encode_lookup[e] = c
code_tr = encode_lookup[data_tr]
code_va = encode_lookup[data_va]
code_te = encode_lookup[data_te]
assert(np.all(decode_lookup[code_tr] == data_tr))
assert(np.all(code_tr <= 255))
assert(np.all(code_va <= 255))
assert(np.all(code_te <= 255))
del(data_tr, data_va, data_te)
gc.collect()
fname = os.path.join(directory, "enwik8.hdf5")
with h5py.File(fname, "w") as f:
f.create_dataset('train', data=code_tr)
f.create_dataset('valid', data=code_va)
f.create_dataset('test', data=code_te)
f.create_dataset('encode', data=encode_lookup)
f.create_dataset('decode', data=decode_lookup)
def create_tox21(sparsity_cutoff, validation_fold,
dtype=np.float32, download_directory=_DATA_DIRECTORY):
urlbase = "http://www.bioinf.jku.at/research/deeptox/"
dst = os.path.join(download_directory, "raw")
fn_x_tr_d = _download_file(urlbase, 'tox21_dense_train.csv.gz', dst)
fn_x_tr_s = _download_file(urlbase, 'tox21_sparse_train.mtx.gz', dst)
fn_y_tr = _download_file(urlbase, 'tox21_labels_train.csv', dst)
fn_x_te_d = _download_file(urlbase, 'tox21_dense_test.csv.gz', dst)
fn_x_te_s = _download_file(urlbase, 'tox21_sparse_test.mtx.gz', dst)
fn_y_te = _download_file(urlbase, 'tox21_labels_test.csv', dst)
cpd = _download_file(urlbase, 'tox21_compoundData.csv', dst)
y_tr = pd.read_csv(fn_y_tr, index_col=0)
y_te = pd.read_csv(fn_y_te, index_col=0)
x_tr_dense = pd.read_csv(fn_x_tr_d, index_col=0).values
x_te_dense = pd.read_csv(fn_x_te_d, index_col=0).values
x_tr_sparse = io.mmread(fn_x_tr_s).tocsc()
x_te_sparse = io.mmread(fn_x_te_s).tocsc()
# filter out very sparse features
sparse_col_idx = ((x_tr_sparse > 0).mean(0) >= sparsity_cutoff).A.ravel()
x_tr_sparse = x_tr_sparse[:, sparse_col_idx].A
x_te_sparse = x_te_sparse[:, sparse_col_idx].A
dense_col_idx = np.where(x_tr_dense.var(0) > 1e-6)[0]
x_tr_dense = x_tr_dense[:, dense_col_idx]
x_te_dense = x_te_dense[:, dense_col_idx]
# The validation set consists of those samples with
# cross validation fold #5
info =
|
pd.read_csv(cpd, index_col=0)
|
pandas.read_csv
|
import pandas as pd
import plotly.graph_objs as go
# Use this file to read in your data and prepare the plotly visualizations. The path to the data files are in
# `data/file_name.csv`
def generateWeatherTable():
df = pd.read_csv('data/weatherAUS.csv')
dfTemperature = df[['MinTemp', 'MaxTemp']]
dfDates = df["Date"].str.split("-", n = 2, expand = True)
dfWeather =
|
pd.concat([dfDates, dfTemperature], axis=1)
|
pandas.concat
|
"""
"""
import os
import pandas as pd
import numpy as np
from oemof.thermal.stratified_thermal_storage import calculate_storage_u_value
from oemof.thermal import facades
from oemof.solph import (processing, Source, Sink, Bus, Flow,
Model, EnergySystem)
# Set paths
data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data/stratified_thermal_storage.csv')
# Read input data
input_data = pd.read_csv(data_path, index_col=0, header=0)['var_value']
# Precalculation
u_value = calculate_storage_u_value(
input_data['s_iso'],
input_data['lamb_iso'],
input_data['alpha_inside'],
input_data['alpha_outside'])
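# Hedged note (see the oemof.thermal documentation for the exact convention):
# the U-value follows from thermal resistances in series, roughly
#     1 / u_value = 1 / alpha_inside + s_iso / lamb_iso + 1 / alpha_outside
# with the insulation thickness s_iso in the units the function expects
# (millimetres in the oemof examples).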
# Set up an energy system model
solver = 'cbc'
periods = 100
datetimeindex =
|
pd.date_range('1/1/2019', periods=periods, freq='H')
|
pandas.date_range
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
|
_maybe_remove(store, "df2")
|
pandas.tests.io.pytables.common._maybe_remove
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import time
import random
from contextlib import contextmanager
from adabelief_pytorch import AdaBelief
import functools
from omegaconf import OmegaConf
from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold
from collections import defaultdict, Counter
import sys
from tqdm import tqdm
from typing import Tuple
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
# from pytorch_lightning.metrics.functional.classification import auroc
import cv2
from pytorch_lightning import LightningDataModule
from sklearn import model_selection
import albumentations as A
from sklearn.metrics import roc_auc_score, accuracy_score
from pytorch_lightning.core.lightning import LightningModule
from albumentations import (
Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip,
RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout,
IAAAdditiveGaussianNoise, Transpose, CLAHE, MultiplicativeNoise, IAASharpen
)
from torch.autograd import Variable
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from albumentations.pytorch import ToTensorV2
from albumentations import ImageOnlyTransform
import matplotlib.pyplot as plt
from pathlib import Path
import timm
from google.cloud import storage
from torchvision import models
import torch.nn as nn
from torch.nn import functional as F
import torch
from torch.nn.parameter import Parameter
# from torch.optim import Adam, SGD
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
import pytorch_lightning as pl
import torch.optim as optim
from src.optimizer import get_optimizer
from src.loss import get_criterion
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
import wandb
import warnings
warnings.simplefilter("ignore")
train = pd.read_csv('../data/Training_Set/RFMiD_Training_Labels.csv')
extra =
|
pd.read_csv("../extra/use_df.csv")
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import requests
import random
import urllib
import json
import time
import sys
import datetime
from datetime import date
from bs4 import BeautifulSoup
from selenium import webdriver
from discourse_ordering import DiscourseOrderingClass
from twitter_api import TwitterClass
import os
class HelperClassTempo:
"""
Class of auxiliary methods
"""
def __init__(self):
# month mapping
self.dict_map_mes = {1: 'janeiro',
2: 'fevereiro',
3: 'março',
4: 'abril',
5: 'maio',
6: 'junho',
7: 'julho',
8: 'agosto',
9: 'setembro',
10: 'outubro',
11: 'novembro',
12: 'dezembro'
}
# current day
print (self.get_dia_atual())
# current path
self.current_path = str(os.getcwd())
# chromedriver path
self.path_to_chromedriver = os.path.join(self.current_path, 'chromedriver')
# Twitter API
self.twitter_api = TwitterClass()
# auxiliary files
self.path_infos_cidades = os.path.join(self.current_path, "cidades.csv")
self.path_bd = os.path.join(self.current_path, "cidades_bd.csv")
path_credenciais_user_agent = os.path.join(self.current_path, "credenciais_user_agent.json")
path_intents = os.path.join(self.current_path, "intents.json")
path_analisador_lexico = os.path.join(self.current_path, "analisador_lexico.json")
self.discourse_ordering_object = DiscourseOrderingClass()
# read the JSON file with the credentials
try:
f = open(path_credenciais_user_agent, mode="r")
infos_login = json.load(f)
self.user_agent = infos_login['user_agent']
f.close()
except:
self.user_agent = "temporary_agent"
# read the JSON file with the intents
f = open(path_intents, encoding='utf-8', mode="r")
self.dict_intents = json.load(f)
f.close()
# read the JSON file with the lexical analyzer
f = open(path_analisador_lexico, mode="r")
dict_json_lexico = json.load(f)
f.close()
self.dict_analisador_lexico = {}
self.lista_palavras_analisador_lexico = set()
for chave, valor in dict_json_lexico.items():
palavra = chave.split(']')[1].strip()
numero = chave.split('numero=')[1].split(',')[0].strip()
genero = chave.split('genero=')[1].split(']')[0].strip()
chave = f"{palavra}|{numero}|{genero}"
self.dict_analisador_lexico[chave] = valor.strip()
self.lista_palavras_analisador_lexico.add(palavra)
self.lista_palavras_analisador_lexico = list(self.lista_palavras_analisador_lexico)
# webdriver parameters
self.chromeOptions = webdriver.ChromeOptions()
self.chromeOptions.add_argument('--no-sandbox')
self.chromeOptions.add_argument("--headless")
self.chromeOptions.add_argument(f"user-agent={self.user_agent}")
# parameters
self.url_tabua_mares = "https://www.tideschart.com"
self.tempo_espera_tweet_segundos = 60
self.qtd_cidades_selecionadas = 15
self.qtd_min_dias_consecutivos = 10
self.multiplicador_std = 1.3
self.altura_mare_ruim = 1.6
self.filler_tempo = 'céu nublado'
self.modulo = 'tempo'
# max and min possible temperatures (conceptual validation)
self.maior_temperatura_possivel = 55
self.menor_temperatura_possivel = -10
# icons
self.icone_up = '▲'
self.icone_down = '▼'
# cities dataframe
self.df_cidades = pd.read_csv(self.path_infos_cidades, encoding='latin-1', sep=';')
# columns to assign values to
self.lista_colunas_tempo = ['cidade',
'uf',
'tempo',
'temperatura',
'temperatura_max',
'temperatura_min',
'nebulosidade',
'umidade',
'vento',
'horario_por_sol',
'pesca',
'melhor_horario_pesca',
'altura_maior_onda',
'texto_onda',
'url_imagem']
# columns to save
self.lista_colunas_salvar = ['cidade',
'uf',
'tempo',
'temperatura',
'temperatura_max',
'temperatura_min',
'nebulosidade',
'umidade',
'vento',
'horario_por_sol',
'pesca',
'melhor_horario_pesca',
'altura_maior_onda',
'data']
# if the db file does not exist, create it
if not os.path.exists(self.path_bd):
|
pd.DataFrame(columns=self.lista_colunas_salvar)
|
pandas.DataFrame
|
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from kedro.contrib.io.gcs.csv_gcs import CSVGCSDataSet
from kedro.io import DataSetError, Version
from kedro.io.core import generate_timestamp
from . import gcs_mocks
FILENAME = "test.csv"
BUCKET_NAME = "testbucketkedro"
GCP_PROJECT = "testproject"
class MockGCSFileSystem(gcs_mocks.BasicGCSFileSystemMock):
@contextmanager
def open(self, filepath, *args, **kwargs):
gcs_file = self.files.get(filepath)
if not gcs_file:
gcs_file = gcs_mocks.MockGCSFile()
self.files[filepath] = gcs_file
yield gcs_file
@pytest.fixture
def dummy_dataframe():
return
|
pd.DataFrame({"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]})
|
pandas.DataFrame
|
from __future__ import print_function
import os
import pandas as pd
from inferelator_prior import (SRR_SUBPATH, FASTQ_SUBPATH, STAR_ALIGNMENT_SUBPATH, HTSEQ_ALIGNMENT_SUBPATH,
KALLISTO_ALIGNMENT_SUBPATH)
from inferelator_prior.processor.htseq_count import htseq_count_aligned
from inferelator_prior.processor.matrix import pileup_raw_counts, normalize_matrix_to_fpkm, normalize_matrix_to_tpm
from inferelator_prior.processor.srr import get_srr_files, unpack_srr_files
from inferelator_prior.processor.star import star_align_fastqs
from inferelator_prior.processor.kallisto import kallisto_align_fastqs, KALLISTO_COUNT_COL, KALLISTO_TPM_COL
from inferelator_prior.processor.utils import file_path_abs, test_requirements_exist, ArgParseTestRequirements
OUTPUT_COUNT_FILE_NAME = "srr_counts.tsv"
OUTPUT_COUNT_METADATA_NAME = "srr_alignment_metadata.tsv"
OUTPUT_FPKM_FILE_NAME = "srr_fpkm.tsv"
OUTPUT_TPM_FILE_NAME = "srr_tpm.tsv"
def main():
ap = ArgParseTestRequirements(description="Turn a list of RNAseq expression SRRs from NCBI GEO into a count matrix")
ap.add_argument("-s", "--srr", dest="srr", help="SRR record IDs", nargs="+", metavar="SRRID", default=None)
ap.add_argument("-f", "--file", dest="file", help="List of SRR records in a TXT file", metavar="FILE", default=None)
ap.add_argument("-g", "--genome", dest="genome", help="Reference genome (STAR or Kallisto)", metavar="PATH",
required=True)
ap.add_argument("-a", "--annotation", dest="anno", help="GTF/GFF Annotation File", metavar="FILE", default=None)
ap.add_argument("-o", "--out", dest="out", help="Output PATH", metavar="PATH", required=True)
ap.add_argument("--gzip", dest="gzip", help="GZIP output file", action='store_const', const=True, default=False)
ap.add_argument("--cpu", dest="cpu", help="NUM of cores to use", metavar="NUM", type=int, default=4)
ap.add_argument("--star_jobs", dest="sjob", help="NUM of STAR workers to use", metavar="NUM", type=int, default=4)
ap.add_argument("--kallisto", dest="kallisto", help="Align and quant with Kallisto", action='store_const',
const=True, default=False)
ap.add_argument("--skip_srr", dest="skip", help="Skip downloading & unpacking SRRs", action='store_const',
const=True, default=False)
args, star_args = ap.parse_known_args()
srr_ids = list()
if args.srr is None and args.file is None:
print("One of --srr or --file must be set")
exit(1)
elif args.srr is not None and args.file is not None:
print("Only one of --srr or --file may be set (not both)")
exit(1)
elif args.srr is not None:
# SRR IDs are provided at command line
srr_ids = args.srr
elif args.file is not None:
# SRR IDs are in a .txt file; read them into a list
srr_ids = pd.read_csv(args.file, sep="\t", index_col=None, header=None).iloc[:, 0].tolist()
else:
raise ValueError("There is something wrong with this switch")
srr_tomat0(srr_ids, args.out, args.genome, annotation_file=args.anno, gzip_output=args.gzip, cores=args.cpu,
star_jobs=args.sjob, star_args=star_args, kallisto=args.kallisto, skip=args.skip)
def srr_tomat0(srr_ids, output_path, star_reference_genome, annotation_file=None, gzip_output=False, cores=4, star_jobs=2,
star_args=None, kallisto=False, skip=False):
output_path = file_path_abs(output_path)
os.makedirs(output_path, exist_ok=True)
# Download all the SRR files
print("Downloading SRR files")
os.makedirs(os.path.join(output_path, SRR_SUBPATH), exist_ok=True)
srr_file_names = get_srr_files(srr_ids, os.path.join(output_path, SRR_SUBPATH), num_workers=cores, skip=skip)
# Unpack all the SRR files into FASTQ files
print("Unpacking SRR files")
os.makedirs(os.path.join(output_path, FASTQ_SUBPATH), exist_ok=True)
fastq_file_names = unpack_srr_files(srr_ids, srr_file_names, os.path.join(output_path, FASTQ_SUBPATH),
num_workers=cores, skip=skip)
gz_extension = ".gz" if gzip_output else ""
count_matrix_file_name = os.path.join(output_path, OUTPUT_COUNT_FILE_NAME) + gz_extension
fpkm_file_name = os.path.join(output_path, OUTPUT_FPKM_FILE_NAME) + gz_extension
tpm_file_name = os.path.join(output_path, OUTPUT_TPM_FILE_NAME) + gz_extension
if kallisto:
print("Aligning FASTQ files")
os.makedirs(os.path.join(output_path, KALLISTO_ALIGNMENT_SUBPATH), exist_ok=True)
count_file_names = kallisto_align_fastqs(srr_ids, fastq_file_names, star_reference_genome,
os.path.join(output_path, KALLISTO_ALIGNMENT_SUBPATH),
num_workers=cores)
tpm_df = None
count_matrix = None
for sid, cf in zip(srr_ids, count_file_names):
if cf is None:
continue
counts = pd.read_csv(cf, sep="\t", index_col=0)
if tpm_df is None:
tpm_df =
|
pd.DataFrame(index=counts.index)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
import pytest
import numpy as np
import pandas as pd
from pandas import concat, DataFrame, Index, MultiIndex, Series
from pandas.core.groupby import Grouping, SpecificationError
from pandas.compat import OrderedDict
import pandas.util.testing as tm
@pytest.fixture
def ts():
return tm.makeTimeSeries()
@pytest.fixture
def tsframe():
return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def df():
return DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
@pytest.fixture
def mframe():
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
return DataFrame(np.random.randn(10, 3),
index=index,
columns=['A', 'B', 'C'])
@pytest.fixture
def three_group():
return DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny',
'shiny', 'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_agg_regression1(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_must_agg(df):
grouped = df.groupby('A')['C']
msg = "Must produce aggregated value"
with tm.assert_raises_regex(Exception, msg):
grouped.agg(lambda x: x.describe())
with tm.assert_raises_regex(Exception, msg):
grouped.agg(lambda x: x.index[:2])
def test_agg_ser_multi_key(df):
# TODO(wesm): unused
ser = df.C # noqa
f = lambda x: x.sum()
results = df.C.groupby([df.A, df.B]).aggregate(f)
expected = df.groupby(['A', 'B']).sum()['C']
tm.assert_series_equal(results, expected)
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
assert ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64,
index=pd.Index([], dtype=np.float64))
tm.assert_series_equal(grouped.sum(), exp)
tm.assert_series_equal(grouped.agg(np.sum), exp)
tm.assert_series_equal(grouped.apply(np.sum), exp,
check_index_type=False)
# DataFrame
grouped = tsframe.groupby(tsframe['A'] * np.nan)
exp_df = DataFrame(columns=tsframe.columns, dtype=float,
index=pd.Index([], dtype=np.float64))
tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False)
tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
check_names=False)
def test_agg_grouping_is_list_tuple(ts):
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_python_multiindex(mframe):
grouped = mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('groupbyfunc', [
lambda x: x.weekday(),
[lambda x: x.month, lambda x: x.weekday()],
])
def test_aggregate_str_func(tsframe, groupbyfunc):
grouped = tsframe.groupby(groupbyfunc)
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
tm.assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
tm.assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(OrderedDict([['A', 'var'],
['B', 'std'],
['C', 'mean'],
['D', 'sem']]))
expected = DataFrame(OrderedDict([['A', grouped['A'].var()],
['B', grouped['B'].std()],
['C', grouped['C'].mean()],
['D', grouped['D'].sem()]]))
tm.assert_frame_equal(result, expected)
def test_aggregate_item_by_item(df):
grouped = df.groupby('A')
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (df.A == 'foo').sum()
bar = (df.A == 'bar').sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = pd.Series(np.array([foo] * K), index=list('BCD'),
dtype=np.float64, name='foo')
tm.assert_series_equal(result.xs('foo'), exp)
exp = pd.Series(np.array([bar] * K), index=list('BCD'),
dtype=np.float64, name='bar')
tm.assert_almost_equal(result.xs('bar'), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
def test_wrap_agg_out(three_group):
grouped = three_group.groupby(['A', 'B'])
def func(ser):
if ser.dtype == np.object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = three_group.loc[:, three_group.columns != 'C']
expected = exp_grouped.groupby(['A', 'B']).aggregate(func)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_maintain_order(df):
# GH #610
funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]
result = df.groupby('A')['C'].agg(funcs)
exp_cols = Index(['mean', 'max', 'min'])
tm.assert_index_equal(result.columns, exp_cols)
def test_multiple_functions_tuples_and_non_tuples(df):
# #1359
funcs = [('foo', 'mean'), 'std']
ex_funcs = [('foo', 'mean'), ('std', 'std')]
result = df.groupby('A')['C'].agg(funcs)
expected = df.groupby('A')['C'].agg(ex_funcs)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').agg(funcs)
expected = df.groupby('A').agg(ex_funcs)
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
"""
German bank holiday.
"""
try:
from pandas import Timedelta
from pandas.tseries.offsets import Easter, Day, Week
from pandas.tseries.holiday import EasterMonday, GoodFriday, \
Holiday, AbstractHolidayCalendar
except ImportError:
print('Pandas could not be imported')
raise
from german_holidays.state_codes import STATE_CODE_MAP, StateCodeError
class ChristiHimmelfahrt(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('39d')
return new
class Pfingstsonntag(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('49d')
return new
class Pfingstmontag(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('50d')
return new
class Fronleichnam(Easter):
def apply(*args, **kwargs):
new = Easter.apply(*args, **kwargs)
new += Timedelta('60d')
return new
ALL_GERMAN_HOLIDAY_RULES = {
'Karfreitag': GoodFriday,
# 'Ostersonntag': Holiday('Ostersonntag', month=1, day=1,
# offset=[Easter()]),
'Ostermontag': EasterMonday,
'<NAME>': Holiday('<NAME>', month=1, day=1,
offset=[
|
Easter()
|
pandas.tseries.offsets.Easter
|
import itertools
import math
import pandas as pd
import pickle
import re
import pprint
import time
#region lipidomics information
no_acyl_list = ['CoQ', 'Vitamine E', 'CASulfate', 'CA', 'Vitamine D',
'SSulfate', 'Cholesterol', 'SHex', 'SPE', 'BAHex',
'BASulfate', 'SPEHex', 'SPGHex']
mono_acyl_list = ['FA', 'NAE', 'CAR', 'MG', 'LDGCC', 'LDGTS/A',
'LPA', 'EtherLPC', 'LPC', 'EtherLPE', 'LPE', 'EtherLPG', 'LPG', 'LPI', 'LPS',
'VAE', 'PhytoSph', 'DHSph', 'Sph', 'DCAE', 'GDCAE', 'GLCAE', 'TDCAE', 'TLCAE',
'AHexCAS', 'AHexCS', 'AHexSIS', 'AHexBRS', 'AHexSTS',
'BRSE', 'CASE', 'CE', 'SISE', 'STSE']
tri_acyl_list = ['ADGGA', 'EtherTG', 'TG', 'HBMP', 'MLCL',
'Cer_EBDS', 'Cer_EODS', 'Cer_EOS', 'AHexCer', 'HexCer_EOS', 'ASM']
triacyls_sphingolipids = ['AHexCer', 'ASM', 'Cer_EBDS', 'Cer_EODS', 'Cer_EOS', 'HexCer_EOS']
lipidclass_dict = {
'Fatty acyls': ['FA', 'NAGly', 'NAGlySer', 'NAOrn', 'NAE', 'CAR', 'FAHFA'],
'Glycerolipids': ['DG', 'EtherDG', 'DGDG', 'EtherDGDG', 'MGDG', 'EtherMGDG',
'SQDG', 'EtherSMGDG', 'MG', 'ADGGA', 'DGCC', 'DGGA',
'DGTS/A', 'LDGCC', 'LDGTS/A', 'EtherTG', 'TG'],
'Glycerophospholipids': ['LPA', 'PA', 'EtherLPC', 'EtherPC', 'LPC', 'PC',
'EtherLPE', 'EtherPE', 'EtherPE(P)', 'PlasmPE',
'LNAPE', 'LPE', 'PE', 'BMP', 'EtherLPG', 'EtherPG',
'HBMP', 'LPG', 'PG', 'CL', 'DLCL', 'MLCL',
'Ac2PIM1', 'Ac2PIM2', 'Ac3PIM2', 'Ac4PIM2',
'EtherPI', 'LPI', 'PI', 'EtherPS', 'LNAPS', 'LPS',
'PS', 'PEtOH', 'PMeOH', 'EtherOxPE', 'OxPC', 'OxPE',
'OxPG', 'OxPI', 'OxPS'],
'Prenol lipids': ['VAE', 'CoQ', 'Vitamine E'],
'Saccharolipids': ['LipidA'],
'Sphingolipids': ['GM3', 'SHexCer', 'SHexCer+O', 'Cer_ADS', 'Cer_AP',
'Cer_AS', 'Cer_BDS', 'Cer_BS', 'Cer_HDS', 'Cer_HS',
'Cer_EBDS', 'Cer_EODS', 'Cer_EOS', 'Cer_NDS', 'Cer_NP',
'Cer_NS', 'CerP', 'AHexCer',
'HexCer_ADS', 'HexCer_AP', 'HexCer_AS', 'HexCer_BDS',
'HexCer_BS', 'HexCer_HDS', 'HexCer_HS', 'HexCer_EOS',
'HexCer_NDS', 'HexCer_NP', 'HexCer_NS',
'Hex2Cer', 'Hex3Cer', 'ASM', 'PE-Cer', 'PE-Cer+O',
'PI-Cer', 'SM', 'SM+O',
'PhytoSph', 'SL', 'SL+O', 'DHSph', 'Sph'],
'Sterol lipids': ['CASulfate', 'CA', 'DCAE', 'GDCAE', 'GLCAE', 'TDCAE',
'TLCAE', 'AHexCAS', 'AHexCS', 'AHexSIS', 'AHexBRS',
'AHexSTS', 'Vitamine D', 'SSulfate', 'BRSE', 'CASE', 'CE',
'Cholesterol', 'SHex', 'SISE', 'STSE', 'SPE', 'BAHex',
'BASulfate', 'SPEHex', 'SPGHex', 'BRSLPHex', 'BRSPHex',
'CASLPHex', 'CASPHex', 'SISLPHex', 'SISPHex', 'STSLPHex',
'STSPHex']
}
ex_mass = {'C': 12, '13C': 13.0033548378, 'H': 1.007825, 'D': 2.01410178,
'N': 14.003074, 'O': 15.994915, 'P': 30.973762, 'S': 31.972071,
'e': 0.00054858, 'H+': 1.00727642, 'H2O': 18.010565, 'CO2': 43.98983}
adduct_dict_neg = {'[M-H]-': -1.00727642, '[M+CH3COO]-': 59.01385292}
adduct_dict_pos = {'[M]+': -0.00054858, '[M+H]+': 1.00727642, '[M+NH4]+': 18.03382555, '[M+Na]+': 22.9892207 }
msdial_std_columns = ['ID', 'Average Rt(min)', 'Average Mz', 'Metabolite name',
'Adduct type', 'Post curation result', 'Fill %', 'MS/MS assigned',
'Reference RT', 'Reference m/z', 'Formula', 'Ontology',
'INCHIKEY', 'SMILES', 'Annotation tag (VS1.0)', 'RT matched',
'm/z matched', 'MS/MS matched', 'Comment',
'Manually modified for quantification', 'Isotope tracking parent ID',
'Isotope tracking weight number', 'Total score',
'RT similarity', 'Dot product', 'Reverse dot product',
'Fragment presence %', 'S/N average', 'Spectrum reference file name',
'Spectrum reference file name', 'MS1 isotopic spectrum', 'MS/MS spectrum']
exclude_subclass = ['Others', 'CoQ', 'CL', 'OxTG', 'FAHFATG',
'Vitamin_D', 'Vitamin_E', 'Vitamin D', 'Vitamin E']
ref_oad_ratio = {'OAD01': 0.10, 'OAD02': 0.25, 'OAD03': 0.50, 'OAD04': 0.01,
'OAD05': 0.01, 'OAD06': 0.05, 'OAD07': 0.10, 'OAD08': 0.06,
'OAD09': 0.06, 'OAD10': 0.10, 'OAD11': 0.02, 'OAD12': 0.02,
'OAD13': 0.04, 'OAD14': 0.05, 'OAD15': 0.20, 'OAD16': 0.40,
'OAD17': 0.03, 'OAD18': 0.10, 'OAD19': 0.20, 'OAD20': 0.01}
rel_oad_ratio = {'OAD01': 0.20, 'OAD02': 0.50, 'OAD03': 1.00, 'OAD04': 0.02,
'OAD05': 0.02, 'OAD06': 0.10, 'OAD07': 0.20, 'OAD08': 0.12,
'OAD09': 0.20, 'OAD10': 0.40, 'OAD11': 0.04, 'OAD12': 0.04,
'OAD13': 0.08, 'OAD14': 0.10, 'OAD15': 0.40, 'OAD16': 0.80,
'OAD17': 0.06, 'OAD18': 0.20, 'OAD19': 0.40, 'OAD20': 0.02}
# rel_oad_ratio = {'OAD01': 0.15, 'OAD02': 0.46, 'OAD03': 1.00, 'OAD04': 0.06,
# 'OAD05': 0.05, 'OAD06': 0.15, 'OAD07': 0.30, 'OAD08': 0.11,
# 'OAD09': 0.13, 'OAD10': 0.31, 'OAD11': 0.06, 'OAD12': 0.04,
# 'OAD13': 0.11, 'OAD14': 0.09, 'OAD15': 0.35, 'OAD16': 0.71,
# 'OAD17': 0.07, 'OAD18': 0.18, 'OAD19': 0.32, 'OAD20': 0.04}
#endregion
class SingleAnalyzer(object):
def __init__(self, tabel_format, directry, prefix, input_data,
ms_tolerance_ppm, must_nl_cut_off_dict, cut_off_ratio,
file_name, sec_rep, sec_bar, each_rep, each_bar, timer):
sec_rep.set("Data Pre-processing")
sec_bar["maximum"] = 9
#region Data preprocessing
if tabel_format == 'Alignment':
raw_table = pd.read_csv(input_data, skiprows=[0,1,2,3], sep='\t')
raw_table = raw_table.rename(columns={
'Alignment ID': 'ID', 'Average Rt(min)': 'RT(min)',
'Average Mz': 'Precursor m/z'})
elif tabel_format == 'PeakList':
raw_table = pd.read_csv(input_data, sep='\t')
raw_table = raw_table.rename(columns={
'PeakID': 'ID', 'Title': 'Metabolite name', 'RT (min)': 'RT(min)',
'Adduct': 'Adduct type', 'InChIKey': 'INCHIKEY',
'MSMS spectrum': 'MS/MS spectrum'})
# elif tabel_format == 'Merged text':
# raw_table = pd.read_csv(input_data, sep='\t')
new_table = get_annotated_df(raw_table)
columns = new_table.columns.values
if 'Height' not in columns:
new_table['Height'] = 0
samples = [col for col in columns
if col not in msdial_std_columns and 'Blank' not in col]
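# fill 'Height' with the mean intensity over the sample columns (non-standard, non-blank columns)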
for row, df in new_table.iterrows():
new_table.loc[row:row, ['Height']] = df[samples].mean()
if 'Data from' not in columns:
new_table['Data from'] = ''
self.target_table = new_table
self.target_table = self.target_table.fillna({'Comment': ''})
comments = self.target_table['Comment'].values.tolist()
comments = ['' if isinstance(v, float) else v for v in comments]
ex_list = ['isotope of', 'adduct linked to', 'Unit',
'similar chromatogram', 'found in']
user_comment_list = [refine_comments(txt, ex_list) for txt in comments]
self.target_table['User comment'] = user_comment_list
#endregion
sec_rep.set("Extracting MS/MS")
sec_bar.step(1)
#region Extracting MS/MS (Modified ver)
self.msms_dict = {}
target_id_list = self.target_table['ID'].values.tolist()
total = set_each_prgbar(each_bar, target_id_list)
for i, target_id in enumerate(target_id_list, start=1):
each_rep.set("{}/{}".format(i, total))
df = self.target_table[self.target_table['ID'] == target_id]
pair_list = str(df['MS/MS spectrum'].values[0]).split(' ')
fragment_list = [pair for pair in pair_list if pair != '']
mz_list = [float(v.split(':')[0]) for v in fragment_list]
intensity_list = [int(v.split(':')[1]) for v in fragment_list]
msms_df = pd.DataFrame({'frag m/z': mz_list, 'intensity': intensity_list})
self.msms_dict[target_id] = msms_df
each_bar.step(1)
#endregion
sec_rep.set("Constructing Structure Database")
sec_bar.step(1)
#region Constructing lipid structural info dict
self.lipid_structural_info_dict = {}
self.target_table['Precise m/z'] = 0
self.target_table['Precise m/z type'] = ''
total = set_each_prgbar(each_bar, self.target_table)
for i, (idx, one_df) in enumerate(self.target_table.iterrows(), start=1):
each_rep.set("{}/{}".format(i, total))
table_id = one_df['ID']
current_structural_dict = extract_lipid_structural_info(one_df)
msms_df = self.msms_dict[table_id]
ref_mz = current_structural_dict['Ref precursor Mz']
ms1_mz = one_df['Precursor m/z']
ms2_mz = 0
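# look for the precursor ion among the MS/MS fragments within a +/-0.01 Da window around the reference m/z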
ref_front = ref_mz - 0.01
ref_tail = ref_mz + 0.01
ms2_df = msms_df[(msms_df['frag m/z']>=ref_front)
&(msms_df['frag m/z']<=ref_tail)]
if len(ms2_df) > 0:
ms2_mz = ms2_df['frag m/z'].values[0]
ms1_ppm = (ms1_mz-ref_mz)/ref_mz*1000*1000
ms2_ppm = (ms2_mz-ref_mz)/ref_mz*1000*1000
ms1_ppm = abs(math_floor(ms1_ppm, 2))
ms2_ppm = abs(math_floor(ms2_ppm, 2))
mz_type = ''
determined_mz = 0
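# prefer an MS1 match within 10 ppm, then an MS2 match; otherwise try an oxygen-adduct (+O) fragment below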
if ms1_ppm <= 10:
mz_type, determined_mz = 'MS1', math_floor(ms1_mz, 4)
elif ms2_ppm <= 10:
mz_type, determined_mz = 'MS2', math_floor(ms2_mz, 4)
else:
ref_O_front = ref_mz + ex_mass['O'] - 0.01
ref_O_tail = ref_mz + ex_mass['O'] + 0.01
ms2_O_df = msms_df[(msms_df['frag m/z']>=ref_O_front)
& (msms_df['frag m/z']<=ref_O_tail)]
len_ms2_O_df = len(ms2_O_df)
if len_ms2_O_df > 0:
ms2_O_mz = ms2_O_df['frag m/z'].values[0] - ex_mass['O']
ms2_O_ppm = (ms2_O_mz-ref_mz)/ref_mz*1000*1000
ms2_O_ppm = abs(math_floor(ms2_O_ppm, 2))
if ms2_O_ppm <= 10:
mz_type, determined_mz = 'MS2+O', math_floor(ms2_O_mz, 4)
else:
mz_type, determined_mz = 'MS1>10ppm', math_floor(ms1_mz, 4)
self.target_table.loc[idx:idx, ['Precise m/z type', 'Precise m/z']] \
= mz_type, determined_mz
current_structural_dict['Precise precursor Mz'] \
= [mz_type, determined_mz]
current_structural_dict['MS2 Mz'] = math_floor(ms2_mz, 4)
self.lipid_structural_info_dict[table_id] = current_structural_dict
each_bar.step(1)
#endregion
sec_rep.set("MS/MS Data processing")
sec_bar.step(1)
#region Calculating Ratio(%) and Delta (Modified ver)
digit, up = 4, 1
total = set_each_prgbar(each_bar, self.msms_dict)
for i, (idx, mz_int_df) in enumerate(self.msms_dict.items(), start=1):
each_rep.set("{}/{}".format(i, total))
max_int = mz_int_df['intensity'].max()
ref_mz = self.lipid_structural_info_dict[idx]['Ref precursor Mz']
det_mz = self.lipid_structural_info_dict[idx]['Precise precursor Mz'][1]
mzs = mz_int_df['frag m/z'].values.tolist()
ints = mz_int_df['intensity'].values.tolist()
deltas = [math_floor((det_mz - mz), digit) for mz in mzs]
ratios = [math_floor((v/max_int)*100, digit) for v in ints]
new_msms_df = pd.DataFrame({'frag m/z': mzs, 'intensity': ints,
'Delta': deltas, 'Ratio(%)': ratios})
new_msms_df = new_msms_df[new_msms_df['frag m/z'] <= (ref_mz + up)]
new_msms_df = new_msms_df.iloc[::-1].reset_index(drop=True)
self.msms_dict[idx] = new_msms_df
each_bar.step(1)
#endregion
sec_rep.set("Searching CID fragment ions")
sec_bar.step(1)
#region CID fragment ions search
self.cid_result_dict = {}
total = set_each_prgbar(each_bar, self.lipid_structural_info_dict)
for i, (idx, structure_dict) in enumerate(
self.lipid_structural_info_dict.items(), start=1):
each_rep.set("{}/{}".format(i, total))
msms_df = self.msms_dict[idx]
self.cid_result_dict[idx] = search_cid_fragment_ions(
structure_dict, msms_df, ms_tolerance_ppm)
each_bar.step(1)
#endregion
sec_rep.set("Updating Structure Database")
sec_bar.step(1)
#region Plasmalogen search
plasm_candidate_df = self.target_table[self.target_table['Ontology'].str.contains('Ether')]
total = set_each_prgbar(each_bar, plasm_candidate_df)
for i, (row, df) in enumerate(plasm_candidate_df.iterrows(), start=1):
each_rep.set("{}/{}".format(i, total))
table_id = df['ID']
subclass_result_dict = self.cid_result_dict[table_id]['Lipid subclass']
if subclass_result_dict:
plasm_ions = [key for key, v in subclass_result_dict.items() \
if 'Plasmalogen' in key and v[1]>0]
if plasm_ions:
structure_dict = self.lipid_structural_info_dict[table_id]
db_1 = structure_dict['Each moiety info']['db-1']
if db_1 > 0:
name = df['Metabolite name']
ontology = df['Ontology'].replace('Ether', 'Plasm')
self.target_table.loc[row:row, 'Ontology'] = ontology
structure_dict['Ontology'] = ontology
structure_dict['Each moiety info']['db-1'] = db_1 -1
if (db_1 -1 == 0):
pre_v = structure_dict['Unsaturated moiety']
structure_dict['Unsaturated moiety'] = pre_v -1
self.lipid_structural_info_dict[table_id] = structure_dict
each_bar.step(1)
#endregion
sec_rep.set("Analyzing OAD-MS/MS")
sec_bar.step(1)
#region OAD analysis
self.oad_result_dict = {}
self.determined_db_pos_dict = {}
total = set_each_prgbar(each_bar, self.lipid_structural_info_dict)
for i, (table_id, info_dict) in enumerate(self.lipid_structural_info_dict.items(), start=1):
name = self.target_table[self.target_table['ID'] == table_id]['Metabolite name'].values[0]
name = name.split('|')[1] if '|' in name else name
each_rep.set("{}/{} : {}".format(i, total, name))
unsaturated_moieties_num = info_dict['Unsaturated moiety']
ontology = info_dict['Ontology']
solved_moiety = len(info_dict['Each moiety info'])
is_moiety_solved = True
if ontology in triacyls_sphingolipids and solved_moiety < 6:
is_moiety_solved = False
elif ontology not in mono_acyl_list and solved_moiety < 4:
is_moiety_solved = False
if unsaturated_moieties_num > 0 and is_moiety_solved:
msms_df = self.msms_dict[table_id]
self.oad_result_dict[table_id] = determine_db_positions(
unsaturated_moieties_num, info_dict, msms_df,
ms_tolerance_ppm, must_nl_cut_off_dict, cut_off_ratio)
else:
self.oad_result_dict[table_id] = {'Resolved level': 'None',
'Validated num': 0, 'Each bools': [False], 'Moiety-1': {}}
each_bar.step(1)
#endregion
sec_rep.set("Reflecting Analysis Results")
sec_bar.step(1)
#region Update OAD analysis result into metabolite name & Generating Graph dict
self.target_table['OAD result name'] = ''
self.target_table['Solved level'] = 'None'
self.graph_dict = {}
total = set_each_prgbar(each_bar, self.oad_result_dict)
for i, (table_id, result_dict) in enumerate(self.oad_result_dict.items(), start=1):
each_rep.set("{}/{}".format(i, total))
idx = self.target_table[self.target_table['ID'] == table_id].index[0]
lipid_info = self.lipid_structural_info_dict[table_id]
name = self.target_table['Metabolite name'][idx]
oad_result_name = name.split('|')[1] if '|' in name else name
if any(result_dict['Each bools']):
oad_result_name = determine_oad_metabolite_name_N_description(
oad_result_dict=result_dict, structure_dict=lipid_info,
metabolite_name=oad_result_name)
self.target_table.loc[idx:idx, ['OAD result name', 'Solved level']] \
= oad_result_name, result_dict['Resolved level']
self.graph_dict[table_id] = {'OAD': {}}
self.graph_dict[table_id]['OAD'] = set_oad_graph_dict_value(
result_dict, lipid_info)
each_bar.step(1)
#endregion
sec_rep.set("Finalizing")
sec_bar.step(1)
each_rep.set("")
#region Generating analysis result files
self.target_table = self.target_table.sort_values(
['OAD result name', 'Precursor m/z'])
dataframe_path = f'{directry}/{prefix}_analysis_table.pkl'
msms_path = f'{directry}/{prefix}_extracted_msms.pickle'
cid_result_path = f'{directry}/{prefix}_cid_result.pickle'
oad_result_path = f'{directry}/{prefix}_oad_result.pickle'
structure_info_path = f'{directry}/{prefix}_structure_info.pickle'
graph_info_path = f'{directry}/{prefix}_graph_info.pickle'
self.target_table.to_pickle(dataframe_path)
with open(msms_path, 'wb') as output_msms_file:
pickle.dump(self.msms_dict, output_msms_file)
with open(cid_result_path, 'wb') as output_cid_file:
pickle.dump(self.cid_result_dict, output_cid_file)
with open(oad_result_path, 'wb') as output_oad_file:
pickle.dump(self.oad_result_dict, output_oad_file)
with open(structure_info_path, 'wb') as output_lipif_file:
pickle.dump(self.lipid_structural_info_dict, output_lipif_file)
with open(graph_info_path, 'wb') as output_graph_file:
pickle.dump(self.graph_dict, output_graph_file)
#endregion
sec_rep.set("Analysis Finished")
sec_bar.step(1)
class BatchAnalyzer(object):
def __init__(self, directry, prefix, alignment_path, peakid_path,
peaklists_dict, ms_tolerance_ppm, must_nl_cut_off_dict, cut_off_ratio,
normalized, sec_rep, sec_bar, each_rep, each_bar, timer):
sec_rep.set("Data Pre-processing")
sec_bar["maximum"] = 9
#region Data preprocessing
#Alignment table
raw_df_1 =
|
pd.read_csv(alignment_path, skiprows=[0,1,2,3], sep='\t')
|
pandas.read_csv
|
"""
This module contains all the dash components visible to the user and composes them into a layout.
"""
from datetime import datetime
import logging
from pathlib import Path
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash_table.Format import Format, Scheme, Symbol
import pandas as pd
# Set pandas plotting backend to plotly. Requires plotly >= 4.8.0.
pd.options.plotting.backend = 'plotly'
app_route = 'circletask'
top_templates_path = Path(__file__).parents[1] / 'templates'
nav_html = (top_templates_path / 'nav.html').read_text()
dashapp_templates_path = Path(__file__).parent / 'templates'
intro_html = (dashapp_templates_path / 'information.html').read_text()
refs_html = (dashapp_templates_path / 'references.html').read_text()
# Index page.
html_layout = f'''<!DOCTYPE html>
<html>
<head>
{{%metas%}}
<title>{{%title%}}</title>
{{%favicon%}}
{{%css%}}
</head>
<body>
{nav_html}
{intro_html}
{{%app_entry%}}
{refs_html}
<footer>
{{%config%}}
{{%scripts%}}
{{%renderer%}}
</footer>
</body>
</html>'''
# ToDo: Move style settings to less.
# Body
theme = {'font-family': 'Lobster',
'background-color': '#e7ecf7',
'height': '60vh',
}
def create_header():
""" The header for the dashboard. """
header_style = {'background-color': theme['background-color'], 'padding': '1.5rem', 'textAlign': 'center'}
header = html.Div(children=[html.Header(html.H2(children="EDA Dashboard", style=header_style)),
dcc.Markdown("To load the data and start analyzing, "
"please press the **REFRESH FROM DB** button.")])
return header
###############
# Components. #
###############
def generate_upload_component(upload_id):
""" Component to receive new data to upload to the server.
:param upload_id: Unique identifier for the component.
:type upload_id: str
:rtype: dash_core_components.Upload.Upload
"""
upload_widget = dcc.Upload(id=upload_id,
children=html.Div(["Drag and Drop or ", html.A("Select CSV Files"), " for upload."]),
accept=".csv",
style={'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center'},
# Allow multiple files to be uploaded
multiple=True)
return upload_widget
def generate_daterange_picker():
date_picker = html.Div([
html.Div([html.Label('Date range:')], style={'marginInline': '5px', 'display': 'inline-block'}),
dcc.DatePickerRange(
id='date-picker-range',
min_date_allowed=datetime(2020, 6, 5),
initial_visible_month=datetime(2020, 6, 5),
display_format='MMM Do, YYYY',
start_date=datetime(2020, 6, 5).date(),
),
], style={'display': 'inline-block', 'margin': '0 3rem'})
return date_picker
def generate_user_select(dataframe):
""" Dropdown to filter for specific user data. """
if dataframe.empty:
options = dict()
else:
options = [{'label': p, 'value': p} for p in dataframe['user'].unique()]
user_select = html.Div([
html.Div([html.Label('Participant')], style={'marginInline': '5px', 'display': 'inline-block'}),
html.Div([dcc.Dropdown(
id='user-IDs',
options=options,
value=[],
placeholder='Filter...',
clearable=True,
multi=True,
)], style={'verticalAlign': 'middle', 'display': 'inline-block', 'minWidth': '100px'}),
])
return user_select
def dash_row(*children):
"""
:param children: Components to be displayed side by side, e.g. table and figure.
:type children: List
:return:
"""
row = html.Div(className='row', children=[*children])
sep = html.Hr()
return row, sep
###############
# Graphs. #
###############
def get_figure_div(graph, num, description):
""" Wrapper for graphs to add description and consistent APA-like styling.
:param graph: Graph object.
:type graph: dash_core_components.Graph.Graph
:param num: Ordinal number of figure.
:type num: int
:param description: Description of graph.
:type description: str
"""
div = html.Div(className='six columns',
children=[html.Div([graph], className='pretty_container'),
dcc.Markdown(f"*Figure {num}.* {description}")])
return div
###############
# Tables. #
###############
def get_table_div(table, num, title, description=None):
""" Wrapper for table to add title and description and consistent APA-like styling.
:param table: Table object.
:type table: dash_table.DataTable
:param num: Ordinal number of figure.
:type num: int
:param title: Title of the table.
:type title: str
:param description: Description of graph.
:type description: str
"""
table = html.Div(className='six columns',
children=[table,
html.P(f"Table {num}"),
dcc.Markdown(f"*{title}*"),
dcc.Markdown(f"*Note*: {description}" if description else ""),
])
return table
def table_type(df_column):
""" Return the type of column for a dash DataTable.
Doesn't work most of the time and just returns 'any'.
Note - this only works with Pandas >= 1.0.0
"""
if isinstance(df_column.dtype, pd.DatetimeTZDtype):
return 'datetime'
elif (isinstance(df_column.dtype, pd.StringDtype) or
isinstance(df_column.dtype, pd.BooleanDtype) or
isinstance(df_column.dtype, pd.CategoricalDtype) or
isinstance(df_column.dtype, pd.PeriodDtype)):
return 'text'
elif (df_column.dtype == 'int' or
isinstance(df_column.dtype, pd.SparseDtype) or
isinstance(df_column.dtype, pd.IntervalDtype) or
isinstance(df_column.dtype, pd.Int8Dtype) or
isinstance(df_column.dtype, pd.Int16Dtype) or
isinstance(df_column.dtype, pd.Int32Dtype) or
isinstance(df_column.dtype, pd.Int64Dtype)):
return 'numeric'
else:
return 'any'
def get_columns_settings(dataframe, order=None):
""" Get display settings of columns for tables.
:param dataframe: Data
:type dataframe: pandas.DataFrame
:param order: Custom order for columns. Use position, in case names change.
:type order: list[int]
:return: List of dicts. Columns displaying float values have special formatting.
:rtype: list
"""
columns = list()
if order is None:
cols = dataframe.columns
else:
# Reorder columns.
try:
cols = [dataframe.columns[i] for i in order]
except IndexError:
logging.log(logging.WARNING, "Order of columns out of range.")
cols = dataframe.columns
for c in cols:
# Nicer column names. Exclude df1 and df2 from renaming.
if c == 'dV':
label = '$\\Delta V$'
elif c == 'dVz':
label = '$\\Delta V_z$'
elif c == 'p-unc':
label = 'p'
elif c.startswith('p-'):
label = c
elif c == 'SS':
label = 'Sum of Squares'
elif c == 'MS':
label = 'Mean Square'
elif c == 'np2':
label = '$\\eta_{p}^{2}$'
elif c == 'eps':
label = '$\\epsilon$'
else:
label = c.replace("_", " ").title()
if 'Df' in label:
label = label.replace("Df1", "df1").replace("Df2", "df2")
if dataframe[c].dtype == 'float':
columns.append({'name': label,
'id': c,
'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)})
else:
columns.append({'name': label, 'id': c, 'type': table_type(dataframe[c])})
return columns
def get_pca_columns_settings(dataframe):
""" Get display settings of columns for PC vs. UCM table.
:param dataframe: Angles between principal components and ucm vectors.
:type dataframe: pandas.DataFrame
:return: List of dicts. Columns displaying float values have special formatting.
:rtype: list
"""
columns = list()
for c in dataframe.columns:
if dataframe[c].dtype == 'float':
columns.append({'name': c.capitalize(),
'id': c,
'type': 'numeric',
'format': Format(nully='N/A',
precision=2,
scheme=Scheme.fixed,
symbol=Symbol.yes,
symbol_suffix=u'˚')})
else:
label = c.capitalize() if c != 'PC' else c
columns.append({'name': label, 'id': c, 'type': table_type(dataframe[c])})
return columns
def generate_table(dataframe, table_id):
""" Get a table to display data with conditional formatting.
:param dataframe: Data to be displayed
:param table_id: Unique identifier for the table.
:return: Dash DataTable
"""
table = dash_table.DataTable(
id=table_id,
data=dataframe.to_dict('records'),
columns=[{'name': i, 'id': i} for i in dataframe.columns],
export_format='csv',
filter_action='native',
sort_action='native',
sort_mode='multi',
style_table={'height': theme['height'], 'marginBottom': '0px'},
style_header={'fontStyle': 'italic',
'borderTop': '1px solid black',
'borderBottom': '1px solid black',
'textAlign': 'center'},
style_filter={'borderBottom': '1px solid grey'},
fixed_rows={'headers': True, 'data': 0},
style_as_list_view=True,
style_cell={
'minWidth': '0px', 'maxWidth': '20px', # 'width': '20px',
'whiteSpace': 'normal', # 'no-wrap',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'textAlign': 'center',
},
style_cell_conditional=[
{'if': {'column_id': 'user'},
'width': '5%'},
{'if': {'column_id': 'session'},
'width': '7%'},
{'if': {'column_id': 'condition'},
'width': '8%'},
{'if': {'column_id': 'block'},
'width': '5%'},
{'if': {'column_id': 'task'},
'width': '5%'},
{'if': {'column_id': 'outlier'},
'width': '5.5%'},
# 'display': 'none',
],
style_data={'border': '0px'},
style_data_conditional=[
{'if': {'filter_query': '{outlier} = 1'},
'color': 'red'},
],
css=[{
'selector': '.dash-cell div.dash-cell-value',
'rule': 'display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;'
}],
)
return table
def generate_simple_table(dataframe, table_id):
""" Create a table just showing the data.
No sorting or filtering.
:param dataframe: data to be displayed.
:param table_id: Unique identifier for the table.
:return: DataTable
"""
table = dash_table.DataTable(
id=table_id,
data=dataframe.to_dict('records'),
columns=get_columns_settings(dataframe),
export_format='csv',
style_header={'fontStyle': 'italic',
'borderTop': '1px solid black',
'borderBottom': '1px solid black',
'textAlign': 'center'},
style_cell={
'minWidth': '0px', 'maxWidth': '20px', # 'width': '20px',
'whiteSpace': 'normal', # 'no-wrap',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'textAlign': 'center',
},
style_data={'border': '0px', 'textAlign': 'center'},
# Bottom header border not visible, fake it with upper border of row 0.
style_data_conditional=[{
"if": {"row_index": 0},
'borderTop': '1px solid black'
}],
css=[{
'selector': '.dash-cell div.dash-cell-value',
'rule': 'display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;'
}],
)
return table
#######################
# Text #
#######################
def wilcoxon_rank_result():
comp = html.Div(className='six columns', children=[
html.H3("Difference between projections parallel and orthogonal to the UCM across participants."),
html.P("A Wilcoxon signed rank test is used to compare the difference between projections parallel and "
"orthogonal to the theoretical UCM across participants. Because of the high variability across "
"participants a non-parametric test is used."),
dcc.Markdown(id='wilcoxon_result', children="The result indicates that the parallel projection scores were "
"{decision}higher than the orthogonal projection scores, "
"Z={teststat}, *p = {p:.5f}*."),
])
return comp
#######################
# Compose components. #
#######################
def create_content():
""" Compose widgets into a layout. """
# Start with an empty dataframe, gets populated by callbacks anyway.
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import PIL
import generate_tfrecord
import numpy as np
import pandas as pd
import tensorflow as tf
class CSVToTFExampleTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def test_csv_to_tf_example_one_raccoon_per_file(self):
"""Generate tf records for one raccoon from one file."""
image_file_name = 'tmp_raccoon_image.jpg'
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(self.get_temp_dir(), image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
column_names = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
raccoon_data = [('tmp_raccoon_image.jpg', 256, 256, 'raccoon', 64, 64, 192, 192)]
raccoon_df = pd.DataFrame(raccoon_data, columns=column_names)
grouped = generate_tfrecord.split(raccoon_df, 'filename')
for group in grouped:
example = generate_tfrecord.create_tf_example(group, self.get_temp_dir())
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[image_file_name.encode('utf-8')])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[image_file_name.encode('utf-8')])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value, [b'jpg'])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[b'raccoon'])
self._assertProtoEqual(
example.features.feature['image/object/class/label'].int64_list.value,
[1])
def test_csv_to_tf_example_multiple_raccoons_per_file(self):
"""Generate tf records for multiple raccoons from one file."""
image_file_name = 'tmp_raccoon_image.jpg'
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(self.get_temp_dir(), image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
column_names = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
raccoon_data = [('tmp_raccoon_image.jpg', 256, 256, 'raccoon', 64, 64, 192, 192),
('tmp_raccoon_image.jpg', 256, 256, 'raccoon', 96, 96, 128, 128)]
raccoon_df =
|
pd.DataFrame(raccoon_data, columns=column_names)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import string
from sklearn.feature_extraction.text import CountVectorizer
# Step 1
print("-------------------Message DataFrame----------------------")
message =
|
pd.read_csv('SMSSpamCollection',sep='\t',names=["labels","message"])
|
pandas.read_csv
|
"""Functions to load literature data."""
import os
import pandas as pd
def load_cp6():
"""
Load CP6 time series.
Monthly total sales, in monetary terms on a standard scale, of tobacco
and related products marketed by a major company in the UK.
The data run from January 1955 to December 1959 inclusive.
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/data/'
tmp = pd.read_csv(data_dir + 'cp6__west_harrison.csv')
tmp["time"] =
|
pd.to_datetime(tmp["time"])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
This module holds Classes and Functions for solving linear optimisation
problems based on tabular data.
Please use this module with care. It is work in progress and not properly
tested yet!
Contact: <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import datetime
import logging
import os
import pickle
import warnings
from copy import deepcopy
import oemof.solph as solph
import pandas as pd
from .external import Scenario
from .postprocessing import analyse_bus
from .postprocessing import analyse_costs
from .postprocessing import analyse_emissions
from .postprocessing import get_all_sequences
from .postprocessing import get_boundary_flows
from .postprocessing import get_trafo_flow
from .setup_model import add_buses
from .setup_model import add_links
from .setup_model import add_sinks
from .setup_model import add_sinks_fix
from .setup_model import add_sources
from .setup_model import add_sources_fix
from .setup_model import add_storages
from .setup_model import add_transformer
from .setup_model import check_active
from .setup_model import check_nonconvex_invest_type
from .setup_model import load_csv_data
class DistrictScenario(Scenario):
"""Scenario class for urban energy systems"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.input_path = kwargs.get("input_path", None)
self.emission_limit = kwargs.get("emission_limit", None)
self.location = kwargs.get("location", None)
self.number_of_time_steps = \
kwargs.get("number_of_time_steps", 10)
self.results = dict()
def load_csv(self, path=None):
if path is not None:
self.location = path
self.table_collection = load_csv_data(self.location)
return self
def check_input(self):
self.table_collection = check_active(self.table_collection)
self.table_collection = check_nonconvex_invest_type(
self.table_collection)
return self
def initialise_energy_system(self):
"""Initialises the oemof.solph Energysystem."""
date_time_index = pd.date_range(
"1/1/{0}".format(self.year),
periods=self.number_of_time_steps,
freq="H"
)
self.es = solph.EnergySystem(timeindex=date_time_index)
def create_nodes(self):
nd = self.table_collection
nod, busd = add_buses(nd['Bus'])
nod.extend(
add_sources(nd['Source'], busd, nd['Timeseries']) +
add_sources_fix(nd['Source_fix'], busd, nd['Timeseries']) +
add_sinks(nd['Sink'], busd, nd['Timeseries']) +
add_sinks_fix(nd['Sink_fix'], busd, nd['Timeseries']) +
add_storages(nd['Storages'], busd) +
add_transformer(nd['Transformer'], busd, nd['Timeseries'])
)
if 'Link' in nd.keys():
nod.extend(add_links(nd['Link'], busd))
return nod
def table2es(self):
if self.es is None:
self.initialise_energy_system()
self.check_input()
nodes = self.create_nodes()
self.es.add(*nodes)
def add_emission_constr(self):
if self.emission_limit is not None:
if self.model is not None:
solph.constraints.generic_integral_limit(
self.model, keyword='emission_factor',
limit=self.emission_limit)
            else:
                raise ValueError("The model must be created first.")
return self
def add_couple_invest_contr(self, couple_invest_flow):
"""
        Adds a solph constraint for coupling investment flows.
syntax of couple_invest_flow:
couple_invest_flow={
'flow1': ("label_from", "label_to"),
'flow2': ("label_from", "label_to"),
}
        Make sure that these flows are InvestmentFlows.
"""
flow1_from = self.es.groups[couple_invest_flow['flow1'][0]]
flow1_to = self.es.groups[couple_invest_flow['flow1'][1]]
investflow_1 = \
self.model.InvestmentFlow.invest[flow1_from, flow1_to]
flow2_from = self.es.groups[couple_invest_flow['flow2'][0]]
flow2_to = self.es.groups[couple_invest_flow['flow2'][1]]
investflow_2 = \
self.model.InvestmentFlow.invest[flow2_from, flow2_to]
solph.constraints.equate_variables(
self.model,
investflow_1,
investflow_2,
factor1=1,
name="couple_investment_flows"
)
def solve(self, with_duals=False, tee=True, logfile=None, solver=None,
couple_invest_flow=None, **kwargs):
if self.es is None:
self.table2es()
self.create_model()
self.add_emission_constr()
if couple_invest_flow is not None:
self.add_couple_invest_contr(couple_invest_flow)
logging.info("Optimising using {0}.".format(solver))
if with_duals:
self.model.receive_duals()
if self.debug:
filename = os.path.join(
solph.helpers.extend_basic_path("lp_files"), "q100opt.lp"
)
self.model.write(
filename, io_options={"symbolic_solver_labels": True}
)
logging.info("Store lp-file in {0}.".format(filename))
solver_kwargs = {
"solver_cmdline_options": kwargs.get(
"solver_cmdline_options", {})}
self.model.solve(
solver=solver, solve_kwargs={"tee": tee, "logfile": logfile},
**solver_kwargs
)
# store directly at district energy system
self.results["main"] = solph.processing.results(self.model)
self.results["meta"] = solph.processing.meta_results(self.model)
self.results["param"] = solph.processing.parameter_as_dict(self.es)
self.results["meta"]["scenario"] = self.scenario_info(solver)
if self.location is not None:
self.results["meta"]["in_location"] = self.location
self.results['meta']["datetime"] = datetime.datetime.now()
self.results["meta"]["solph_version"] = solph.__version__
self.results['meta']['emission_limit'] = self.emission_limit
self.results['meta']['solver']['solver'] = solver
self.results['costs'] = self.model.objective()
self.results['table_collection'] = self.table_collection
if hasattr(self.model, 'integral_limit_emission_factor'):
self.results['emissions'] = \
self.model.integral_limit_emission_factor()
self.results['timeindex'] = self.es.timeindex
def plot(self):
pass
def tables_to_csv(self, path=None):
"""Dump scenario into a csv-collection."""
if path is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
path = os.path.join(dpath, "csv_export")
if not os.path.isdir(path):
os.mkdir(path)
for name, df in self.table_collection.items():
name = name.replace(" ", "_") + ".csv"
filename = os.path.join(path, name)
df.to_csv(filename)
logging.info("Scenario saved as csv-collection to {0}".format(path))
def tables_to_excel(self, dpath=None, filename=None):
"""Dump scenario into an excel-file."""
if dpath is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
if filename is None:
filename = "ds_dump.xlsx"
writer = pd.ExcelWriter(os.path.join(dpath, filename))
for name, df in sorted(self.table_collection.items()):
df.to_excel(writer, name)
writer.save()
logging.info("Scenario saved as excel file to {0}".format(filename))
def dump(self, path=None, filename=None):
"""Dump results of District scenario."""
if path is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
path = os.path.join(dpath, "energysystem")
if not os.path.isdir(path):
os.mkdir(path)
if filename is None:
filename = "ds_dump.oemof"
if not os.path.isdir(path):
os.makedirs(path)
dump_des = deepcopy(self)
if dump_des.model is not None:
setattr(dump_des, 'model', None)
if dump_des.es is not None:
setattr(dump_des, 'es', None)
pickle.dump(
dump_des.__dict__, open(os.path.join(path, filename), "wb")
)
logging.info("DistrictScenario dumped"
" to {} as {}".format(path, filename))
def restore(self, path=None, filename=None):
"""Restores a district energy system from dump."""
self.__dict__ = load_district_scenario(path, filename).__dict__
logging.info("DistrictEnergySystem restored.")
def analyse_results(self, heat_bus_label='b_heat',
elec_bus_label='b_elec'):
"""Calls all analysis methods."""
for label in [heat_bus_label, elec_bus_label]:
check_label(self.results['main'], label)
self.analyse_costs()
self.analyse_emissions()
self.analyse_kpi()
self.analyse_sequences()
self.results['sum'] = self.results['sequences'].sum()
self.analyse_boundary_flows()
self.analyse_heat_generation_flows(heat_bus_label=heat_bus_label)
self.analyse_heat_bus(heat_bus_label=heat_bus_label)
self.analyse_electricity_bus(elec_bus_label=elec_bus_label)
def analyse_costs(self):
"""Performs a cost analysis."""
if 'cost_analysis' not in self.results.keys():
self.results['cost_analysis'] = analyse_costs(
results=self.results
)
logging.info("Economic analysis completed.")
# check if objective and recalculation match
total_costs = self.results['cost_analysis']['all']['costs'].sum()
objective_value = self.results['meta']['objective']
if abs(total_costs - objective_value) > 0.01:
raise ValueError(
"The objective value and the re-calculated costs do not match!"
)
else:
logging.info(
"Check passed: Objective value and recalculated costs match."
)
return self.results['cost_analysis']
def analyse_emissions(self):
"""Performs a summary of emissions of the energy system."""
if 'emission_analysis' not in self.results.keys():
self.results['emission_analysis'] = analyse_emissions(
results=self.results
)
logging.info("Emission analysis completed.")
# check if constraint and recalculation match
total_em = self.results[
'emission_analysis']['sum']['emissions'].sum()
emission_value = self.results['emissions']
if abs(total_em - emission_value) > 0.01:
raise ValueError(
"The constraint emission value and the re-calculated emissions"
" do not match!"
)
else:
logging.info(
"Check passed: Constraint emission value and recalculated"
" emission match."
)
return self.results['emission_analysis']
def analyse_kpi(self, label_end_energy=None):
"""Description."""
if label_end_energy is None:
label_end_energy = ['demand_heat']
if 'kpi' not in self.results.keys():
costs = self.results['meta']['objective']
emissions = self.results['emissions']
end_energy = sum([
solph.views.node(
self.results['main'], x)["sequences"].values.sum()
for x in label_end_energy])
kpi_dct = {
'absolute costs [€/a]': costs,
'absolute emission [kg/a]': emissions,
'end_energy [kWh/a]': end_energy,
'specific costs [€/kWh]': costs/end_energy,
'specific emission [kg/kWh]': emissions/end_energy,
}
kpi = pd.Series(kpi_dct)
self.results['kpi'] = kpi
else:
kpi = self.results['kpi']
return kpi
def analyse_sequences(self):
"""..."""
if 'sequences' not in self.results.keys():
self.results['sequences'] = \
get_all_sequences(self.results['main'])
ind_length = len(self.results['timeindex'])
df_param = self.results['table_collection']['Timeseries'].copy()
df_param = df_param.iloc[:ind_length]
list_of_tuples = [
('parameter', x.split('.')[0], x.split('.')[1])
for x in df_param.columns
]
df_param.columns = pd.MultiIndex.from_tuples(list_of_tuples)
df_param.index = self.results['timeindex']
self.results['sequences'] = pd.concat(
[self.results['sequences'], df_param], axis=1
)
logging.info("All sequences processed into one DataFrame.")
return self.results['sequences']
def analyse_boundary_flows(self):
"""
Returns the sequences and sums of all sinks and sources.
See postprocessing.get_boundary_flows!
"""
if 'boundary_flows' not in self.results.keys():
self.results['boundary_flows'] = \
get_boundary_flows(self.results['main'])
logging.info("Boundary flows analysis completed.")
return self.results['boundary_flows']
def analyse_heat_generation_flows(self, heat_bus_label='b_heat'):
"""Gets all heat generation flows."""
if 'heat_generation' not in self.results.keys():
self.results['heat_generation'] = \
get_trafo_flow(self.results['main'], label_bus=heat_bus_label)
logging.info("Heat generation flow analysis completed.")
return self.results['heat_generation']
def analyse_heat_bus(self, heat_bus_label='b_heat'):
"""..."""
if 'heat_bus' not in self.results.keys():
self.results['heat_bus'] = \
analyse_bus(self.results['main'], bus_label=heat_bus_label)
logging.info("Heat bus analysed.")
return self.results['heat_bus']
def analyse_electricity_bus(self, elec_bus_label='b_elec'):
"""..."""
if 'electricity_bus' not in self.results.keys():
self.results['electricity_bus'] = \
analyse_bus(self.results['main'], bus_label=elec_bus_label)
logging.info("Electricity bus analysed.")
return self.results['electricity_bus']
def load_district_scenario(path, filename):
"""Load a DistrictScenario class."""
des_restore = DistrictScenario()
des_restore.__dict__ = \
pickle.load(open(os.path.join(path, filename), "rb"))
return des_restore
def check_label(results, label):
"""..."""
pass
class ParetoFront(DistrictScenario):
"""Class for calculation pareto fronts with costs and emission."""
def __init__(self, emission_limits=None, number_of_points=2,
dist_type='linear',
off_set=1,
**kwargs):
super().__init__(**kwargs)
self.number = number_of_points
self.dist_type = dist_type
self.off_set = off_set
self.table_collection_co2opt = None
self.ds_min_co2 = None
self.ds_max_co2 = None
self.e_min = None
self.e_max = None
self.emission_limits = emission_limits
self.district_scenarios = dict()
self.pareto_front = None
# ToDo: sort results District Scenarios
# self.ordered_scenarios = [
# str(x) for x in sorted([int(x) for x in self.des.keys()],
# reverse=True)
# ]
def _get_min_emission(self, **kwargs):
"""Calculates the pareto point with minimum emission."""
sc_co2opt = DistrictScenario(
emission_limit=1000000000,
table_collection=self.table_collection_co2opt,
number_of_time_steps=self.number_of_time_steps,
year=self.year,
)
sc_co2opt.solve(**kwargs)
return sc_co2opt
    def _get_max_emission(self, **kwargs):
sc_costopt = DistrictScenario(
emission_limit=1000000000,
table_collection=self.table_collection,
number_of_time_steps=self.number_of_time_steps,
year=self.year,
)
sc_costopt.solve(**kwargs)
return sc_costopt
def _calc_emission_limits(self):
"""Calculates the emission limits of the pareto front."""
if self.dist_type == 'linear':
limits = []
e_start = self.e_min + self.off_set
interval = (self.e_max - e_start) / (self.number - 1)
for i in range(self.number):
limits.append(e_start + i * interval)
elif self.dist_type == 'logarithmic':
limits = []
e_start = self.e_min + self.off_set
lim_last = self.e_max
limits.append(lim_last)
for i in range(self.number-2):
lim_last = lim_last - (lim_last - e_start) * 0.5
limits.append(lim_last)
limits.append(e_start)
else:
raise ValueError(
                'Only the distribution types "linear" and "logarithmic" are'
                ' implemented for calculating the emission limits.'
)
return limits
def _get_pareto_results(self):
"""Gets all cost an emission values of pareto front."""
index = list(self.district_scenarios.keys())
columns = ['costs', 'emissions']
df_pareto = pd.DataFrame(index=index, columns=columns)
for r, _ in df_pareto.iterrows():
df_pareto.at[r, 'costs'] = \
self.district_scenarios[r].results['costs']
df_pareto.at[r, 'emissions'] = \
self.district_scenarios[r].results['emissions']
return df_pareto
def calc_pareto_front(self, dump_esys=False, **kwargs):
"""
Calculates the Pareto front for a given number of points, or
for given emission limits.
        First, the cost-optimal and the emission-optimal solutions are calculated.
        For this, two optimisation runs are performed.
For the emission optimisation, the table_collection is prepared by
exchanging the `variable_cost` values and the `emission_factor` values.
"""
if self.table_collection is not None:
self.table_collection_co2opt = \
co2_optimisation(self.table_collection)
        else:
            raise ValueError('Provide a table_collection!')
self.ds_min_co2 = self._get_min_emission(**kwargs)
        self.ds_max_co2 = self._get_max_emission(**kwargs)
self.e_min = self.ds_min_co2.results['meta']['objective']
self.e_max = self.ds_max_co2.results['emissions']
if self.emission_limits is None:
self.emission_limits = self._calc_emission_limits()
for e in self.emission_limits:
# Scenario name relative to emission range
e_rel = (e - self.e_min) / (self.e_max - self.e_min)
e_str = "{:.2f}".format(e_rel)
# e_str = str(int(round(e)))
ds_name = self.name + '_' + e_str
ds = DistrictScenario(
name=ds_name,
emission_limit=e,
table_collection=self.table_collection,
number_of_time_steps=self.number_of_time_steps,
year=self.year,
)
ds.solve(**kwargs)
self.district_scenarios.update(
{e_str: ds}
)
if dump_esys:
esys_path = os.path.join(self.results_fn, self.name,
"energy_system")
if not os.path.isdir(esys_path):
os.mkdir(esys_path)
ds.dump(path=esys_path, filename=e_str + '_dump.des')
self.results['pareto_front'] = self._get_pareto_results()
def store_results(self, path=None):
"""
        Store the main results and input tables of the pareto front in
        non-Python file formats (.xlsx / .csv).
"""
if path is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
path = os.path.join(dpath, "pareto")
if not os.path.isdir(path):
os.mkdir(path)
# store table_collection
tables_path = os.path.join(path, "input_tables")
if not os.path.isdir(tables_path):
os.mkdir(tables_path)
for name, df in self.table_collection.items():
name = name.replace(" ", "_") + ".csv"
filename = os.path.join(tables_path, name)
df.to_csv(filename)
logging.info(
"Scenario saved as csv-collection to {0}".format(tables_path))
# store pareto results
path_pareto = os.path.join(path, 'pareto_results.xlsx')
self.results['pareto_front'].to_excel(path_pareto)
logging.info(
"Pareto front table saved as xlsx to {0}".format(path_pareto))
def dump(self, path=None, filename=None):
"""
Dumps the results of the pareto front instance.
The oemof.solph.EnergySystems and oemof.solph.Models of the
        q100opt.DistrictScenarios are removed before dumping; only the results
        are dumped.
"""
# delete all oemof.solph.EnergySystems and oemof.solph.Models
for _, v in self.__dict__.items():
if hasattr(v, 'es') or hasattr(v, 'model'):
setattr(v, 'es', None)
setattr(v, 'model', None)
for _, des in self.district_scenarios.items():
setattr(des, 'es', None)
setattr(des, 'model', None)
pickle.dump(
self.__dict__, open(os.path.join(path, filename), "wb")
)
logging.info(
"ParetoFront dumped to {} as {}".format(path, filename)
)
def restore(self, path=None, filename=None):
"""Restores a district energy system from dump."""
self.__dict__ = load_pareto_front(path, filename).__dict__
logging.info("DistrictEnergySystem restored.")
def analyse_results(self, heat_bus_label='b_heat',
elec_bus_label='b_elec'):
"""
Performs the analyse_results method of the DistrictScenario class
for all scenarios of the pareto front.
"""
for _, des in self.district_scenarios.items():
des.analyse_results(heat_bus_label=heat_bus_label,
elec_bus_label=elec_bus_label)
self.results['kpi'] = self.analyse_kpi()
self.results['heat_generation'] = self.analyse_heat_generation_flows(
heat_bus_label=heat_bus_label
)
self.results['sequences'] = self.analyse_sequences()
self.results['sum'] = self.results['sequences'].sum().unstack(level=0)
self.results['costs'] = self.get_all_costs()
self.results['emissions'] = self.get_all_emissions()
self.results['scalars'] = self.get_all_scalars()
def analyse_kpi(self, label_end_energy=None):
"""
Performs some postprocessing methods for all
DistrictEnergySystems.
"""
if label_end_energy is None:
label_end_energy = ['demand_heat']
d_kpi = {}
for e_key, des in self.district_scenarios.items():
d_kpi.update(
{e_key: des.analyse_kpi(label_end_energy=label_end_energy)}
)
df_kpi =
|
pd.concat(d_kpi, axis=1)
|
pandas.concat
|
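# A sketch of the pd.concat call in analyse_kpi above: a dict of per-scenario
# KPI Series is combined into one table whose columns are the scenario keys.
# Scenario names and values are invented.
import pandas as pd
d_kpi = {
    "0.00": pd.Series({"absolute costs [€/a]": 1000.0, "absolute emission [kg/a]": 50.0}),
    "1.00": pd.Series({"absolute costs [€/a]": 800.0, "absolute emission [kg/a]": 120.0}),
}
df_kpi = pd.concat(d_kpi, axis=1)
print(df_kpi)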
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
def importSHGdata(names,inputpath,outputpath):
#open file for writing scans to
fScans = open(outputpath+'scans.txt','w+')
#initialize data frames to hold data
countsA = pd.DataFrame()
countsB = pd.DataFrame()
pos = pd.DataFrame()
#go through each file
for name in names:
#names of each file
filename = inputpath + '/' + name + '.txt'
#import countsA (signal),countsB (dark counts), and pos (stage position)
countsA[name] = pd.read_csv(filename,sep='\t')['countsA']
countsB[name] = pd.read_csv(filename,sep='\t')['countsB']
pos[name] = pd.read_csv(filename,sep='\t')['stage']
#determine interval by number of zeros in position
interval = len(pos[name]) - np.count_nonzero(pos[name])
#define function to turn time wave into scan by taking average of intervals
def findAverage(series):
#set number of points per position here
reshaped = np.reshape(series.values,(int(len(series.values)/interval),interval))
return pd.Series(np.mean(reshaped,1))
#apply function to time data to get the scan data
aveCountsA = countsA.apply(findAverage,axis=0)
aveCountsB = countsB.apply(findAverage,axis=0)
pos = pos.apply(findAverage,axis=0)
del countsA,countsB
#correct for dark counts
counts = aveCountsA.sub(aveCountsB)
del aveCountsA,aveCountsB
#create data frame to hold each scan and the position vector
data = counts.copy()
data.insert(0,'pos',pos[names[0]])
#write to file
fScans.write(data.to_csv(sep='\t', index=False, header=True))
fScans.close()
#plot individual
plt.figure()
plt.title('Individual Scan Data')
for column in pos.columns:
plt.plot(pos[column],counts[column],'.')
return data
#define fit func
def cosFunc(x, y0, A, f, phi):
return y0 + A*np.cos(f*x + phi)
#initial fitting function where all parameters are free
def initFit(data):
#x values from which to plot fit function
xvalues = np.linspace(0,99.7,1000)
#data frame to hold initial fitting parameters
initFitParams = pd.DataFrame(columns=['name','y0','y0error','A','Aerror','f','ferror','phi','phierror'])
initFitValues = pd.DataFrame({'pos':xvalues})
#fit, going through column by column and storing fit parameters in initFits
for column in data.drop('pos',axis=1).columns:
#calculate guesses for fit func
y0guess = np.mean(data[column])
Aguess = (np.amax(data[column])-np.amin(data[column]))/2
        fguess = 0.05
        phiguess = 0
guesses = [y0guess,Aguess,fguess,phiguess]
#fit it
popt, pcov = curve_fit(cosFunc,data['pos'],
data[column],p0=guesses)
#calculate standard error
pstd = np.sqrt(np.diag(pcov))
#create row and append it to the dataframe
tdf = pd.DataFrame({'name':[column],'y0':[popt[0]],'y0error':[pstd[0]],'A':[popt[1]],'Aerror':[pstd[1]],
'f':[popt[2]],'ferror':[pstd[2]],'phi':[popt[3]],'phierror':[pstd[3]]})
initFitParams = initFitParams.append(tdf,ignore_index=True)
#calculate fit and add it to fit values
initFitValues[column] = cosFunc(xvalues,popt[0],popt[1],popt[2],popt[3])
#resort columns
columnTitles = ['name','y0','y0error','A','Aerror','f','ferror','phi','phierror']
initFitParams = initFitParams.reindex(columns=columnTitles)
#plot the initial fits
plt.figure()
plt.title('Init Fits')
for column in data.drop('pos',axis=1).columns:
plt.plot(data['pos'],data[column],'.')
plt.plot(xvalues,initFitValues[column])
return initFitParams, initFitValues
#calculate fAve for second round of fitting
def calcFAve(initFitParams):
#calculate average of f values, then period
fAve = initFitParams['f'].abs().mean()
period = 2*np.pi/fAve
#calculate stdev
fStd = initFitParams['f'].abs().std()
periodError = period*(fStd/fAve)
#print
    print('period = '+'%.2f'%period+' +- '+'%.2f'%periodError)
return fAve,fStd
#final fit function where f is held at fAve
def finalFit(data,fAve,fStd):
#x values from which to plot fit function
xvalues = np.linspace(0,99.7,1000)
#data frame to hold final fitting parameters and values
finalFitParams = pd.DataFrame(columns=['name','y0','y0error','A','Aerror','f','ferror','phi','phierror'])
finalFitValues = pd.DataFrame({'pos':xvalues})
#go through each column
for column in data.drop('pos',axis=1).columns:
#calculate guesses
y0guess = np.mean(data[column])
Aguess = (np.amax(data[column])-np.amin(data[column]))/2
        phiguess = 0
guesses = [y0guess,Aguess,phiguess]
#fit it, with f fixed
popt, pcov = curve_fit(lambda x, y0, A,
phi: cosFunc(x,y0, A, fAve, phi),
data['pos'],data[column],p0=guesses)
#calculate standard error
pstd = np.sqrt(np.diag(pcov))
#create row and append it to the dataframe
tdf =
|
pd.DataFrame({'name':[column],'y0':[popt[0]],'y0error':[pstd[0]],'A':[popt[1]],'Aerror':[pstd[1]],
'f':[fAve],'ferror':[fStd],'phi':[popt[2]],'phierror':[pstd[2]]})
|
pandas.DataFrame
|
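# A sketch of collecting per-scan fit parameters as one-row DataFrames, as in
# the tdf construction above; pd.concat is used for the stacking step because
# DataFrame.append is deprecated in recent pandas. Parameter values are made up.
import pandas as pd
rows = []
for name, (y0, A, phi) in {"scan1": (1.0, 0.5, 0.1), "scan2": (1.1, 0.4, 0.2)}.items():
    rows.append(pd.DataFrame({"name": [name], "y0": [y0], "A": [A], "phi": [phi]}))
fit_params = pd.concat(rows, ignore_index=True)
print(fit_params)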
import lightgbm as lgb
import numpy as np
import sklearn
import pandas as pd
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import mean_squared_error
import riskrewardutil as rru
basePath = '/research/remote/petabyte/users/robert/Utilities/ltr-baseline/mslr10k/'
expPath = "/research/remote/petabyte/users/robert/LightGBM/Experiments/"
dataPath = basePath + 'dat/MSLR-WEB10K/'
modelPath = basePath + 'model/'
runPath = basePath + 'run/'
qrelsFile = dataPath + '../all.test.qrels'
#Count the number of queries
def group_counts(arr):
d = np.ones(arr.size, dtype=int)
d[1:] = (arr[:-1] != arr[1:]).astype(int)
return np.diff(np.where(np.append(d, 1))[0])
name = "lgbm.2000.63.0.05.0.4.withTrisk"
combineddf = pd.DataFrame()
earlystop = [1000,1500,2000]
for stop in earlystop:
combineddf = pd.DataFrame()
    name = basename + '.earlystop%d' % (stop)
for fold in range(1,6):
suffix = name + ".fold%d" % (fold)
X, y, qidtrain = load_svmlight_file(dataPath + 'Fold%d/train.txt' % (fold), query_id=True)
train_data = lgb.Dataset(X, label=y, group=group_counts(qidtrain), free_raw_data=False)
X_valid, y_valid, qidValid = load_svmlight_file(dataPath + 'Fold%d/vali.txt' % (fold), query_id=True)
valid_data = lgb.Dataset(X_valid, label=y_valid, group=group_counts(qidValid), free_raw_data=False)
valid_data.reference = train_data
X_test, y_test, qid = load_svmlight_file(dataPath + 'Fold%d/test.txt' % (fold), query_id=True)
test_data = lgb.Dataset(X_test, label=y_test, group=group_counts(qid), free_raw_data=False)
#Global variables needed for custom metrics, qid and qrels for each valid file
qidvalid= qidValid
qrelsvalid = dataPath + 'Fold%d/vali.qrels' % (fold)
qidtrain = qidtrain
qrelstrain = dataPath + 'Fold%d/train.qrels' % (fold)
#Another global variables containing bm25 features for each fold
baselinename = 'resultsmslr10k/evalMetrics/baselinevalrun%d' % (fold)
baselineeval = 'resultsmslr10k/evalMetrics/baselinevaleval%d' % (fold)
baselinetrainname = 'resultsmslr10k/evalMetrics/baselinetrainrun%d' % (fold)
baselinetraineval = 'resultsmslr10k/evalMetrics/baselinetraineval%d' % (fold)
temppath = '/research/remote/petabyte/users/robert/LightGBM/Experiments/resultsmslr10k/evalMetrics/'
metrics = rru.riskrewardUtil(qidvalid, qrelsvalid, baselinename, baselineeval, qidtrain, qrelstrain, baselinetrainname, baselinetraineval, temppath)
eval_result = {}
#Setup Param File and generate different models for hyper parameter tuning
param = {'num_leaves':63, 'num_trees':2000, 'objective':'lambdarank',
'learning_rate': 0.05,'feature_fraction': 0.4,
'bagging_fraction': 0.8,'bagging_freq': 5,
'verbose': 1, 'early_stopping_rounds': stop}
param['metric'] = 'None'
#Train Model
num_round = 10
bst = lgb.train(param, train_data, num_round, valid_sets=[valid_data], feval=metrics.trisk1, evals_result=eval_result)
bst.save_model(modelPath + suffix)
combineddf = combineddf.append(metrics.predictgenerateRunFile(modelPath + suffix, runPath + suffix, X_test, qid))
evals =
|
pd.DataFrame.from_dict(eval_result)
|
pandas.DataFrame.from_dict
|
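# A sketch of pd.DataFrame.from_dict on a dict shaped like LightGBM's
# evals_result (per validation set, per metric, one value per iteration);
# the metric name and numbers are invented.
import pandas as pd
eval_result = {"valid_0": {"ndcg@10": [0.41, 0.43, 0.44]}}
evals = pd.DataFrame.from_dict(eval_result["valid_0"])
print(evals)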
#Somu 30th March 2018
#Project 2018: Programming and Scripting
# This program is divided into 3 sections
# Section 1 : Using Iris Data, create the python-pandas data frame,
# Section 2 : Describe the Iris flower data stored in the dataframes
# Section 3 : Plot the graphs using the matplot library
# This program makes use of panda's, numpy and matplot libraries
#Technical Reference :
# https://stackoverflow.com/
# http://pandas.pydata.org/pandas-docs/version
# https://www.tutorialspoint.com/python_pandas/python_pandas_series.htm
# https://matplotlib.org/api/pyplot_api.html
# https://matplotlib.org/examples/index.html
# https://stackoverflow.com/questions/4270301/matplotlib-multiple-datasets-on-the-same-scatter-plot?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# https://www.datascience.com/learn-data-science/tutorials/creating-data-visualizations-matplotlib-data-science-python
#Import the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Define the variables
Gname,Gnamei,Gnameo = '','',''
Lsetosa,Lvirginica,Lversi =[],[],[]
#SECTION 1 -- Starts here
#Read the data from iris.csv file
#Function to create python list for each flower with attributes
#Create Dataframes for each flower using the python List
#Setosa flower: data frame - dfsetosa :: List name - Lsetosa
#virginica flower: data frame - dfvirginica :: List name - Lvirginica
#versicolor flower: data frame - dfversi :: List name - Lversi
# fstore function: Creates lists for petal length, petal width, sepal length and sepal width for each flower type
# 3 different lists are created
def fstore(line):
Gplen = line.split(',')[0]
Gpwid = line.split(',')[1]
Gslen = line.split(',')[2]
Gswid = line.split(',')[3]
Gnameo = str(line.split(',')[4]).rstrip()
if Gnameo == 'Iris-setosa':
Lsetosa.append ([float(Gplen),float(Gpwid),float(Gslen),float(Gswid)])
elif Gnameo == 'Iris-versicolor':
Lversi.append ([float(Gplen),float(Gpwid),float(Gslen),float(Gswid)])
elif Gnameo == 'Iris-virginica':
Lvirginica.append ([float(Gplen),float(Gpwid),float(Gslen),float(Gswid)])
# Read the iris Data file from location data/iris.csv
# calls function "fstore" to create the list
with open("data/iris.csv") as f:
for line in f:
fstore(line)
# create dataframe for each flower from the list
dfsetosa = pd.DataFrame(Lsetosa, columns = ['Petal Length','Petal Width','Sepal Length', 'Sepal Width'])
dfvirginica = pd.DataFrame(Lvirginica, columns = ['Petal Length','Petal Width','Sepal Length', 'Sepal Width'])
dfversi = pd.DataFrame(Lversi, columns = ['Petal Length','Petal Width','Sepal Length', 'Sepal Width'])
#SECTION 1 -- Ends here
#SECTION 2 -- Starts here
#Using the dataframe describe function, display the below for each flower
#Count of the records/data for each flower
#Mean/Average of the flower attributes - Sepal width and length; Petal width and length
#Standard deviation of each attribute
#Minimum and maximum value of each attribute
print (("\n"),"Setosa Flower :: Data Summary")
print (("\n"),dfsetosa.describe(include='all'))
print (("\n"),"Virginica Flower :: Data Summary")
print (("\n"),dfvirginica.describe(include='all'))
print (("\n"),"Versicolor Flower :: Data Summary")
print (("\n"),dfversi.describe(include='all'))
#SECTION 2 -- Ends here
#SECTION 3 -- Starts here
#Create a dataframe called dflower, which holds the data of all the 3 flowers
#Create columns in the dataframe (dflower) for each flower attribute
#PL-Setosa -> represents the petal length of Setosa
#....
#PW-Veri -> represents the petal width of Versicolor
#Use numpy to provide linspace for the X and Y axes
#Use matplotlib to plot the graphs (Scatter and Box)
#PLOT and SHOW Graphs using the matplotlib library
#Blue colour in the Graph represents the Setosa flower
#Red colour in the Graph represents the Virginica flower
#Green colour in the Graph represents the Versicolor flower
#Data frame to hold the values of petal lengths and widths of Iris Setosa, Virginica and Versicolor
dflower =
|
pd.DataFrame()
|
pandas.DataFrame
|
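# A sketch of how the empty dflower frame above can be filled with one column
# per flower attribute, following the PL-/PW- naming described in the comments;
# the demo frames stand in for dfsetosa and dfversi.
import pandas as pd
dfsetosa_demo = pd.DataFrame({'Petal Length': [1.4, 1.3], 'Petal Width': [0.2, 0.2]})
dfversi_demo = pd.DataFrame({'Petal Length': [4.7, 4.5], 'Petal Width': [1.4, 1.5]})
dflower_demo = pd.DataFrame()
dflower_demo['PL-Setosa'] = dfsetosa_demo['Petal Length']
dflower_demo['PL-Versi'] = dfversi_demo['Petal Length']
print(dflower_demo)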
import pandas as pd
import requests
import logging
import os
import cartoframes
import datetime
import cartosql
from collections import OrderedDict
import numpy as np
import sys
### Constants
LOG_LEVEL = logging.INFO
DATA_DIR = 'data'
DATA_LOCATION_URL = 'https://api.transitfeeds.com/v1/getLocations?key=258e3d67-9c2e-46db-9484-001ce6ff3cc7'
DATA_URL = 'https://api.transitfeeds.com/v1/getFeeds?key=258e3d67-9c2e-46db-9484-001ce6ff3cc7&location={}'
# Useful links:
# http://transitfeeds.com/api/
# https://developers.google.com/transit/gtfs/reference/#pathwaystxt
# Filename for local files
FILENAME = 'gtfs_points'
# asserting table structure rather than reading from input
CARTO_TABLE = 'cit_041_gtfs'
CARTO_SCHEMA = OrderedDict([
('the_geom', 'geometry'),
('feed_id', 'numeric'),
('feed_type', 'text'),
('feed_title', 'text'),
('loc_id', 'numeric'),
('ploc_id', 'numeric'),
('loc_title_l', 'text'),
('loc_title_s', 'text'),
('latitude', 'numeric'),
('longitude', 'numeric'),
('timestamp_epoch', 'numeric'),
('ts_latest', 'timestamp'),
('gtfs_zip', 'text'),
('gtfs_txt', 'text')
])
INPUT_DATE_FORMAT = '%Y%m%d'
DATE_FORMAT = '%Y-%m-%d'
TIME_FIELD = 'ts_latest'
MAX_TRIES = 8
CARTO_URL = 'https://{}.carto.com/api/v2/sql'
CARTO_USER = os.environ.get('CARTO_USER')
CARTO_KEY = os.environ.get('CARTO_KEY')
###
## Accessing remote data
###
DATASET_ID = 'ca607a0d-3ab9-4b22-b4fe-5c43b17e47c4'
def lastUpdateDate(dataset, date):
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
body = {
"dataLastUpdated": date.isoformat()
}
try:
r = requests.patch(url=apiUrl, json=body, headers=headers)
logging.info('[lastUpdated]: SUCCESS, ' + date.isoformat() + ' status code ' + str(r.status_code))
return 0
except Exception as e:
logging.info('[lastUpdated]: ' + str(e))
def get_most_recent_date(table):
# r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True)
r = getFields(TIME_FIELD, table, f='csv', post=True)
dates = r.text.split('\r\n')[1:-1]
dates.sort()
most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
return most_recent_date
def formatDate(date):
""" Parse input date string and write in output date format """
return datetime.datetime.strptime(date, INPUT_DATE_FORMAT) \
.strftime(DATE_FORMAT)
def getFilename(date):
'''get filename from datestamp CHECK FILE TYPE'''
return os.path.join(DATA_DIR, '{}.csv'.format(
FILENAME.format(date=date.strftime('%Y%m%d'))))
def getGeom(lon, lat):
'''Define point geometry from latitude and longitude'''
geometry = {
'type': 'Point',
'coordinates': [float(lon), float(lat)]
}
return geometry
def convert_time_since_epoch(timestamp):
'''Function to convert seconds since the epoch to human readable time'''
value = datetime.datetime.fromtimestamp(timestamp)
return value.strftime('%Y-%m-%d')
def location():
'''Function to grab the unique location id (uids) from the locations api.'''
logging.info('Fetching location ids')
r = requests.get(DATA_LOCATION_URL)
json_obj = r.json()
json_obj_list = json_obj['results']
json_obj_list_get = json_obj_list.get('locations')
location_id = []
    for location in json_obj_list_get:
        x = location.get('id')
        location_id.append(x)
logging.info('Location Ids Collected')
return location_id
def feeds():
'''Function to use the uids to obtain the feed information and put them into a pandas dataframe with all the dictionaries unpacked'''
feed_list = []
logging.info('Fetching Feed info')
for id in location():
r = requests.get(DATA_URL.format(id))
json_obj = r.json()
feed_results = json_obj['results']
feed_feeds = feed_results['feeds']
try:
feed_list.append(feed_feeds[0])
except:
continue
df_3 =
|
pd.DataFrame(feed_list)
|
pandas.DataFrame
|
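# A sketch of flattening a list of feed dicts, as returned by the transitfeeds
# API, into a table; pd.json_normalize unpacks nested dicts into dotted column
# names. The two sample records are invented.
import pandas as pd
feed_list = [
    {"id": 1, "t": "Feed A", "l": {"id": 10, "lat": 1.0, "lng": 2.0}},
    {"id": 2, "t": "Feed B", "l": {"id": 20, "lat": 3.0, "lng": 4.0}},
]
df_3 = pd.json_normalize(feed_list)
print(df_3.columns.tolist())  # ['id', 't', 'l.id', 'l.lat', 'l.lng']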
import logging
from pathlib import Path
import glob
import pandas as pd
from src.defaults import PROJECT_DIR, plant_fuels, units
import numpy as np
class Transformer:
def __init__(self, input_path, output_path, start_year, end_year, benchmark_years):
""""""
self.input_path = Path(input_path)
self.output_path = Path(output_path)
self.start_year = int(start_year)
self.end_year = int(end_year)
self.benchmark_years = int(benchmark_years)
self.folder = str(self.input_path).split("/")[-1]
self.raw_tables = self.get_raw_data()
self.maximum_capacity = pd.read_csv(
str(input_path)
+ "../../../interim/maximum_capacity/proportion_technology_demand.csv"
)
self.electricity_demand = pd.read_csv(
str(input_path) + "../../../interim/electricity_demand/demand.csv"
)
def create_muse_dataset(self):
"""
Imports the starter kits datasets and converts them into a form used
for MUSE.
"""
logger = logging.getLogger(__name__)
logger.info("Converting raw data for {}.".format(self.folder))
scenarios = ["base", "net-zero", "fossil-fuel"]
scenarios_data = {}
for scenario in scenarios:
muse_data = {}
muse_data["input"] = {
"GlobalCommodities": self.generate_global_commodities()
}
muse_data["input"]["Projections"] = self.generate_projections()
muse_data["technodata"] = {"Agents": self.generate_agents_file()}
muse_data["technodata"]["power"] = {
"ExistingCapacity": self.create_existing_capacity_power()
}
muse_data["technodata"]["power"][
"Technodata"
] = self.convert_power_technodata()
muse_data["technodata"]["power"]["CommIn"] = self.get_power_comm_in(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["power"]["CommOut"] = self.get_comm_out(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["power"][
"TechnodataTimeslices"
] = self.get_technodata_timeslices(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["oil"] = {
"Technodata": self.convert_oil_technodata()
}
muse_data["technodata"]["oil"]["CommIn"] = self.get_oil_comm_in(
technodata=muse_data["technodata"]["oil"]["Technodata"]
)
muse_data["technodata"]["oil"]["CommOut"] = self.get_comm_out(
technodata=muse_data["technodata"]["oil"]["Technodata"]
)
muse_data["technodata"]["oil"][
"ExistingCapacity"
] = self.create_empty_existing_capacity(self.raw_tables["Table5"])
if self.electricity_demand["RegionName"].str.contains(self.folder).any():
self.electricity_demand = self.electricity_demand[
self.electricity_demand.RegionName == self.folder
]
muse_data["technodata"]["preset"] = self.generate_preset()
muse_data["technodata"]["power"][
"Technodata"
] = self.modify_max_capacities(
technodata=muse_data["technodata"]["power"]["Technodata"]
)
muse_data["technodata"]["power"]["Technodata"] = self.create_scenarios(
scenario, muse_data["technodata"]["power"]["Technodata"]
)
scenarios_data[scenario] = muse_data
logger.info("Writing processed data for {}".format(self.folder))
self.write_results(scenarios_data)
def get_raw_data(self):
"""
Imports all starter kits data into pandas.
"""
table_directories = glob.glob(str(self.input_path / Path("*.csv")))
tables = {}
for table_directory in table_directories:
table_name = table_directory.split("/")[-1].split("_")[0]
tables[table_name] = pd.read_csv(table_directory)
return tables
def write_results(self, results_data):
"""
Writes all the processed starter kits to CSV files for use in MUSE.
"""
import os
for scenario in results_data:
output_path_scenario = self.output_path / Path(scenario)
if (
not os.path.exists(output_path_scenario)
and type(results_data[scenario]) is dict
):
os.makedirs(output_path_scenario)
for folder in results_data[scenario]:
output_path_folder = output_path_scenario / Path(folder)
for sector in results_data[scenario][folder]:
output_path = output_path_scenario / Path(folder) / Path(sector)
if (
not os.path.exists(output_path)
and type(results_data[scenario][folder][sector]) is dict
):
os.makedirs(output_path)
elif not os.path.exists(output_path_folder):
os.makedirs(output_path_folder)
if type(results_data[scenario][folder][sector]) is pd.DataFrame:
results_data[scenario][folder][sector].to_csv(
str(output_path) + ".csv", index=False
)
else:
for csv in results_data[scenario][folder][sector]:
results_data[scenario][folder][sector][csv].to_csv(
str(output_path) + "/" + csv + ".csv", index=False
)
def generate_agents_file(self):
agents =
|
pd.read_csv("data/external/muse_data/default/technodata/Agents.csv")
|
pandas.read_csv
|
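# A sketch of the get_raw_data pattern above: read every *.csv in a folder into
# a dict of DataFrames keyed by the file-name prefix. The directory name is
# hypothetical; pathlib's Path.name avoids the platform-specific split("/").
from pathlib import Path
import pandas as pd
tables = {}
for table_path in Path("starter_kit_data").glob("*.csv"):
    table_name = table_path.name.split("_")[0]
    tables[table_name] = pd.read_csv(table_path)
print(sorted(tables))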
import os
import glob
import re
from collections import OrderedDict as odict
import numpy as np
import pandas as pd
def read_bin(filename, count=-1, keep_2D_zpos=False, part=True):
"""Read TRACMASS binary file"""
dtype = np.dtype([('id', 'i4'), ('jd', 'f8'),
('xpos','f4'), ('ypos','f4'),
('zpos','f4')])
runtraj = np.fromfile(open(filename), dtype, count=count)
dt64 = ((runtraj["jd"])*24*60*60-62135683200).astype("datetime64[s]")
df = pd.DataFrame(data={"id":runtraj["id"],
"xpos":runtraj["xpos"]-1,
"ypos":runtraj["ypos"]-1,
"zpos":runtraj["zpos"]-1},
index=pd.Series(dt64))
if (not keep_2D_zpos) and (len(df["zpos"].unique())==1):
del df["zpos"]
df.sort_index(inplace=True)
if part:
if type(part) is bool:
pstr = re.search(r'_r\d\d_', filename)
if not pstr:
return df
else:
part = int(pstr[0][2:-1])
print(part)
df["id"] = df.id.astype(np.uint32,copy=False)
df["id"] = part * 10**(int(np.log10(np.uint32(-1)))-1) + df.id
return df
def read_asc(filename, keep_2D_zpos=False):
"""Read TRACMASS ascii file"""
df =
|
pd.read_csv(filename, sep=" ", skipinitialspace=True,
names=["id","jd","xpos","ypos","zpos"], usecols=[0,1,2,3,4])
|
pandas.read_csv
|
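# A self-contained sketch of the read_asc call above, parsing a tiny in-memory
# TRACMASS-style ascii record instead of a real output file; the numbers are
# invented.
import io
import pandas as pd
txt = "1 2458849.5 10.5 20.5 0.0\n2 2458849.5 11.5 21.5 0.0\n"
df = pd.read_csv(io.StringIO(txt), sep=" ", skipinitialspace=True,
                 names=["id", "jd", "xpos", "ypos", "zpos"], usecols=[0, 1, 2, 3, 4])
print(df.dtypes)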
import pandas as pd
import os
from utils.composition import _fractional_composition
def norm_form(formula):
comp = _fractional_composition(formula)
form = ''
for key, value in comp.items():
form += f'{key}{str(value)[0:9]}'
return form
def count_elems(string):
count = 0
switch = 1
for c in string:
if c.isalpha():
count += switch
switch = 0
if c.isnumeric():
switch = 1
return count
# %%
if __name__ == '__main__':
print('processing all model predictions and calculating metrics')
print('this will take a few minutes...')
# %%
results_path = 'publication_predictions'
benchmark_path = 'data/benchmark_data'
test_directories = os.listdir(results_path)
benchmark_props = os.listdir(benchmark_path)
benchmark_test_directories = [test for test in test_directories if "benchmark" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for benchmark in benchmark_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in benchmark_test_directories:
df_train_orig = pd.read_csv(f'{benchmark_path}/{benchmark}/train.csv',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{benchmark_path}/{benchmark}/val.csv',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
files = os.listdir(f'{results_path}\{directory}')
file = [file for file in files if benchmark in file and 'test' in file]
if len(file) > 0:
models.append(directory.split('_')[0])
file = file[0]
df = pd.read_csv(f'{results_path}\{directory}\{file}',
keep_default_na=False, na_values=[''])
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {benchmark} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
df_preds = pd.concat([df_preds, pred], axis=1)
df_acts = pd.concat([df_acts, act], axis=1)
n_total = act.count() + df_val.shape[0] + df_train_orig.shape[0]
df_stats.at[benchmark, 'mean_test'] = act.mean()
df_stats.at[benchmark, 'std_test'] = act.std()
df_stats.at[benchmark, 'n_test'] = act.count()
df_stats.at[benchmark, 'mean_train'] = df_train['target'].mean()
df_stats.at[benchmark, 'std_train'] = df_train['target'].std()
df_stats.at[benchmark, 'n_train'] = df_train_orig.shape[0]
df_stats.at[benchmark, 'n_val'] = df_val.shape[0]
df_stats.at[benchmark, 'n_total'] = n_total
df_stats.at[benchmark, 'prop_train'] = df_train_orig.shape[0] / n_total
df_stats.at[benchmark, 'prop_val'] = df_val.shape[0] / n_total
df_stats.at[benchmark, 'prop_test'] = act.count() / n_total
df_compositions.columns = models
df_preds.columns = models
df_acts.columns = models
df_diff = df_preds - df_acts
df_mae = df_diff.abs().mean()
test_maes[benchmark] = df_mae
dataset_results[benchmark] = df_compositions
dataset_preds[benchmark] = df_preds
dataset_acts[benchmark] = df_acts
maes = test_maes.T
model_names = ['roost', 'mat2vec', 'onehot', 'elemnet', 'rf']
out_1 = maes[model_names]
out = pd.concat([out_1, df_stats], axis=1)
df_benchmark = out.copy()
# %%
results_path = 'publication_predictions'
matbench_path = 'data/matbench_cv'
test_directories = os.listdir(results_path)
matbench_props = os.listdir(matbench_path)
matbench_test_directories = [test for test in test_directories if "matbench" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for matbench in matbench_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts =
|
pd.DataFrame()
|
pandas.DataFrame
|
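# A sketch of the per-model bookkeeping in the loops above: predictions and
# actuals are collected column by column with pd.concat and the mean absolute
# error is taken per model. Model names and values are invented.
import pandas as pd
df_preds, df_acts = pd.DataFrame(), pd.DataFrame()
for model, preds in {"roost": [1.0, 2.0], "rf": [1.2, 1.8]}.items():
    df_preds = pd.concat([df_preds, pd.Series(preds, name=model)], axis=1)
    df_acts = pd.concat([df_acts, pd.Series([1.1, 2.1], name=model)], axis=1)
df_mae = (df_preds - df_acts).abs().mean()
print(df_mae)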
import pandas as pd
import glob
import os
from pandas.io.json.normalize import nested_to_record
import yaml
import natsort
def get_experiment_files(experiment_path: str, files: dict= {}):
# Assumes each directory (/ experiment run) has a unique cfg
cfg_files = glob.glob(f"{experiment_path}/**/cfg.yaml", recursive=True)
cfg_files = natsort.natsorted(cfg_files)
cfg_dfs = []
data = dict()
join_dfs = dict()
# -- Load cfgs
for run_index, cfg_file in enumerate(cfg_files):
data[run_index] = dict()
dir_name = os.path.dirname(cfg_file)
data[run_index]["dir_name"] = dir_name
run_name = dir_name.replace(experiment_path, "")
run_name = run_name[1:] if run_name[0] == "/" else run_name
data[run_index]["dir_name"] = run_name
# -- Read cfg
with open(os.path.join(cfg_file)) as handler:
config_data = yaml.load(handler, Loader=yaml.SafeLoader)
put_manual_id = False
if "experiment_id" in config_data:
experiment_id = config_data["experiment_id"]
else:
put_manual_id = True
experiment_id = config_data["cfg_id"]
        run_id = config_data.get("run_id", 0)
data[run_index]["experiment_id"] = experiment_id
data[run_index]["run_id"] = run_id
#cfg_df = pd.DataFrame(nested_to_record(config_data, sep="."), index=[0])
cfg_df = pd.DataFrame(nested_to_record(config_data))
cfg_df["run_name"] = run_name
cfg_df["run_index"] = run_index
cfg_dfs.append(cfg_df)
data["cfg"] = cfg_df
# -- Read logs
for file_name, file_type in files.items():
file_path = os.path.join(dir_name, file_name)
if not os.path.isfile(file_path):
file_path = None
continue
file_data = file_path
if hasattr(pd, str(file_type)) and file_path is not None:
                # Workaround for some experiments with a bad header:
# file_data = getattr(pd, file_type)(file_path, skiprows=1, names=['update', 'frames', 'FPS', 'duration', 'rreturn_mean', 'rreturn_std', 'rreturn_min', 'rreturn_max', 'num_frames_mean', 'num_frames_std', 'num_frames_min', 'num_frames_max', 'entropy', 'value', 'policy_loss', 'value_loss', 'grad_norm', 'value_ext', 'value_int', 'value_ext_loss', 'value_int_loss', 'return_mean', 'return_std', 'return_min', 'return_max'])
file_data = getattr(pd, file_type)(file_path)
if put_manual_id:
file_data["experiment_id"] = experiment_id
file_data["run_id"] = run_id
file_data["run_index"] = run_index
if file_name not in join_dfs:
join_dfs[file_name] = []
join_dfs[file_name].append(file_data)
data[file_name] = file_data
cfgs = pd.concat(cfg_dfs)
merge_dfs = cfgs.copy()
for join_df_name, join_df in join_dfs.items():
other_df =
|
pd.concat(join_df, sort=True)
|
pandas.concat
|
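# A sketch of the join step above: per-run log frames are stacked with
# pd.concat and merged onto the config table via run_index. The frames here are
# invented stand-ins for cfgs and one entry of join_dfs.
import pandas as pd
cfgs = pd.DataFrame({"run_index": [0, 1], "lr": [0.1, 0.01]})
logs = [pd.DataFrame({"run_index": [0], "return_mean": [1.5]}),
        pd.DataFrame({"run_index": [1], "return_mean": [2.5]})]
other_df = pd.concat(logs, sort=True)
merged = cfgs.merge(other_df, on="run_index", how="left")
print(merged)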
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
    # mix up order of accessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["bools_no_nans"].dtype == "bool"
assert df["bool_nan"].dtype == "category"
assert df["bool_NA"].dtype == "category"
assert df["bool_NA_specified"].dtype == "boolean"
def test_str_dtype_inference_on_init():
df = pd.DataFrame(
{
"str_no_nans": pd.Series(["a", "b"]),
"str_nan": pd.Series(["a", np.nan]),
"str_NA": pd.Series(["a", pd.NA]),
"str_NA_specified": pd.Series([1, pd.NA], dtype="string"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["str_no_nans"].dtype == "category"
assert df["str_nan"].dtype == "category"
assert df["str_NA"].dtype == "category"
assert df["str_NA_specified"].dtype == "category"
def test_float_dtype_inference_on_init():
df = pd.DataFrame(
{
"floats_no_nans": pd.Series([1.1, 2.2]),
"floats_nan": pd.Series([1.1, np.nan]),
"floats_NA": pd.Series([1.1, pd.NA]),
"floats_nan_specified": pd.Series([1.1, np.nan], dtype="float"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["floats_no_nans"].dtype == "float64"
assert df["floats_nan"].dtype == "float64"
assert df["floats_NA"].dtype == "category"
assert df["floats_nan_specified"].dtype == "float64"
def test_datetime_dtype_inference_on_init():
df = pd.DataFrame(
{
"date_no_nans": pd.Series([pd.to_datetime("2020-09-01")] * 2),
"date_nan": pd.Series([pd.to_datetime("2020-09-01"), np.nan]),
"date_NA": pd.Series([pd.to_datetime("2020-09-01"), pd.NA]),
"date_NaT": pd.Series([pd.to_datetime("2020-09-01"), pd.NaT]),
"date_NA_specified": pd.Series(
[pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]"
),
}
)
df.ww.init()
assert df["date_no_nans"].dtype == "datetime64[ns]"
assert df["date_nan"].dtype == "datetime64[ns]"
assert df["date_NA"].dtype == "datetime64[ns]"
assert df["date_NaT"].dtype == "datetime64[ns]"
assert df["date_NA_specified"].dtype == "datetime64[ns]"
def test_datetime_inference_with_format_param():
df = pd.DataFrame(
{
"index": [0, 1, 2],
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd_special": ["2019~01~01", "2019~01~02", "2019~01~03"],
"mdy_special": pd.Series(
["3~11~2000", "3~12~2000", "3~13~2000"], dtype="string"
),
}
)
df.ww.init(
name="df_name",
logical_types={
"ymd_special": Datetime(datetime_format="%Y~%m~%d"),
"mdy_special": Datetime(datetime_format="%m~%d~%Y"),
"dates": Datetime,
},
time_index="ymd_special",
)
assert df["dates"].dtype == "datetime64[ns]"
assert df["ymd_special"].dtype == "datetime64[ns]"
assert df["mdy_special"].dtype == "datetime64[ns]"
assert df.ww.time_index == "ymd_special"
assert isinstance(df.ww["dates"].ww.logical_type, Datetime)
assert isinstance(df.ww["ymd_special"].ww.logical_type, Datetime)
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
df.ww.set_time_index("mdy_special")
assert df.ww.time_index == "mdy_special"
df = pd.DataFrame(
{
"mdy_special": pd.Series(
["3&11&2000", "3&12&2000", "3&13&2000"], dtype="string"
),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["mdy_special"].dtype == "category"
df.ww.set_types(logical_types={"mdy_special": Datetime(datetime_format="%m&%d&%Y")})
assert df["mdy_special"].dtype == "datetime64[ns]"
df.ww.set_time_index("mdy_special")
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
assert df.ww.time_index == "mdy_special"
def test_timedelta_dtype_inference_on_init():
df = pd.DataFrame(
{
"delta_no_nans": (
pd.Series([pd.to_datetime("2020-09-01")] * 2)
- pd.to_datetime("2020-07-01")
),
"delta_nan": (
pd.Series([pd.to_datetime("2020-09-01"), np.nan])
- pd.to_datetime("2020-07-01")
),
"delta_NaT": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NaT])
- pd.to_datetime("2020-07-01")
),
"delta_NA_specified": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]")
- pd.to_datetime("2020-07-01")
),
}
)
df.ww.init()
assert df["delta_no_nans"].dtype == "timedelta64[ns]"
assert df["delta_nan"].dtype == "timedelta64[ns]"
assert df["delta_NaT"].dtype == "timedelta64[ns]"
assert df["delta_NA_specified"].dtype == "timedelta64[ns]"
def test_sets_category_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
pd.Series(["a", pd.NaT, "c"], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=["a", "b", "c"]),
PostalCode,
SubRegionCode,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
if isclass(logical_type):
logical_type = logical_type()
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert df.ww.columns[column_name].logical_type == logical_type
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
df = latlong_df.loc[:, [column_name]]
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, LatLong)
assert df[column_name].dtype == LatLong.primary_dtype
df_pandas = to_pandas(df[column_name])
expected_val = (3, 4)
if _is_koalas_dataframe(latlong_df):
expected_val = [3, 4]
assert df_pandas.iloc[-1] == expected_val
def test_sets_string_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
]
logical_types = [
Address,
Filepath,
PersonFullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_boolean_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name),
]
logical_types = [Boolean, BooleanNullable]
for series in series_list:
for logical_type in logical_types:
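            # Boolean uses a non-nullable bool dtype, so skip series that contain nulls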
if series.isnull().any() and logical_type == Boolean:
continue
series = series.astype("object")
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_int64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([1, 2, 3], name=column_name),
pd.Series([1, None, 3], name=column_name),
pd.Series([1, np.nan, 3], name=column_name),
pd.Series([1, pd.NA, 3], name=column_name),
]
logical_types = [Integer, IntegerNullable, Age, AgeNullable]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
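            # Integer and Age use a non-nullable int64 dtype, so skip series that contain nulls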
if series.isnull().any() and logical_type in [Integer, Age]:
continue
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_float64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([1.1, 2, 3], name=column_name),
pd.Series([1.1, None, 3], name=column_name),
pd.Series([1.1, np.nan, 3], name=column_name),
]
logical_types = [Double, AgeFractional]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_datetime64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["2020-01-01", "2020-01-02", "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", None, "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", np.nan, "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", pd.NA, "2020-01-03"], name=column_name),
pd.Series(
["2020-01-01", pd.NaT, "2020-01-03"], name=column_name, dtype="object"
),
]
logical_type = Datetime
for series in series_list:
series = series.astype("object")
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_invalid_dtype_casting():
column_name = "test_series"
# Cannot cast a column with pd.NA to Double
series = pd.Series([1.1, pd.NA, 3], name=column_name)
ltypes = {
column_name: Double,
}
err_msg = (
"Error converting datatype for test_series from type object to type "
"float64. Please confirm the underlying data is consistent with logical type Double."
)
df = pd.DataFrame(series)
with pytest.raises(TypeConversionError, match=err_msg):
df.ww.init(logical_types=ltypes)
# Cannot cast Datetime to Double
df = pd.DataFrame({column_name: ["2020-01-01", "2020-01-02", "2020-01-03"]})
df.ww.init(logical_types={column_name: Datetime})
err_msg = (
"Error converting datatype for test_series from type datetime64[ns] to type "
"float64. Please confirm the underlying data is consistent with logical type Double."
)
with pytest.raises(TypeConversionError, match=re.escape(err_msg)):
df.ww.set_types(logical_types={column_name: Double})
# Cannot cast invalid strings to integers
series = pd.Series(["1", "two", "3"], name=column_name)
ltypes = {
column_name: Integer,
}
err_msg = (
"Error converting datatype for test_series from type object to type "
"int64. Please confirm the underlying data is consistent with logical type Integer."
)
df = pd.DataFrame(series)
with pytest.raises(TypeConversionError, match=err_msg):
df.ww.init(logical_types=ltypes)
def test_underlying_index_set_no_index_on_init(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
input_index = pd.Int64Index([99, 88, 77, 66])
schema_df = sample_df.copy()
schema_df.index = input_index.copy()
pd.testing.assert_index_equal(input_index, schema_df.index)
schema_df.ww.init()
assert schema_df.ww.index is None
pd.testing.assert_index_equal(input_index, schema_df.index)
sorted_df = schema_df.ww.sort_values("full_name")
assert sorted_df.ww.index is None
pd.testing.assert_index_equal(pd.Int64Index([88, 77, 99, 66]), sorted_df.index)
def test_underlying_index_set(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
# Sets underlying index at init
schema_df = sample_df.copy()
schema_df.ww.init(index="full_name")
assert "full_name" in schema_df.columns
assert schema_df.index.name is None
assert (schema_df.index == schema_df["full_name"]).all()
# Sets underlying index on update
schema_df = sample_df.copy()
schema_df.ww.init(index="id")
schema_df.ww.set_index("full_name")
assert schema_df.ww.index == "full_name"
assert "full_name" in schema_df.columns
assert (schema_df.index == schema_df["full_name"]).all()
assert schema_df.index.name is None
# confirm removing Woodwork index doesn't change underlying index
schema_df.ww.set_index(None)
assert schema_df.ww.index is None
assert (schema_df.index == schema_df["full_name"]).all()
def test_underlying_index_reset(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
specified_index = pd.Index
unspecified_index = pd.RangeIndex
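    # Setting a Woodwork index replaces the default RangeIndex with an Index built from the column values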
sample_df.ww.init()
assert type(sample_df.index) == unspecified_index
sample_df.ww.set_index("full_name")
assert type(sample_df.index) == specified_index
copied_df = sample_df.ww.copy()
warning = "Index mismatch between DataFrame and typing information"
with pytest.warns(TypingInfoMismatchWarning, match=warning):
copied_df.ww.reset_index(drop=True, inplace=True)
assert copied_df.ww.schema is None
assert type(copied_df.index) == unspecified_index
sample_df.ww.set_index(None)
assert type(sample_df.index) == specified_index
# Use pandas operation to reset index
reset_df = sample_df.ww.reset_index(drop=True, inplace=False)
assert type(sample_df.index) == specified_index
assert type(reset_df.index) == unspecified_index
sample_df.ww.reset_index(drop=True, inplace=True)
assert type(sample_df.index) == unspecified_index
def test_underlying_index_unchanged_after_updates(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
sample_df.ww.init(index="full_name")
assert "full_name" in sample_df
assert sample_df.ww.index == "full_name"
assert (sample_df.index == sample_df["full_name"]).all()
copied_df = sample_df.ww.copy()
dropped_df = copied_df.ww.drop("full_name")
assert "full_name" not in dropped_df
assert dropped_df.ww.index is None
assert (dropped_df.index == sample_df["full_name"]).all()
selected_df = copied_df.ww.select("Integer")
assert "full_name" not in dropped_df
assert selected_df.ww.index is None
assert (selected_df.index == sample_df["full_name"]).all()
iloc_df = copied_df.ww.iloc[:, 2:]
assert "full_name" not in iloc_df
assert iloc_df.ww.index is None
assert (iloc_df.index == sample_df["full_name"]).all()
loc_df = copied_df.ww.loc[:, ["id", "email"]]
assert "full_name" not in loc_df
assert loc_df.ww.index is None
assert (loc_df.index == sample_df["full_name"]).all()
subset_df = copied_df.ww[["id", "email"]]
assert "full_name" not in subset_df
assert subset_df.ww.index is None
assert (subset_df.index == sample_df["full_name"]).all()
reset_tags_df = sample_df.ww.copy()
reset_tags_df.ww.reset_semantic_tags("full_name", retain_index_tags=False)
assert reset_tags_df.ww.index is None
assert (reset_tags_df.index == sample_df["full_name"]).all()
remove_tags_df = sample_df.ww.copy()
remove_tags_df.ww.remove_semantic_tags({"full_name": "index"})
assert remove_tags_df.ww.index is None
assert (remove_tags_df.index == sample_df["full_name"]).all()
set_types_df = sample_df.ww.copy()
set_types_df.ww.set_types(
semantic_tags={"full_name": "new_tag"}, retain_index_tags=False
)
assert set_types_df.ww.index is None
assert (set_types_df.index == sample_df["full_name"]).all()
popped_df = sample_df.ww.copy()
popped_df.ww.pop("full_name")
assert popped_df.ww.index is None
assert (popped_df.index == sample_df["full_name"]).all()
def test_accessor_already_sorted(sample_unsorted_df):
if _is_dask_dataframe(sample_unsorted_df):
pytest.xfail("Sorting dataframe is not supported with Dask input")
if _is_koalas_dataframe(sample_unsorted_df):
pytest.xfail("Sorting dataframe is not supported with Koalas input")
schema_df = sample_unsorted_df.copy()
schema_df.ww.init(name="schema", index="id", time_index="signup_date")
assert schema_df.ww.time_index == "signup_date"
assert isinstance(
schema_df.ww.columns[schema_df.ww.time_index].logical_type, Datetime
)
sorted_df = (
to_pandas(sample_unsorted_df)
.sort_values(["signup_date", "id"])
.set_index("id", drop=False)
)
sorted_df.index.name = None
pd.testing.assert_frame_equal(
sorted_df, to_pandas(schema_df), check_index_type=False, check_dtype=False
)
schema_df = sample_unsorted_df.copy()
schema_df.ww.init(
name="schema", index="id", time_index="signup_date", already_sorted=True
)
assert schema_df.ww.time_index == "signup_date"
assert isinstance(
schema_df.ww.columns[schema_df.ww.time_index].logical_type, Datetime
)
unsorted_df = to_pandas(sample_unsorted_df.set_index("id", drop=False))
unsorted_df.index.name = None
pd.testing.assert_frame_equal(
unsorted_df, to_pandas(schema_df), check_index_type=False, check_dtype=False
)
def test_ordinal_with_order(sample_series):
if _is_koalas_series(sample_series) or _is_dask_series(sample_series):
pytest.xfail(
"Fails with Dask and Koalas - ordinal data validation not compatible"
)
ordinal_with_order = Ordinal(order=["a", "b", "c"])
schema_df = pd.DataFrame(sample_series)
schema_df.ww.init(logical_types={"sample_series": ordinal_with_order})
column_logical_type = schema_df.ww.logical_types["sample_series"]
assert isinstance(column_logical_type, Ordinal)
assert column_logical_type.order == ["a", "b", "c"]
schema_df = pd.DataFrame(sample_series)
schema_df.ww.init()
schema_df.ww.set_types(logical_types={"sample_series": ordinal_with_order})
logical_type = schema_df.ww.logical_types["sample_series"]
assert isinstance(logical_type, Ordinal)
assert logical_type.order == ["a", "b", "c"]
def test_ordinal_with_incomplete_ranking(sample_series):
if _is_koalas_series(sample_series) or _is_dask_series(sample_series):
pytest.xfail(
"Fails with Dask and Koalas - ordinal data validation not supported"
)
ordinal_incomplete_order = Ordinal(order=["a", "b"])
error_msg = re.escape(
"Ordinal column sample_series contains values that are not "
"present in the order values provided: ['c']"
)
schema_df = pd.DataFrame(sample_series)
with pytest.raises(ValueError, match=error_msg):
schema_df.ww.init(logical_types={"sample_series": ordinal_incomplete_order})
schema_df.ww.init()
with pytest.raises(ValueError, match=error_msg):
schema_df.ww.set_types(
logical_types={"sample_series": ordinal_incomplete_order}
)
def test_ordinal_with_nan_values():
nan_df = pd.DataFrame(pd.Series(["a", "b", np.nan, "a"], name="nan_series"))
ordinal_with_order = Ordinal(order=["a", "b"])
nan_df.ww.init(logical_types={"nan_series": ordinal_with_order})
column_logical_type = nan_df.ww.logical_types["nan_series"]
assert isinstance(column_logical_type, Ordinal)
assert column_logical_type.order == ["a", "b"]
def test_accessor_with_falsy_column_names(falsy_names_df):
if _is_dask_dataframe(falsy_names_df):
pytest.xfail("Dask DataFrames cannot handle integer column names")
schema_df = falsy_names_df.copy()
schema_df.ww.init(index=0, time_index="")
assert schema_df.ww.index == 0
assert schema_df.ww.time_index == ""
schema_df.ww.set_time_index(None)
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("")
assert schema_df.ww.time_index == ""
popped_col = schema_df.ww.pop("")
assert "" not in schema_df
assert "" not in schema_df.ww.columns
assert schema_df.ww.time_index is None
schema_df.ww.set_index(None)
assert schema_df.ww.index is None
schema_df.ww[""] = popped_col
assert schema_df.ww[""].name == ""
renamed_df = schema_df.ww.rename({0: "col_with_name"})
assert 0 not in renamed_df.columns
assert "col_with_name" in renamed_df.columns
def test_dataframe_methods_on_accessor(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema")
copied_df = schema_df.ww.copy()
assert schema_df is not copied_df
assert schema_df.ww._schema is not copied_df.ww._schema
assert copied_df.ww.schema == schema_df.ww.schema
pd.testing.assert_frame_equal(to_pandas(schema_df), to_pandas(copied_df))
ltype_dtype = "int64"
new_dtype = "string"
warning = (
"Operation performed by astype has invalidated the Woodwork typing information:\n "
f"dtype mismatch for column id between DataFrame dtype, {new_dtype}, and Integer dtype, {ltype_dtype}.\n "
"Please initialize Woodwork with DataFrame.ww.init"
)
with pytest.warns(TypingInfoMismatchWarning, match=warning):
new_df = schema_df.ww.astype({"id": new_dtype})
assert new_df["id"].dtype == new_dtype
assert new_df.ww.schema is None
assert schema_df.ww.schema is not None
def test_dataframe_methods_on_accessor_new_schema_object(sample_df):
sample_df.ww.init(
index="id",
semantic_tags={"email": "new_tag"},
table_metadata={"contributors": ["user1", "user2"], "created_on": "2/12/20"},
column_metadata={"id": {"important_keys": [1, 2, 3]}},
)
copied_df = sample_df.ww.copy()
assert sample_df.ww.schema == copied_df.ww.schema
assert sample_df.ww._schema is not copied_df.ww._schema
copied_df.ww.metadata["contributors"].append("user3")
assert copied_df.ww.metadata == {
"contributors": ["user1", "user2", "user3"],
"created_on": "2/12/20",
}
assert sample_df.ww.metadata == {
"contributors": ["user1", "user2"],
"created_on": "2/12/20",
}
copied_df.ww.reset_semantic_tags(retain_index_tags=False)
assert copied_df.ww.index is None
assert sample_df.ww.index == "id"
assert copied_df.ww.semantic_tags["email"] == set()
assert sample_df.ww.semantic_tags["email"] == {"new_tag"}
copied_df.ww.columns["id"].metadata["important_keys"].append(4)
assert copied_df.ww.columns["id"].metadata == {"important_keys": [1, 2, 3, 4]}
assert sample_df.ww.columns["id"].metadata == {"important_keys": [1, 2, 3]}
def test_dataframe_methods_on_accessor_inplace(sample_df):
# TODO: Try to find a supported inplace method for Dask, if one exists
if _is_dask_dataframe(sample_df):
pytest.xfail("Dask does not support sort_values or rename inplace.")
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema")
df_pre_sort = schema_df.copy()
schema_df.ww.sort_values(["full_name"], inplace=True)
assert schema_df.ww.name == "test_schema"
pd.testing.assert_frame_equal(
to_pandas(schema_df), to_pandas(df_pre_sort.sort_values(["full_name"]))
)
warning = "Operation performed by insert has invalidated the Woodwork typing information:\n "
"The following columns in the DataFrame were missing from the typing information: {'new_name'}.\n "
"Please initialize Woodwork with DataFrame.ww.init"
with pytest.warns(TypingInfoMismatchWarning, match=warning):
schema_df.ww.insert(loc=0, column="new_name", value=[1, 2, 3, 4])
assert "new_name" in schema_df.columns
assert schema_df.ww.schema is None
def test_dataframe_methods_on_accessor_returning_series(sample_df):
schema_df = sample_df[["id", "age", "is_registered"]]
schema_df.ww.init(name="test_schema")
dtypes = schema_df.ww.dtypes
assert schema_df.ww.name == "test_schema"
pd.testing.assert_series_equal(dtypes, schema_df.dtypes)
all_series = schema_df.ww.all()
assert schema_df.ww.name == "test_schema"
pd.testing.assert_series_equal(to_pandas(all_series), to_pandas(schema_df.all()))
def test_dataframe_methods_on_accessor_other_returns(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema")
shape = schema_df.ww.shape
assert schema_df.ww.name == "test_schema"
if _is_dask_dataframe(sample_df):
shape = (shape[0].compute(), shape[1])
assert shape == to_pandas(schema_df).shape
assert schema_df.ww.name == "test_schema"
if not _is_dask_dataframe(sample_df):
# keys() not supported with Dask
pd.testing.assert_index_equal(schema_df.ww.keys(), schema_df.keys())
def test_dataframe_methods_on_accessor_to_pandas(sample_df):
if isinstance(sample_df, pd.DataFrame):
pytest.skip("No need to test converting pandas DataFrame to pandas")
sample_df.ww.init(name="woodwork", index="id")
if _is_dask_dataframe(sample_df):
pd_df = sample_df.ww.compute()
elif _is_koalas_dataframe(sample_df):
pd_df = sample_df.ww.to_pandas()
pytest.skip(
"Bug #1071: Woodwork not initialized after to_pandas call with Koalas categorical column"
)
assert isinstance(pd_df, pd.DataFrame)
assert pd_df.ww.index == "id"
assert pd_df.ww.name == "woodwork"
def test_get_subset_df_with_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"age": Double,
"signup_date": Datetime,
},
semantic_tags={"full_name": ["new_tag", "tag2"], "age": "numeric"},
)
schema = schema_df.ww.schema
empty_df = schema_df.ww._get_subset_df_with_schema([])
assert len(empty_df.columns) == 0
assert empty_df.ww.schema is not None
pd.testing.assert_frame_equal(to_pandas(empty_df), to_pandas(schema_df[[]]))
validate_subset_schema(empty_df.ww.schema, schema)
just_index = schema_df.ww._get_subset_df_with_schema(["id"])
assert just_index.ww.index == schema.index
assert just_index.ww.time_index is None
pd.testing.assert_frame_equal(to_pandas(just_index), to_pandas(schema_df[["id"]]))
validate_subset_schema(just_index.ww.schema, schema)
just_time_index = schema_df.ww._get_subset_df_with_schema(["signup_date"])
assert just_time_index.ww.time_index == schema.time_index
assert just_time_index.ww.index is None
pd.testing.assert_frame_equal(
to_pandas(just_time_index), to_pandas(schema_df[["signup_date"]])
)
validate_subset_schema(just_time_index.ww.schema, schema)
transfer_schema = schema_df.ww._get_subset_df_with_schema(["phone_number"])
assert transfer_schema.ww.index is None
assert transfer_schema.ww.time_index is None
pd.testing.assert_frame_equal(
to_pandas(transfer_schema), to_pandas(schema_df[["phone_number"]])
)
validate_subset_schema(transfer_schema.ww.schema, schema)
def test_select_ltypes_no_match_and_all(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
assert len(schema_df.ww.select(PostalCode).columns) == 0
assert len(schema_df.ww.select(["PostalCode", PhoneNumber]).columns) == 1
all_types = ww.type_system.registered_types
assert len(schema_df.ww.select(exclude=all_types).columns) == 0
df_all_types = schema_df.ww.select(all_types)
pd.testing.assert_frame_equal(to_pandas(df_all_types), to_pandas(schema_df))
assert df_all_types.ww.schema == schema_df.ww.schema
def test_select_ltypes_strings(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_multiple_ltypes = schema_df.ww.select(
["PersonFullName", "email_address", "double", "BooleanNullable", "datetime"]
)
assert len(df_multiple_ltypes.columns) == 7
assert "phone_number" not in df_multiple_ltypes.columns
assert "id" not in df_multiple_ltypes.columns
df_single_ltype = schema_df.ww.select("person_full_name")
assert set(df_single_ltype.columns) == {"full_name"}
def test_select_ltypes_objects(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_multiple_ltypes = schema_df.ww.select(
[PersonFullName, EmailAddress, Double, BooleanNullable, Datetime]
)
assert len(df_multiple_ltypes.columns) == 7
assert "phone_number" not in df_multiple_ltypes.columns
assert "id" not in df_multiple_ltypes.columns
df_single_ltype = schema_df.ww.select(PersonFullName)
assert len(df_single_ltype.columns) == 1
def test_select_ltypes_mixed(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_mixed_ltypes = schema_df.ww.select(["PersonFullName", "email_address", Double])
assert len(df_mixed_ltypes.columns) == 4
assert "phone_number" not in df_mixed_ltypes.columns
def test_select_ltypes_mixed_exclude(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_mixed_ltypes = schema_df.ww.select(
exclude=["PersonFullName", "email_address", Double]
)
assert len(df_mixed_ltypes.columns) == 12
assert "full_name" not in df_mixed_ltypes.columns
assert "email_address" not in df_mixed_ltypes.columns
assert "double" not in df_mixed_ltypes.columns
assert "double_with_nan" not in df_mixed_ltypes.columns
def test_select_ltypes_table(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(
name="testing",
index="id",
time_index="signup_date",
logical_types=sample_correct_logical_types,
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
},
)
df_no_indices = schema_df.ww.select("phone_number")
assert df_no_indices.ww.index is None
assert df_no_indices.ww.time_index is None
df_with_indices = schema_df.ww.select(["Datetime", "Integer"])
assert df_with_indices.ww.index == "id"
assert df_with_indices.ww.time_index == "signup_date"
df_values = schema_df.ww.select(["PersonFullName"])
assert df_values.ww.name == schema_df.ww.name
assert df_values.ww.columns["full_name"] == schema_df.ww.columns["full_name"]
def test_select_semantic_tags(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
semantic_tags={
"full_name": "tag1",
"email": ["tag2"],
"age": ["numeric", "tag2"],
"phone_number": ["tag3", "tag2"],
"is_registered": "category",
},
time_index="signup_date",
)
df_one_match = schema_df.ww.select("numeric")
assert len(df_one_match.columns) == 6
assert "age" in df_one_match.columns
assert "id" in df_one_match.columns
df_multiple_matches = schema_df.ww.select("tag2")
assert len(df_multiple_matches.columns) == 3
assert "age" in df_multiple_matches.columns
assert "phone_number" in df_multiple_matches.columns
assert "email" in df_multiple_matches.columns
df_multiple_tags = schema_df.ww.select(["numeric", "time_index"])
assert len(df_multiple_tags.columns) == 7
assert "id" in df_multiple_tags.columns
assert "age" in df_multiple_tags.columns
assert "signup_date" in df_multiple_tags.columns
df_overlapping_tags = schema_df.ww.select(["numeric", "tag2"])
assert len(df_overlapping_tags.columns) == 8
assert "id" in df_overlapping_tags.columns
assert "age" in df_overlapping_tags.columns
assert "phone_number" in df_overlapping_tags.columns
assert "email" in df_overlapping_tags.columns
df_common_tags = schema_df.ww.select(["category", "numeric"])
assert len(df_common_tags.columns) == 8
assert "id" in df_common_tags.columns
assert "is_registered" in df_common_tags.columns
assert "age" in df_common_tags.columns
def test_select_semantic_tags_exclude(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
semantic_tags={
"full_name": "tag1",
"email": ["tag2"],
"age": ["numeric", "tag2"],
"phone_number": ["tag3", "tag2"],
"is_registered": "category",
},
time_index="signup_date",
)
df_one_match = schema_df.ww.select(exclude="numeric")
assert len(df_one_match.columns) == 10
assert "age" not in df_one_match.columns
assert "id" not in df_one_match.columns
df_multiple_matches = schema_df.ww.select(exclude="tag2")
assert len(df_multiple_matches.columns) == 13
assert "age" not in df_multiple_matches.columns
assert "phone_number" not in df_multiple_matches.columns
assert "email" not in df_multiple_matches.columns
df_multiple_tags = schema_df.ww.select(exclude=["numeric", "time_index"])
assert len(df_multiple_tags.columns) == 9
assert "id" not in df_multiple_tags.columns
assert "age" not in df_multiple_tags.columns
assert "signup_date" not in df_multiple_tags.columns
df_overlapping_tags = schema_df.ww.select(exclude=["numeric", "tag2"])
assert len(df_overlapping_tags.columns) == 8
assert "id" not in df_overlapping_tags.columns
assert "age" not in df_overlapping_tags.columns
assert "phone_number" not in df_overlapping_tags.columns
assert "email" not in df_overlapping_tags.columns
df_common_tags = schema_df.ww.select(exclude=["category", "numeric"])
assert len(df_common_tags.columns) == 8
assert "id" not in df_common_tags.columns
assert "is_registered" not in df_common_tags.columns
assert "age" not in df_common_tags.columns
def test_select_single_inputs(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
},
)
df_ltype_string = schema_df.ww.select("person_full_name")
assert len(df_ltype_string.columns) == 1
assert "full_name" in df_ltype_string.columns
df_ltype_obj = schema_df.ww.select(IntegerNullable)
assert len(df_ltype_obj.columns) == 2
assert "age" in df_ltype_obj.columns
df_tag_string = schema_df.ww.select("index")
assert len(df_tag_string.columns) == 1
assert "id" in df_tag_string.columns
df_tag_instantiated = schema_df.ww.select("Datetime")
assert len(df_tag_instantiated.columns) == 2
assert "signup_date" in df_tag_instantiated.columns
def test_select_list_inputs(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
"email": "tag2",
"is_registered": "category",
},
)
df_just_strings = schema_df.ww.select(
["PersonFullName", "index", "tag2", "boolean_nullable"]
)
assert len(df_just_strings.columns) == 4
assert "id" in df_just_strings.columns
assert "full_name" in df_just_strings.columns
assert "email" in df_just_strings.columns
assert "is_registered" in df_just_strings.columns
df_mixed_selectors = schema_df.ww.select(
[PersonFullName, "index", "time_index", Integer]
)
assert len(df_mixed_selectors.columns) == 4
assert "id" in df_mixed_selectors.columns
assert "full_name" in df_mixed_selectors.columns
assert "signup_date" in df_mixed_selectors.columns
df_common_tags = schema_df.ww.select(
["category", "numeric", BooleanNullable, Datetime]
)
assert len(df_common_tags.columns) == 9
assert "is_registered" in df_common_tags.columns
assert "age" in df_common_tags.columns
assert "signup_date" in df_common_tags.columns
def test_select_semantic_tags_no_match(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
"email": "tag2",
},
)
assert len(schema_df.ww.select(["doesnt_exist"]).columns) == 0
df_multiple_unused = schema_df.ww.select(
["doesnt_exist", "boolean_nullable", "category", PhoneNumber]
)
assert len(df_multiple_unused.columns) == 3
df_unused_ltype = schema_df.ww.select(
["date_of_birth", "doesnt_exist", PostalCode, Integer]
)
assert len(df_unused_ltype.columns) == 3
def test_select_repetitive(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
"email": "tag2",
},
)
df_repeat_tags = schema_df.ww.select(["new_tag", "new_tag"])
assert len(df_repeat_tags.columns) == 1
assert set(df_repeat_tags.columns) == {"full_name"}
df_repeat_ltypes = schema_df.ww.select(["PhoneNumber", PhoneNumber, "phone_number"])
assert len(df_repeat_ltypes.columns) == 1
assert set(df_repeat_ltypes.columns) == {"phone_number"}
def test_select_instantiated_ltype():
ymd_format = Datetime(datetime_format="%Y~%m~%d")
df = pd.DataFrame(
{
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd": ["2019~01~01", "2019~01~02", "2019~01~03"],
}
)
df.ww.init(logical_types={"ymd": ymd_format, "dates": Datetime})
new_df = df.ww.select("Datetime")
assert len(new_df.columns) == 2
new_df = df.ww.select(Datetime)
assert len(new_df.columns) == 2
err_msg = "Invalid selector used in include: Datetime cannot be instantiated"
with pytest.raises(TypeError, match=err_msg):
df.ww.select(ymd_format)
def test_select_return_schema(sample_df):
sample_df.ww.init()
# Multiple column matches
df_schema = sample_df.ww.select(include="Unknown", return_schema=True)
assert isinstance(df_schema, TableSchema)
assert len(df_schema.columns) == 2
assert df_schema == sample_df.ww.select(include="Unknown").ww.schema
# Single column match
single_schema = sample_df.ww.select(include="BooleanNullable", return_schema=True)
assert isinstance(single_schema, TableSchema)
assert len(single_schema.columns) == 1
assert single_schema == sample_df.ww.select(include="BooleanNullable").ww.schema
# No matches
empty_schema = sample_df.ww.select(include="PhoneNumber", return_schema=True)
assert isinstance(empty_schema, TableSchema)
assert len(empty_schema.columns) == 0
@pytest.mark.parametrize(
"ww_type, pandas_type",
[
(["Integer", "IntegerNullable"], "int"),
(["Double"], "float"),
(["Datetime"], "datetime"),
(["Unknown", "EmailAddress", "URL", "IPAddress"], "string"),
(["Categorical"], "category"),
(["Boolean", "BooleanNullable"], "boolean"),
],
)
def test_select_retains_column_order(ww_type, pandas_type, sample_df):
if _is_koalas_dataframe(sample_df) and pandas_type in ["category", "string"]:
pytest.skip("Koalas stores categories as strings")
sample_df.ww.init()
ww_schema_column_order = [
x for x in sample_df.ww.select(ww_type, return_schema=True).columns.keys()
]
pandas_column_order = [
x for x in sample_df.select_dtypes(include=pandas_type).columns
]
assert ww_schema_column_order == pandas_column_order
def test_select_include_and_exclude_error(sample_df):
sample_df.ww.init()
err_msg = "Cannot specify values for both 'include' and 'exclude' in a single call."
with pytest.raises(ValueError, match=err_msg):
sample_df.ww.select(include="Integer", exclude="Double")
with pytest.raises(ValueError, match=err_msg):
sample_df.ww.select(include=[], exclude=[])
def test_select_no_selectors_error(sample_df):
sample_df.ww.init()
err_msg = "Must specify values for either 'include' or 'exclude'."
with pytest.raises(ValueError, match=err_msg):
sample_df.ww.select()
def test_accessor_set_index(sample_df):
sample_df.ww.init()
sample_df.ww.set_index("id")
assert sample_df.ww.index == "id"
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
assert (sample_df.index == sample_df["id"]).all()
sample_df.ww.set_index("full_name")
assert sample_df.ww.index == "full_name"
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
assert (sample_df.index == sample_df["full_name"]).all()
sample_df.ww.set_index(None)
assert sample_df.ww.index is None
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
# Check that underlying index doesn't get reset when Woodwork index is removed
assert (sample_df.index == sample_df["full_name"]).all()
def test_accessor_set_index_errors(sample_df):
sample_df.ww.init()
error = "Specified index column `testing` not found in TableSchema."
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.set_index("testing")
if isinstance(sample_df, pd.DataFrame):
        # Index uniqueness is not validated for Dask/Koalas
error = "Index column must be unique"
with pytest.raises(LookupError, match=error):
sample_df.ww.set_index("age")
def test_set_types(sample_df):
sample_df.ww.init(index="full_name", time_index="signup_date")
original_df = sample_df.ww.copy()
sample_df.ww.set_types()
assert original_df.ww.schema == sample_df.ww.schema
pd.testing.assert_frame_equal(to_pandas(original_df), to_pandas(sample_df))
sample_df.ww.set_types(logical_types={"is_registered": "IntegerNullable"})
assert sample_df["is_registered"].dtype == "Int64"
sample_df.ww.set_types(
semantic_tags={"signup_date": ["new_tag"]},
logical_types={"full_name": "Categorical"},
retain_index_tags=False,
)
assert sample_df.ww.index is None
assert sample_df.ww.time_index is None
def test_set_types_errors(sample_df):
sample_df.ww.init(index="full_name")
error = "String invalid is not a valid logical type"
with pytest.raises(ValueError, match=error):
sample_df.ww.set_types(logical_types={"id": "invalid"})
if isinstance(sample_df, pd.DataFrame):
# Dask does not error on invalid type conversion until compute
# Koalas does conversion and fills values with NaN
error = (
"Error converting datatype for email from type string "
"to type float64. Please confirm the underlying data is consistent with "
"logical type Double."
)
with pytest.raises(TypeConversionError, match=error):
sample_df.ww.set_types(logical_types={"email": "Double"})
error = re.escape(
"Cannot add 'index' tag directly for column email. To set a column as the index, "
"use DataFrame.ww.set_index() instead."
)
with pytest.raises(ValueError, match=error):
sample_df.ww.set_types(semantic_tags={"email": "index"})
def test_pop(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(semantic_tags={"age": "custom_tag"})
original_schema = schema_df.ww.schema
popped_series = schema_df.ww.pop("age")
assert isinstance(popped_series, type(sample_df["age"]))
assert popped_series.ww.semantic_tags == {"custom_tag", "numeric"}
pd.testing.assert_series_equal(
to_pandas(popped_series),
pd.Series([pd.NA, 33, 33, 57], dtype="Int64", name="age"),
)
assert isinstance(popped_series.ww.logical_type, IntegerNullable)
assert "age" not in schema_df.columns
assert "age" not in schema_df.ww.columns
assert "age" not in schema_df.ww.logical_types.keys()
assert "age" not in schema_df.ww.semantic_tags.keys()
assert schema_df.ww.schema == original_schema.get_subset_schema(
list(schema_df.columns)
)
schema_df = sample_df.copy()
schema_df.ww.init(
name="table",
logical_types={"age": IntegerNullable},
semantic_tags={"age": "custom_tag"},
use_standard_tags=False,
)
popped_series = schema_df.ww.pop("age")
assert popped_series.ww.semantic_tags == {"custom_tag"}
def test_pop_index(sample_df):
sample_df.ww.init(index="id", name="df_name")
assert sample_df.ww.index == "id"
id_col = sample_df.ww.pop("id")
assert sample_df.ww.index is None
assert "index" in id_col.ww.semantic_tags
def test_pop_error(sample_df):
sample_df.ww.init(
name="table",
logical_types={"age": IntegerNullable},
semantic_tags={"age": "custom_tag"},
use_standard_tags=True,
)
with pytest.raises(
ColumnNotPresentError, match="Column with name 'missing' not found in DataFrame"
):
sample_df.ww.pop("missing")
def test_accessor_drop(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
single_input_df = schema_df.ww.drop("is_registered")
assert len(single_input_df.ww.columns) == (len(schema_df.columns) - 1)
assert "is_registered" not in single_input_df.ww.columns
assert (
to_pandas(schema_df)
.drop("is_registered", axis="columns")
.equals(to_pandas(single_input_df))
)
list_input_df = schema_df.ww.drop(["is_registered"])
assert len(list_input_df.ww.columns) == (len(schema_df.columns) - 1)
assert "is_registered" not in list_input_df.ww.columns
assert (
to_pandas(schema_df)
.drop("is_registered", axis="columns")
.equals(to_pandas(list_input_df))
)
# should be equal to the single input example above
assert single_input_df.ww.schema == list_input_df.ww.schema
assert to_pandas(single_input_df).equals(to_pandas(list_input_df))
multiple_list_df = schema_df.ww.drop(["age", "full_name", "is_registered"])
assert len(multiple_list_df.ww.columns) == (len(schema_df.columns) - 3)
assert "is_registered" not in multiple_list_df.ww.columns
assert "full_name" not in multiple_list_df.ww.columns
assert "age" not in multiple_list_df.ww.columns
assert (
to_pandas(schema_df)
.drop(["is_registered", "age", "full_name"], axis="columns")
.equals(to_pandas(multiple_list_df))
)
# Drop the same columns in a different order and confirm resulting DataFrame column order doesn't change
different_order_df = schema_df.ww.drop(["is_registered", "age", "full_name"])
assert different_order_df.ww.schema == multiple_list_df.ww.schema
assert to_pandas(multiple_list_df).equals(to_pandas(different_order_df))
def test_accessor_drop_inplace(sample_df):
sample_df.ww.init()
inplace_df = sample_df.copy()
inplace_df.ww.init()
if _is_dask_dataframe(sample_df):
error = "Drop inplace not supported for Dask"
with pytest.raises(ValueError, match=error):
inplace_df.ww.drop(["is_registered"], inplace=True)
elif _is_koalas_dataframe(sample_df):
error = "Drop inplace not supported for Koalas"
with pytest.raises(ValueError, match=error):
inplace_df.ww.drop(["is_registered"], inplace=True)
else:
inplace_df.ww.drop(["is_registered"], inplace=True)
assert len(inplace_df.ww.columns) == (len(sample_df.columns) - 1)
assert "is_registered" not in inplace_df.ww.columns
assert sample_df.drop("is_registered", axis="columns").equals(inplace_df)
def test_accessor_drop_indices(sample_df):
sample_df.ww.init(index="id", time_index="signup_date")
assert sample_df.ww.index == "id"
assert sample_df.ww.time_index == "signup_date"
dropped_index_df = sample_df.ww.drop("id")
assert "id" not in dropped_index_df.ww.columns
assert dropped_index_df.ww.index is None
assert dropped_index_df.ww.time_index == "signup_date"
dropped_time_index_df = sample_df.ww.drop(["signup_date"])
assert "signup_date" not in dropped_time_index_df.ww.columns
assert dropped_time_index_df.ww.time_index is None
assert dropped_time_index_df.ww.index == "id"
def test_accessor_drop_errors(sample_df):
sample_df.ww.init()
error = re.escape("Column(s) '['not_present']' not found in DataFrame")
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.drop("not_present")
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.drop(["age", "not_present"])
error = re.escape("Column(s) '['not_present1', 4]' not found in DataFrame")
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.drop(["not_present1", 4])
def test_accessor_rename(sample_df):
table_metadata = {"table_info": "this is text"}
id_description = "the id of the row"
id_origin = "base"
sample_df.ww.init(
index="id",
time_index="signup_date",
table_metadata=table_metadata,
column_descriptions={"id": id_description},
column_origins={"id": id_origin},
semantic_tags={"age": "test_tag"},
logical_types={"age": Double},
)
original_df = sample_df.ww.copy()
new_df = sample_df.ww.rename({"age": "birthday"})
assert to_pandas(sample_df.rename(columns={"age": "birthday"})).equals(
to_pandas(new_df)
)
# Confirm original dataframe hasn't changed
assert to_pandas(sample_df).equals(to_pandas(original_df))
assert sample_df.ww.schema == original_df.ww.schema
assert original_df.columns.get_loc("age") == new_df.columns.get_loc("birthday")
pd.testing.assert_series_equal(
to_pandas(original_df["age"]), to_pandas(new_df["birthday"]), check_names=False
)
# confirm that metadata and descriptions are there
assert new_df.ww.metadata == table_metadata
assert new_df.ww.columns["id"].description == id_description
assert new_df.ww.columns["id"].origin == id_origin
old_col = sample_df.ww.columns["age"]
new_col = new_df.ww.columns["birthday"]
assert old_col.logical_type == new_col.logical_type
assert old_col.semantic_tags == new_col.semantic_tags
new_df = sample_df.ww.rename({"age": "full_name", "full_name": "age"})
pd.testing.assert_series_equal(
to_pandas(original_df["age"]), to_pandas(new_df["full_name"]), check_names=False
)
pd.testing.assert_series_equal(
to_pandas(original_df["full_name"]), to_pandas(new_df["age"]), check_names=False
)
assert original_df.columns.get_loc("age") == new_df.columns.get_loc("full_name")
assert original_df.columns.get_loc("full_name") == new_df.columns.get_loc("age")
def test_accessor_rename_inplace(sample_df):
table_metadata = {"table_info": "this is text"}
id_description = "the id of the row"
id_origin = "base"
sample_df.ww.init(
index="id",
time_index="signup_date",
table_metadata=table_metadata,
column_descriptions={"id": id_description},
column_origins={"id": id_origin},
semantic_tags={"age": "test_tag"},
logical_types={"age": Double},
)
original_df = sample_df.ww.copy()
inplace_df = sample_df.ww.copy()
if _is_dask_dataframe(sample_df):
error = "Rename inplace not supported for Dask"
with pytest.raises(ValueError, match=error):
inplace_df.ww.rename({"age": "birthday"}, inplace=True)
elif _is_koalas_dataframe(sample_df):
error = "Rename inplace not supported for Koalas"
with pytest.raises(ValueError, match=error):
inplace_df.ww.rename({"age": "birthday"}, inplace=True)
else:
inplace_df.ww.rename({"age": "birthday"}, inplace=True)
assert original_df.columns.get_loc("age") == inplace_df.columns.get_loc(
"birthday"
)
pd.testing.assert_series_equal(
to_pandas(original_df["age"]),
to_pandas(inplace_df["birthday"]),
check_names=False,
)
# confirm that metadata and descriptions are there
assert inplace_df.ww.metadata == table_metadata
assert inplace_df.ww.columns["id"].description == id_description
assert inplace_df.ww.columns["id"].origin == id_origin
old_col = sample_df.ww.columns["age"]
new_col = inplace_df.ww.columns["birthday"]
assert old_col.logical_type == new_col.logical_type
assert old_col.semantic_tags == new_col.semantic_tags
new_df = sample_df.ww.copy()
new_df.ww.rename({"age": "full_name", "full_name": "age"}, inplace=True)
pd.testing.assert_series_equal(
to_pandas(original_df["age"]),
to_pandas(new_df["full_name"]),
check_names=False,
)
pd.testing.assert_series_equal(
to_pandas(original_df["full_name"]),
to_pandas(new_df["age"]),
check_names=False,
)
assert original_df.columns.get_loc("age") == new_df.columns.get_loc("full_name")
assert original_df.columns.get_loc("full_name") == new_df.columns.get_loc("age")
def test_accessor_rename_indices(sample_df):
sample_df.ww.init(index="id", time_index="signup_date")
renamed_df = sample_df.ww.rename(
{"id": "renamed_index", "signup_date": "renamed_time_index"}
)
assert "id" not in renamed_df.columns
assert "signup_date" not in renamed_df.columns
assert "renamed_index" in renamed_df.columns
assert "renamed_time_index" in renamed_df.columns
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
assert all(renamed_df.index == renamed_df["renamed_index"])
assert renamed_df.ww.index == "renamed_index"
assert renamed_df.ww.time_index == "renamed_time_index"
def test_accessor_schema_properties(sample_df):
sample_df.ww.init(index="id", time_index="signup_date")
schema_properties = [
"logical_types",
"semantic_tags",
"index",
"time_index",
"use_standard_tags",
]
for schema_property in schema_properties:
prop_from_accessor = getattr(sample_df.ww, schema_property)
prop_from_schema = getattr(sample_df.ww.schema, schema_property)
assert prop_from_accessor == prop_from_schema
# Assumes we don't have setters for any of these attributes
error = "can't set attribute"
with pytest.raises(AttributeError, match=error):
setattr(sample_df.ww, schema_property, "new_value")
def test_sets_koalas_option_on_init(sample_df_koalas):
if ks:
ks.set_option("compute.ops_on_diff_frames", False)
sample_df_koalas.ww.init()
assert ks.get_option("compute.ops_on_diff_frames") is True
def test_setitem_invalid_input(sample_df):
df = sample_df.copy()
df.ww.init(index="id", time_index="signup_date")
error_msg = "New column must be of Series type"
with pytest.raises(ValueError, match=error_msg):
df.ww["test"] = [1, 2, 3]
error_msg = "Cannot reassign index. Change column name and then use df.ww.set_index to reassign index."
with pytest.raises(KeyError, match=error_msg):
df.ww["id"] = df.id
error_msg = "Cannot reassign time index. Change column name and then use df.ww.set_time_index to reassign time index."
with pytest.raises(KeyError, match=error_msg):
df.ww["signup_date"] = df.signup_date
def test_setitem_indexed_column_on_unindexed_dataframe(sample_df):
sample_df.ww.init()
col = sample_df.ww.pop("id")
col.ww.add_semantic_tags(semantic_tags="index")
warning = 'Cannot add "index" tag on id directly to the DataFrame. The "index" tag has been removed from id. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["id"] = col
assert sample_df.ww.index is None
assert ww.is_schema_valid(sample_df, sample_df.ww.schema)
assert sample_df.ww["id"].ww.semantic_tags == {"numeric"}
def test_setitem_indexed_column_on_indexed_dataframe(sample_df):
sample_df.ww.init()
sample_df.ww.set_index("id")
col = sample_df.ww.pop("id")
warning = 'Cannot add "index" tag on id directly to the DataFrame. The "index" tag has been removed from id. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["id"] = col
assert sample_df.ww.index is None
assert ww.is_schema_valid(sample_df, sample_df.ww.schema)
assert sample_df.ww["id"].ww.semantic_tags == {"numeric"}
sample_df.ww.init(logical_types={"email": "Categorical"})
sample_df.ww.set_index("id")
col = sample_df.ww.pop("email")
col.ww.add_semantic_tags(semantic_tags="index")
warning = 'Cannot add "index" tag on email directly to the DataFrame. The "index" tag has been removed from email. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["email"] = col
assert sample_df.ww.index == "id"
assert sample_df.ww.semantic_tags["email"] == {"category"}
def test_setitem_indexed_column_on_unindexed_dataframe_no_standard_tags(sample_df):
sample_df.ww.init()
col = sample_df.ww.pop("id")
col.ww.init(semantic_tags="index", use_standard_tags=False)
warning = 'Cannot add "index" tag on id directly to the DataFrame. The "index" tag has been removed from id. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["id"] = col
assert sample_df.ww.index is None
assert ww.is_schema_valid(sample_df, sample_df.ww.schema)
assert sample_df.ww["id"].ww.semantic_tags == set()
def test_setitem_different_name(sample_df):
df = sample_df.copy()
df.ww.init()
new_series = pd.Series([1, 2, 3, 4], name="wrong", dtype="float")
if _is_koalas_dataframe(sample_df):
new_series = ks.Series(new_series)
# Assign series with name `wrong` to existing column with name `id`
df.ww["id"] = new_series
assert df.ww["id"].name == "id"
assert "id" in df.ww.columns
assert "wrong" not in df.ww.columns
assert "wrong" not in df.columns
new_series2 = pd.Series([1, 2, 3, 4], name="wrong2", dtype="float")
if _is_koalas_dataframe(sample_df):
new_series2 = ks.Series(new_series2)
# Assign series with name `wrong2` to new column with name `new_col`
df.ww["new_col"] = new_series2
assert df.ww["new_col"].name == "new_col"
assert "new_col" in df.ww.columns
assert "wrong2" not in df.ww.columns
assert "wrong2" not in df.columns
def test_setitem_new_column(sample_df):
df = sample_df.copy()
df.ww.init(use_standard_tags=False)
new_series = pd.Series([1, 2, 3, 4])
if _is_koalas_dataframe(sample_df):
new_series = ks.Series(new_series)
dtype = "int64"
df.ww["test_col2"] = new_series
assert "test_col2" in df.columns
assert "test_col2" in df.ww._schema.columns.keys()
assert isinstance(df.ww["test_col2"].ww.logical_type, Integer)
assert df.ww["test_col2"].ww.use_standard_tags is True
assert df.ww["test_col2"].ww.semantic_tags == {"numeric"}
assert df.ww["test_col2"].name == "test_col2"
assert df.ww["test_col2"].dtype == dtype
new_series =
|
pd.Series([1, 2, 3], dtype="float")
|
pandas.Series
|
import numpy as np
import pandas as pd
import glob
from pmdarima.arima import ndiffs
from pandas.tseries.offsets import QuarterBegin, QuarterEnd
from .hand_select import hand_select
import pandas_datareader.data as web
import xlrd, csv
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook, InvalidFileException
def set_date_as_index(df):
df.columns = [name.lower() for name in df.columns]
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True)
return df
def make_float(df):
df = df.replace(".", np.nan)
df = df.astype(float)
return df
def read_files(paths, fillna=True):
csv_list = []
xls_list = []
for path in paths:
csv_files = glob.glob(path + "/*.csv")
xls_files = glob.glob(path + "/*.xls")
for elt in csv_files:
df = pd.read_csv(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
csv_list.append(df)
for elt in xls_files:
try:
df = pd.read_excel(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
xls_list.append(df)
except Exception:
pass
return csv_list, xls_list
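# Hedged usage sketch (not part of the original module): read_files simply globs
# *.csv and *.xls under each folder, so with these hypothetical paths it returns
# two (possibly empty) lists of date-indexed, forward-filled, float-typed DataFrames.
def _example_read_files():
    example_paths = ["data/fred", "data/msci"]  # hypothetical folders
    csv_frames, xls_frames = read_files(example_paths, fillna=True)
    print(len(csv_frames), "csv files loaded,", len(xls_frames), "xls files loaded")
    return csv_frames, xls_frames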
def make_stationary(df):
df = hand_select(df)
df = df.dropna()
columns = df.columns
for name in columns:
x = df[name].values
d_kpss = ndiffs(x, test='kpss')
d_adf = ndiffs(x, test='adf')
d_pp = ndiffs(x, test='pp')
d_ = max(d_kpss, d_adf, d_pp)
if d_ > 0:
new_name = name + '_diff' + str(d_)
if d_ == 1:
df[new_name] = df[name].diff()
elif d_ == 2:
df[new_name] = df[name].diff().diff()
elif d_ > 2:
raise ValueError('High order differentiation')
else:
raise Exception('Something is wrong')
df = df.drop(columns=[name])
return df
def open_xls_as_xlsx(filename):
# first open using xlrd
book = xlrd.open_workbook(filename)
index = 0
nrows, ncols = 0, 0
while nrows * ncols == 0:
sheet = book.sheet_by_index(index)
nrows = sheet.nrows
ncols = sheet.ncols
index += 1
# prepare a xlsx sheet
book1 = Workbook()
sheet1 = book1.active
for row in range(nrows):
    for col in range(ncols):
        # xlrd cells are 0-indexed while openpyxl cells are 1-indexed
        sheet1.cell(row=row + 1, column=col + 1).value = sheet.cell_value(row, col)
return book1
def read_data(path, sheet=False, header='infer'):
file_format = path.split('.')[-1]
if 'msci' in path:
header = 6
if sheet is False:
# if file_format == 'csv':
# df = pd.read_csv(path, header=header)
# elif file_format == 'xls':
# df = open_xls_as_xlsx(path)
# else:
try:
df = pd.read_excel(path, header=header, engine='openpyxl')
except Exception:
try:
df = open_xls_as_xlsx(path)
except Exception as e:
try:
df = pd.read_csv(path, header=header)
except Exception as e:
raise Exception(e)
else:
try:
# excel_file = pd.ExcelFile(path)
# assert sheet in excel_file.sheet_names
# df = excel_file.parse(sheet, header=header)
df = pd.read_excel(path, header=header, engine='openpyxl', sheet_name=sheet)
except Exception:
raise Exception("Can not read sheet")
df.columns = [name.lower() for name in df.columns]
if 'year2' in df.columns:
drop_columns = ['year2']
else:
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
first_valid = df.iloc[:, 1].first_valid_index()
last_valid = df.iloc[:, 1].last_valid_index() + 1
df = df.iloc[first_valid:last_valid]
df.columns = df.columns.str.replace('.', '_')
df.columns = df.columns.str.replace(' ', '_')
df.columns = df.columns.str.replace('__', '_')
return df
def make_monthly_date(df, offset=True):
datetime = pd.to_datetime(
(
df['year'].astype(int) * 100
+ df['month'].astype(int)
).astype(str),
format='%Y%m'
)
if offset:
datetime += pd.tseries.offsets.MonthBegin(1)
else:
datetime = datetime
df['date'] = datetime
df.drop(columns=['year', 'month'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_monthly' for elt in df.columns]
return df
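# Hedged usage sketch (illustrative data only): integer 'year'/'month' columns are
# combined into a datetime index (shifted to the next month begin when offset=True)
# and the remaining columns get a '_monthly' suffix.
def _example_make_monthly_date():
    toy = pd.DataFrame({"year": [2020, 2020], "month": [1, 2], "cpi": [1.1, 1.2]})
    return make_monthly_date(toy, offset=True)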
def make_quarterly_date(df, offset=True):
df['year'] = df['year'].str.lower()
df['year'] = df['year'].str.replace(r'(q\d)-(\d+)', r'\2-\1')
if offset:
# Bug that quarterbegin is March 01
df['date'] =
|
pd.to_datetime(df['year'])
|
pandas.to_datetime
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
from src.utils import unflatten
def rank_labels(pd_ser):
'''
Rank behaviour variables and ignore labels of sparse variables.
Return a flattened array of the ranked values and the list of labels.
'''
pd_ser = pd_ser.replace(to_replace=0, value=np.nan)
pd_ser = pd_ser.sort_values(ascending=False, )
behav_labels = list(pd_ser.index)
v_ranked = pd_ser.values
v_ranked_flat = np.zeros((len(behav_labels),1))
v_ranked_flat.flat[:v_ranked.shape[0]] = v_ranked
return v_ranked_flat, behav_labels
def plot_heatmap(ax, mat, x_labels, y_labels, cb_max, cmap=plt.cm.RdBu_r):
'''
Plot a single generic heatmap on a provided axis.
ax: the axis of the figure
mat: 2-d matrix
x_labels, y_labels: lists of labels
cb_max: maximum value of the color bar
'''
graph = ax.matshow(mat, vmin=-cb_max, vmax=cb_max, cmap=cmap)
ax.set_xticks(np.arange(mat.shape[1]))
ax.set_yticks(np.arange(mat.shape[0]))
ax.set_xticklabels(x_labels, rotation='vertical')
ax.set_yticklabels(y_labels)
return graph
def single_heatmap(mat, x_labels, y_labels, cb_label):
'''
heat map with color bar
'''
cb_max = np.max(np.abs(mat))
fig = plt.figure()
ax = fig.add_subplot(111)
hm = ax.matshow(mat, vmin=-cb_max, vmax=cb_max, cmap=plt.cm.RdBu_r)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=1)
cb = fig.colorbar(hm, cax=cax)
cb.set_label(cb_label)
ax.set_xticks(np.arange(mat.shape[1]))
ax.set_yticks(np.arange(mat.shape[0]))
ax.set_xticklabels(x_labels)
ax.set_yticklabels(y_labels)
return fig
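# Hedged usage sketch (random illustrative data, not project results): draw a small
# square heatmap with a symmetric color scale and a labelled colorbar.
def _example_single_heatmap():
    rng = np.random.default_rng(0)
    mat = rng.standard_normal((4, 4))
    labels = ["a", "b", "c", "d"]
    return single_heatmap(mat, labels, labels, cb_label="weight")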
def plot_SCCA_FC_MWQ(FC_ws, behav_ws, region_labels, behav_labels, cb_max, cmap=plt.cm.RdBu_r):
'''
plotting tool for functional connectivity vs MRIQ
'''
plt.close('all')
fig = plt.figure(figsize=(15,4))
ax = fig.add_subplot(111)
brain = plot_heatmap(ax, FC_ws, region_labels, region_labels, cb_max, cmap)
# add a dashed line along the diagonal
ax.plot([-0.5, len(region_labels)-0.5], [-0.5, len(region_labels)-0.5], ls='--', c='.3')
divider = make_axes_locatable(ax)
ax2 = divider.append_axes("right", size="1%", pad=8)
behav = plot_heatmap(ax2, behav_ws, [' '], behav_labels, cb_max, cmap)
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="50%", pad=0.25)
fig.colorbar(brain, cax=cax)
return fig
def map_labels(data, lables):
df =
|
pd.DataFrame(data, index=lables)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 5, 2020
@authors: enzoampil & jpdeleon
"""
# Import standard library
import os
from inspect import signature
from datetime import datetime
import warnings
from pathlib import Path
from string import digits
import requests
import json
import re
# Import modules
import numpy as np
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
from pandas.io.json import json_normalize
import matplotlib.pyplot as pl
import matplotlib as mpl
# Import from package
from fastquant import get_stock_data, DATA_PATH
warnings.simplefilter("ignore")
mpl.style.use("fivethirtyeight")
COOKIES = {
"BIGipServerPOOL_EDGE": "1427584378.20480.0000",
"JSESSIONID": "r2CYuOovD47c6FDnDoxHKW60.server-ep",
}
CALENDAR_FORMAT = "%m-%d-%Y"
TODAY = datetime.now().date().strftime(CALENDAR_FORMAT)
__all__ = [
"DisclosuresPSE",
"DisclosuresInvestagrams",
"get_company_disclosures",
]
class DisclosuresPSE:
"""
Disclosures scraped from PSE
Attributes
---------
disclosures_combined : pd.DataFrame
Company disclosure summary
"""
def __init__(
self,
symbol,
disclosure_type="all",
start_date="1-1-2020",
end_date=None,
verbose=True,
clobber=False,
):
"""
Parameters
----------
symbol : str
company symbol
disclosure_type : str
type of disclosure available
start_date : str
start date with format %m-%d-%Y
end_date : str
end date with format %m-%d-%Y
"""
self.symbol = symbol.upper()
self.start_date = start_date
self.end_date = TODAY if end_date is None else end_date
self.disclosure_type = disclosure_type
self.stock_data = None
self.verbose = verbose
self.clobber = clobber
if self.verbose:
print("Pulling {} disclosures summary...".format(self.symbol))
self.files = list(
Path(DATA_PATH).glob("{}_disclosures_*.csv".format(self.symbol))
)
self.fp = Path(
DATA_PATH,
"{}_disclosures_{}_{}.csv".format(
self.symbol, self.start_date, self.end_date
),
)
self.company_disclosures = self.get_company_disclosures()
self.disclosure_types = (
self.company_disclosures["Template Name"]
.apply(_remove_amend)
.unique()
)
if self.verbose:
print(
"Found {} disclosures between {} & {} with {} types:\n{}".format(
len(self.company_disclosures),
self.start_date,
self.end_date,
len(self.disclosure_types),
self.disclosure_types,
)
)
print("Pulling details in all {} disclosures...".format(self.symbol))
self.disclosure_tables = self.get_all_disclosure_tables()
self.disclosure_tables_df = self.get_all_disclosure_tables_df()
self.disclosure_backgrounds = self.get_disclosure_details()
self.disclosure_subjects = self.get_disclosure_details(
key="Subject of the Disclosure"
)
self.disclosures_combined = self.get_combined_disclosures()
errmsg = "{} not available between {} & {}.\n".format(
self.disclosure_type, self.start_date, self.end_date
)
errmsg += "Try {}.".format(self.disclosure_types)
if self.disclosure_type != "all":
assert self.disclosure_type in self.disclosure_types, errmsg
self.page_count, self.results_count = None, None
def __repr__(self):
"""show class description after istantiation
"""
fields = signature(self.__init__).parameters
values = ", ".join(repr(getattr(self, f)) for f in fields)
return "{}({})".format(type(self).__name__, values)
def get_stock_data(self, format="ohlc"):
"""overwrites get_stock_data
Note that stock data requires YYYY-MM-DD
"""
start_date = format_date(
self.start_date, informat=CALENDAR_FORMAT, outformat="%Y-%m-%d"
)
end_date = format_date(
self.end_date, informat=CALENDAR_FORMAT, outformat="%Y-%m-%d"
)
if self.verbose:
print("Pulling {} stock data...".format(self.symbol))
data = get_stock_data(
self.symbol,
start_date=start_date,
end_date=end_date,
format=format,
)
self.stock_data = data
return data
def get_company_disclosures_page(self, page=1):
"""
Gets company disclosures for one page
FIXME:
This can be loaded using:
cols = ['Company Name', 'Template Name', 'PSE Form Number',
'Announce Date and Time', 'Circular Number', 'edge_no', 'url']
self.company_disclosures = pd.read_csv(self.fp)[cols]
but posting request is fast anyway
"""
headers = {
"Origin": "https://edge.pse.com.ph",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-PH,en-US;q=0.9,en;q=0.8",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept": "*/*",
"Referer": "https://edge.pse.com.ph/announcements/form.do",
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
}
data = {
"pageNo": page,
"companyId": "",
"keyword": self.symbol,
"tmplNm": "",
"fromDate": self.start_date,
"toDate": self.end_date,
}
response = requests.post(
"https://edge.pse.com.ph/announcements/search.ax",
headers=headers,
cookies=COOKIES,
data=data,
)
if hasattr(response, "text"):
assert (
len(response.text) > 10
), "Empty response from edge.pse.com.ph"
html = response.text
# Indicating the parser (e.g. lxml) removes the bs warning
parsed_html = BeautifulSoup(html, "lxml")
current_page, page_count, results_count = re.findall(
r"[^A-Za-z\[\]\/\s]+",
parsed_html.find("span", {"class": "count"}).text,
)
current_page, self.page_count, self.results_count = (
int(current_page),
int(page_count),
int(results_count),
)
assert (
int(current_page) == page
), "Resulting page is not consistent with the requested page!"
table = parsed_html.find("table", {"class": "list"})
table_rows = table.find_all("tr")
lines = []
edge_nos = []
for tr in table_rows:
td = tr.find_all("td")
row = [tr.text for tr in td]
onclicks_raw = [
tr.a["onclick"]
for tr in td
if tr.a and "onclick" in tr.a.attrs.keys()
]
onclicks = [
s[s.find("('") + 2 : s.find("')")] for s in onclicks_raw
]
lines.append(row)
if onclicks:
edge_nos.append(onclicks[0])
columns = [el.text for el in table.find_all("th")]
if lines[1][0] == "no data.":
errmsg = "No disclosures between {} & {}. ".format(
self.start_date, self.end_date
)
errmsg += "Try longer date interval."
raise ValueError(errmsg)
df = pd.DataFrame(lines, columns=columns)
# Filter to rows where not all columns are null
df = df[df.isna().mean(axis=1) < 1]
df["edge_no"] = edge_nos
df["url"] = (
"https://edge.pse.com.ph/openDiscViewer.do?edge_no=" + df.edge_no
)
df["Announce Date and Time"] = pd.to_datetime(
df["Announce Date and Time"]
)
# ensure index starts at 0
return df.reset_index(drop=True)
def get_company_disclosures(self):
"""
Gets company disclosures for all pages
"""
first_page_df = self.get_company_disclosures_page(page=1)
print("{} pages detected!".format(self.page_count))
if self.page_count == 1:
disclosures_df = first_page_df
else:
page_dfs = [first_page_df]
# We skip the first since we already have it
for page_num in range(2, self.page_count + 1):
page_df = self.get_company_disclosures_page(page=page_num)
page_dfs.append(page_df)
pages_df = pd.concat(page_dfs).reset_index(drop=True)
disclosures_df = pages_df
return disclosures_df
def get_disclosure_file_id(self, edge_no):
"""
Returns file ID of a specified disclosure based on its edge number
ETA: 6.2 seconds per run
"""
headers = {
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "navigate",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-PH,en-US;q=0.9,en;q=0.8",
}
params = (("edge_no", edge_no),)
response = requests.get(
"https://edge.pse.com.ph/openDiscViewer.do",
headers=headers,
params=params,
cookies=COOKIES,
)
html = response.text
parsed_html = BeautifulSoup(html, "lxml")
s = parsed_html.iframe["src"]
file_id = s[s.find("file_id=") + 8 :]
return file_id
def get_disclosure_parsed_html(self, disclosure_file_id):
"""
Returns the bs parsed html for a disclosure given its file id
ETA: 6.55 seconds per run
"""
headers = {
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "nested-navigate",
"Referer": "https://edge.pse.com.ph/openDiscViewer.do?edge_no=8a9a820ee365687cefdfc15ec263a54d",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-PH,en-US;q=0.9,en;q=0.8",
}
params = (("file_id", disclosure_file_id),)
response = requests.get(
"https://edge.pse.com.ph/downloadHtml.do",
headers=headers,
params=params,
cookies=COOKIES,
)
html = response.text
parsed_html = BeautifulSoup(html, "lxml")
return parsed_html
def parse_stock_inventory(self, stock_inventory_str):
stock_inventory_lol = [
row.split("\n") for row in stock_inventory_str.split("\n\n\n\n")
]
stock_inventory_df = pd.DataFrame(
stock_inventory_lol[1:], columns=stock_inventory_lol[0]
)
stock_inventory_df.iloc[:, 1] = (
stock_inventory_df.iloc[:, 1]
.apply(lambda x: x.replace(",", ""))
.astype(int)
)
return stock_inventory_df
def get_company_summary(self, edge_no):
"""
Return the company summary (at the top of edge.pse page) given edge_no
"""
file_id = self.get_disclosure_file_id(edge_no)
parsed_html = self.get_disclosure_parsed_html(file_id)
keys = []
values = []
for dt, dd in zip(
parsed_html.find_all("dt"), parsed_html.find_all("dd")
):
# Take out first token (number followed by a period)
key = " ".join(dt.text.strip().split()[1:])
value = dd.text.strip()
if "Title of Each Class\n" in value:
stock_inventory_df = self.parse_stock_inventory(value)
keys += stock_inventory_df.iloc[:, 0].values.tolist()
values += stock_inventory_df.iloc[:, 1].values.tolist()
else:
keys.append(key)
values.append(value)
company_summary_df = pd.DataFrame()
company_summary_df["key"] = keys
company_summary_df["value"] = values
return company_summary_df
def parse_table(self, table_el):
"""
Returns a table as a dataframe from a table html element
"""
table_dict = {"header": [], "value": []}
for tr in table_el.find_all("tr"):
th = None
td = None
if tr.find("th"):
th = tr.th.text
if tr.find("td"):
td = tr.td.text
table_dict["header"].append(th)
table_dict["value"].append(td)
return pd.DataFrame(table_dict)
def get_tables(self, parsed_html):
"""
Returns a list of tables as pd.DataFrame's from parsed HTML
"""
table_els = parsed_html.find_all("table")
table_dfs = []
for table_el in table_els:
table_df = self.parse_table(table_el)
table_dfs.append(table_df)
return table_dfs
def get_disclosure_tables(self, edge_no):
"""
Returns the disclosure details (at the bottom of edge.pse page) given edge_no
"""
file_id = self.get_disclosure_file_id(edge_no)
parsed_html = self.get_disclosure_parsed_html(file_id)
tables = self.get_tables(parsed_html)
k, v = [], []
for tab in tables:
header = tab.header.dropna().values
value = tab.value.dropna().values
for i, j in zip(header, value):
k.append(i)
v.append(j)
df = pd.DataFrame(np.c_[k, v], columns=["key", "value"])
return df
def load_disclosures(self):
"""Loads disclosures data from disk and append older or newer if necessary
"""
errmsg = "No cache file found."
assert len(self.files) > 0, errmsg
data = pd.read_csv(self.files[0])
data = data.dropna(subset=["Announce Date and Time"])
newest_date = data["Announce Date and Time"].iloc[1]
oldest_date = data["Announce Date and Time"].iloc[-1]
disclosure_details = {}
# append older disclosures
older = (
oldest_date > self.company_disclosures["Announce Date and Time"]
)
idxs1 = np.flatnonzero(older)
if older.sum() > 0:
for idx in tqdm(idxs1):
edge_no = self.company_disclosures.iloc[idx]["edge_no"]
df = self.get_disclosure_tables(edge_no)
disclosure_details[edge_no] = df
# load local data from disk
# FIXME: the JSON object must be str, bytes or bytearray, not float
for key, row in data.iterrows():
try:
edge_no = row["edge_no"]
df = json_normalize(json.loads(row["disclosure_table"])).T
df = df.reset_index()
df.columns = ["key", "value"]
disclosure_details[edge_no] = df
except Exception as e:
print(e)
# append newer disclosures
newer = (
newest_date < self.company_disclosures["Announce Date and Time"]
)
idxs2 = np.flatnonzero(newer)
# append newer disclosures
if newer.sum() > 0:
for idx in tqdm(idxs2):
edge_no = self.company_disclosures.iloc[idx]["edge_no"]
df = self.get_disclosure_tables(edge_no)
disclosure_details[edge_no] = df
if self.verbose:
print("Loaded: {}".format(self.files[0]))
if (older.sum() > 1) or (newer.sum() > 1):
# remove older file
os.remove(self.files[0])
if self.verbose:
print("Deleted: {}".format(self.files[0]))
self.clobber = True
return disclosure_details
def get_all_disclosure_tables(self):
"""
Returns a dict after iterating all disclosures
"""
if (len(self.files) == 0) or self.clobber:
disclosure_details = {}
for edge_no in tqdm(self.company_disclosures["edge_no"].values):
df = self.get_disclosure_tables(edge_no)
disclosure_details[edge_no] = df
else:
disclosure_details = self.load_disclosures()
return disclosure_details
def get_all_disclosure_tables_df(self):
"""
Returns disclosure tables as a dataframe
"""
values = []
for edge_no in self.disclosure_tables.keys():
df = self.disclosure_tables[edge_no]
df_dict = {k: v for k, v in df.values}
# Convert dictionary to string
values.append(json.dumps(df_dict))
return
|
pd.DataFrame(values, columns=["disclosure_table"])
|
pandas.DataFrame
|
# hackathon T - Hacks 3.0
# flask backend of data-cleaning website
import matplotlib.pyplot as plt
#import tensorflow as tf
#from tensorflow.keras import layers
import pandas as pd
import numpy as np
from flask import *
import os
from datetime import *
from subprocess import Popen, PIPE
from math import floor
import converter as con
from flask_ngrok import run_with_ngrok
from meanShift import Mean_Shift
from matplotlib import style
#import seaborn as sns
style.use('ggplot')
from sklearn.model_selection import train_test_split
from datetime import datetime
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
colors = 10*['g', 'r', 'b', 'c', 'k']
from pyparsing import (
Literal,
Word,
Group,
Forward,
alphas,
alphanums,
Regex,
ParseException,
CaselessKeyword,
Suppress,
delimitedList,
)
import math
import operator
exprStack = []
def push_first(toks):
exprStack.append(toks[0])
def push_unary_minus(toks):
for t in toks:
if t == "-":
exprStack.append("unary -")
else:
break
bnf = None
def BNF():
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
global bnf
if not bnf:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums + "_$")
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# add parse action that replaces the function identifier with a (name, number of args) tuple
def insert_fn_argcount_tuple(t):
fn = t.pop(0)
num_args = len(t[0])
t.insert(0, (fn, num_args))
fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
insert_fn_argcount_tuple
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right; that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(push_first)[...]
term = factor + (multop + factor).setParseAction(push_first)[...]
expr <<= term + (addop + term).setParseAction(push_first)[...]
bnf = expr
return bnf
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": int,
"round": round,
"sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
# functions with multiple arguments
"multiply": lambda a, b: a * b,
"hypot": math.hypot,
# functions with a variable number of arguments
"all": lambda *a: all(a),
}
def evaluate_stack(s):
op, num_args = s.pop(), 0
if isinstance(op, tuple):
op, num_args = op
if op == "unary -":
return -evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = evaluate_stack(s)
op1 = evaluate_stack(s)
return opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in fn:
# note: args are pushed onto the stack in reverse order
args = reversed([evaluate_stack(s) for _ in range(num_args)])
return fn[op](*args)
elif op[0].isalpha():
raise Exception("invalid identifier '%s'" % op)
else:
# try to evaluate as int first, then as float if int fails
try:
return int(op)
except ValueError:
return float(op)
def test(s):
val = "NA"
exprStack[:] = []
try:
results = BNF().parseString(s, parseAll=True)
val = evaluate_stack(exprStack[:])
except ParseException as pe:
print(s, "failed parse:", str(pe))
except Exception as e:
print(s, "failed eval:", str(e), exprStack)
return val
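# Hedged self-checks for the parser/evaluator above (pure arithmetic, no app state):
# exponentiation is right-associative, so 2^3^2 parses as 2^(3^2) = 512.
def _example_test_expressions():
    assert test("9") == 9
    assert test("2^3^2") == 512
    assert abs(test("sin(PI/2)") - 1.0) < 1e-9
    assert test("round(PI^2)") == 10
    return True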
def feature_pie(filename, feature1, feature2, class_size = 10):
df = pd.read_csv(filename)
sums = df.groupby(df[feature1])[feature2].sum()
plt.axis('equal')
plt.pie(sums, labels=sums.index, autopct='%1.1f%%', shadow=True, startangle=140)
plt.title("Pie chart on basis of "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
def feature_scatter(filename, feature1, feature2):
df = pd.read_csv(filename)
plt.axis('equal')
plt.scatter(df[feature1], df[feature2])
plt.title("Scatter plot between "+feature1+" and "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
def new_feature(filename, com, name):
df = pd.read_csv(filename)
com = com.split(',')
formula = "_"
temp = "_"
for i, c in enumerate(com):
if c == "formula":
formula = com[i+1]
temp = formula
vals = []
i = 0
print(name)
if name != " ":
i = 1
n = len(df)
for j in range(n):
for k, c in enumerate(com):
if k%2 == 0:
if c == "formula":
break
formula = formula.replace(c, str(df.at[j, com[k+1]]))
vals.append(test(formula))
formula = temp
col = len(df.axes[1])
print(vals)
df[name] = vals
"""
if name != " ":
df.insert(col, vals, True)
else:
df.insert(col, vals, True)
"""
del df['Unnamed: 0']
os.remove(filename)
df.to_csv(filename)
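# Hedged sketch of the `com` convention accepted by new_feature, as inferred from the
# code above: comma-separated placeholder/column pairs followed by "formula" and an
# expression over those placeholders. The file name and data below are made up.
def _example_new_feature(tmp_csv="example_items.csv"):
    pd.DataFrame({"price": [2.0, 3.0], "qty": [4, 5]}).to_csv(tmp_csv)
    # evaluates price*qty per row and stores it in a new 'total' column
    new_feature(tmp_csv, "A,price,B,qty,formula,A*B", "total")
    return pd.read_csv(tmp_csv)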
def disp(filename):
df = pd.read_csv(filename)
n_row = str(len(df))
n_col = str(len(df.axes[1]))
col = []
for c in df.columns:
col.append(c)
types = df.dtypes.tolist()
f = open(filename, "r+")
line0 = f.readline()
line1 = f.readline()
line2 = f.readline()
line3 = f.readline()
line4 = f.readline()
line5 = f.readline()
f.close()
return n_row, n_col, col, types, line0, line1, line2, line3, line4, line5
def stat(filename, feature, func):
df =
|
pd.read_csv(filename)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
def loadDataFrame(filename, logger):
""" Loads the file into a pandas dataframe
Args:
filename - Path to file
"""
dataframe = pd.read_excel(io=filename, header=0, na_values=['[]', '', 'NaN'])
logger.info("Finished Loading File: {} ".format(filename.split('\\')[-1]))
return dataframe
def getSpectra(dataframe, indices):
""" Returns the files for training and testing
Inputs
-----------
dataframe: pd.DataFrame object from which we need to get spectra
indices: row values for which we need the spectra
Returns
-----------
spec_vals: pd.DataFrame object containing spectra values for given
indices
"""
colList = dataframe.columns
spec_inds = [index for index in range(len(colList))
if colList[index].startswith('Spectrum_')]
spec_cols = colList[spec_inds]
spec_vals = dataframe[spec_cols].iloc[indices]
return spec_vals
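# Hedged usage sketch (made-up values): only columns whose names start with
# 'Spectrum_' are returned, restricted to the requested row positions.
def _example_getSpectra():
    toy = pd.DataFrame({
        "Run": [1, 2, 3],
        "Spectrum_900nm": [0.1, 0.2, 0.3],
        "Spectrum_901nm": [0.4, 0.5, 0.6],
    })
    return getSpectra(toy, indices=[0, 2])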
def getDataOffline(dataframe, var='Titer'):
""" Returns the offline values for given variable and dataframe
Inputs
-----------
dataframe: pandas.DataFrame object
var: target variable for which offline values is to be returned
Default: 'Titer'
Returns
-----------
var_values: pandas.DataFrame object with offline values for given
variable var
spectra: pandas.DataFrame object with offline values of spectra
"""
offline_indices = np.where(pd.isnull(dataframe['Run']) == False)[0]
var_values = dataframe[var].iloc[offline_indices]
offline_notNaNs = np.where(
|
pd.isnull(var_values)
|
pandas.isnull
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 16 17:28:03 2017
@author: rwilson
"""
import shelve
import pickle
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
class utilities:
'''A logical collection of functions for interacting with the processed HDF5 databases.
'''
@staticmethod
def DB_pd_data_load(Database, group):
'''Loads in a pandas dataframe stored in group from the Database.
Parameters
----------
Database : str
Relative location of database
group : str
The expected group name
Returns
-------
group_df : DataFrame
The PV data stored in the group ``PVdata`` as a pandas dataframe
'''
with pd.HDFStore(Database, 'r') as h5file:
# Check that the expected group name is found in the database
group_names = [key for key in h5file.keys()]
expected_group_name = group
if expected_group_name not in group_names:
raise KeyError('The %s group was not found within the %s database.' \
%(expected_group_name, Database))
# Load the database
group_df = pd.read_hdf(h5file, expected_group_name)
return group_df
@staticmethod
def hdf_csv_dump(DB_fdl):
''' Dumps the processed CC, PV, and TShdrs databases to csv files. Note:
this function should be run in the run folder, not the database folder.
---inputs---
DB_fdl: relative or absolute location to the folder where all database
files are located
'''
def hdf_to_csv(hdf_DB, tbl_name):
''' Save hdf DB to csv
hdf_DB: HDF5 database rel of abs path and name
tbl_name: Name of table in database
'''
df = pd.read_hdf(hdf_DB, tbl_name)
df.to_csv(DB_fdl+tbl_name+'.csv')
# Expected HDF5 table names
CC_tbl_name = 'CC'
PV_tbl_name = 'PV_df'
PV_full_tbl_name = 'PV_df_full'
TShdrs_tbl_name = 'TShdrs'
TS_df_tbl_name = 'TS_df'
# Expected HDF5 db names
DB_tbl = DB_fdl+'DB_tbl_processed.h5'
TS_cut = DB_fdl+'TS_cut.h5'
# Load expected param file
output = open(DB_fdl+'param.txt', 'rb')
param = pickle.load(output)
# Dump all expected DB tables to csv files
hdf_to_csv(DB_tbl, PV_tbl_name)
if param['matched']:
hdf_to_csv(DB_tbl, PV_full_tbl_name)
hdf_to_csv(DB_tbl, TShdrs_tbl_name)
hdf_to_csv(DB_tbl, CC_tbl_name)
@staticmethod
def run_dataLoad(DB_fdl):
''' Loads a previous processing session into memory ready for analysis.
- Inputs -
DB_fdl: input folder holding the expected databases in the form
'DB_fld/'
- Outputs -
PV_df: Main database holding PV, and CC data
TS_DB: Database of TS data
PV_df_full: Database including all PV data, empty if original PV and TS
data was coincident already.
'''
def from_pkl(fname):
''' Load pickle files
fname: file name rel or abs path
'''
try:
output = open(fname, 'rb')
obj_dict = pickle.load(output)
return obj_dict
except EOFError:
return False
def from_hdf5(DB_tbl, tbl_name):
'''Load expected df from the hdf5 database
'''
df = pd.read_hdf(DB_tbl, tbl_name)
return df
# ------------------ Setup ------------------ #
# Load the param file data
param = from_pkl(DB_fdl+'param.txt')
# The database names
DB_tbl = pd.HDFStore(DB_fdl+'DB_tbl_processed.h5')
TS_cut = pd.HDFStore(DB_fdl+'TS_cut.h5')
# tabel names
PV_tbl_name = 'PV_df'
PV_full_tbl_name = 'PV_df_full'
TS_df_tbl_name = 'TS_df'
PV_df = from_hdf5(DB_tbl, PV_tbl_name)
if 'TSmatched' in param and param['TSmatched']:
PV_df_full = from_hdf5(DB_tbl, PV_full_tbl_name)
# TS_df = from_hdf5(TS_cut, TS_df_tbl_name)
TS_DB = from_hdf5(TS_cut, TS_df_tbl_name+'DB')
# TShdrs = from_hdf5(DB_tbl, TShdrs_tbl_name)
# CC = from_hdf5(DB_tbl, CC_tbl_name)
# Close the DB's
DB_tbl.close()
TS_cut.close()
return PV_df, TS_DB, PV_df_full, param
class dataStore:
'''This class is intended to handel the storage of all data aquired and or
generated during the processing.
'''
def __init__(self, param={}, PV_df=[], PV_df_full=[], TS_df=[],
TS_DB=[], TShdrs=[], CC=[]):
self.param = param
self.PV_df = PV_df
self.PV_df_full = PV_df_full
self.TS_df = TS_df
self.TS_DB = TS_DB
self.TShdrs = TShdrs
self.CC = CC
self.CC_tbl_name = 'CC'
self.PV_tbl_name = 'PV_df'
self.PV_full_tbl_name = 'PV_df_full'
self.TShdrs_tbl_name = 'TShdrs'
self.TS_df_tbl_name = 'TS_df'
self.DB_fdl = param['TSloc'].split('/')[-2]+'_DB/'
self.DB_tbl = self.DB_fdl+'DB_tbl_processed.h5'
self.TS_cut = self.DB_fdl+'TS_cut.h5'
def pre_process(self):
''' check inputs are in the correct or expected format, process if
required.
'''
def post_process(self):
''' check loaded DB is of the expected format, process if
required.
'''
self.TS_df = self.TS_df.as_matrix()
# self.PV_df.set_index('Date', drop=True, inplace=True)
# self.TS_df.drop('index', axis=1, inplace=True)
# self.TS_df = self.TS_df.as_matrix()
# self.TShdrs.set_index(['level_0', 'level_1'], inplace=True)
def checkSetup(self):
''' This function checks the setup file to determine if any param have
changed. If yes, the processing will be re-run; otherwise the saved
databases will be loaded. Returns True if a change is detected.
'''
def dict_compare(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o : (d1[o], d2[o]) for o in intersect_keys if
np.all(d1[o] != d2[o])}
# same = set(o for o in intersect_keys if np.all(d1[o] == d2[o]))
if len(added) == 0 and len(removed) == 0 and len(modified) == 0:
return True
else:
return False
check = self.from_pkl('param.txt')
return not dict_compare(self.param, check)
def to_hdf5(self, DB_tbl,df,tbl_name,form):
'''Save expected df to hdf5 database
'''
if isinstance(df, np.ndarray):
df = pd.DataFrame(df)
df.to_hdf(DB_tbl, tbl_name, format=form)
else:
df.to_hdf(DB_tbl, tbl_name, format=form)
def from_hdf5(self, DB_tbl,tbl_name):
'''Load expected df from the hdf5 database
'''
df = pd.read_hdf(DB_tbl, tbl_name)
return df
def from_sql_DB(self, tbl_name, DB_egn):
''' Load the table tbl_name from the SQL database via the engine DB_egn
'''
df = pd.read_sql_table(tbl_name, DB_egn)
return df
def from_pkl(self, fname):
''' Load pickle files
fname: file name rel or abs path
'''
try:
output = open(fname, 'rb')
obj_dict = pickle.load(output)
return obj_dict
except EOFError:
return False
def to_pkl(self, data, fname):
''' Save pickle files
data: data to pickle
fname: file name rel or abs path
'''
output = open(fname, 'w+b')
pickle.dump(data, output)
output.close()
def hdf_to_csv(self, hdf_DB, tbl_name):
''' Save hdf DB to csv
hdf_DB: HDF5 database rel of abs path and name
tbl_name: Name of table in database
'''
df = self.from_hdf5(hdf_DB, tbl_name)
df.to_csv(tbl_name)
def run_dataStore(self):
''' run the expected data storage workflow
'''
# The database names
DB_tbl = pd.HDFStore(self.DB_tbl)
TS_cut = pd.HDFStore(self.TS_cut)
# Save to hdf5 databases
self.to_hdf5(DB_tbl,self.PV_df,'PV_df','t')
if 'TSmatched' in self.param and self.param['TSmatched']:
self.to_hdf5(DB_tbl, self.PV_df_full, self.PV_full_tbl_name,'t')
self.to_hdf5(TS_cut, self.TS_df, self.TS_df_tbl_name,'f')
self.to_hdf5(TS_cut, self.TS_DB, self.TS_df_tbl_name+'DB','f')
self.to_hdf5(DB_tbl, self.TShdrs, self.TShdrs_tbl_name,'f')
self.to_hdf5(DB_tbl, self.CC, self.CC_tbl_name,'t')
DB_tbl.close()
TS_cut.close()
# Pickle the param file
self.to_pkl(self.param, self.DB_fdl+'param.txt')
def run_dataLoad(self):
''' run the expected data loading workflow
'''
# Pickle data
self.param = self.from_pkl(self.DB_fdl+'param.txt')
# The database names
DB_tbl =
|
pd.HDFStore(self.DB_tbl)
|
pandas.HDFStore
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks_dev/rolling.ipynb (unless otherwise specified).
__all__ = ['make_generic_rolling_features', 'make_generic_resampling_and_shift_features',
           'create_rolling_resampled_features']
# Cell
from functools import reduce, partial
import os
import datetime as dt
from tqdm import tqdm
from warnings import warn
import pandas as pd
import numpy as np
import numba
from dask import dataframe as dd
from dask import delayed
from dask.diagnostics import ProgressBar
# Cell
def _get_index_rolling_windows(rolling_obj):
'''
get positional indexes of rows of each rolling window
'''
if hasattr(rolling_obj, '_selection'):
previous_selection = getattr(rolling_obj, '_selection')
else:
previous_selection = None
INDEX_LIST = []
# define a helper that appends each window's positional indexes to the global INDEX_LIST, since rolling.apply cannot return arrays
def f(x): INDEX_LIST.append(x.astype(int)); return 0
assert '__indexer__' not in rolling_obj.obj.columns, 'DataFrame should not contain any col with "__indexer__" name'
rolling_obj.obj = rolling_obj.obj.assign(__indexer__=np.arange(len(rolling_obj.obj)))  # DataFrame.assign has no inplace kwarg; passing it would add a bogus 'inplace' column
rolling_obj._selection = '__indexer__'
rolling_obj.apply(f, raw = True)
rolling_obj.obj = rolling_obj.obj.drop(columns = ['__indexer__'])
delattr(rolling_obj, '_selection')
if not previous_selection is None:
setattr(rolling_obj, '_selection', previous_selection)
return INDEX_LIST
def _apply_custom_rolling(rolling_obj, func, raw = True, engine = 'numpy', *args, **kwargs):
engines = {
'numpy':_rolling_apply_custom_agg_numpy,
'pandas':_rolling_apply_custom_agg_pandas,
'numba':_rolling_apply_custom_agg_numpy_jit
}
_rolling_apply = engines[engine]
indexes = _get_index_rolling_windows(rolling_obj)
if hasattr(rolling_obj, '_selection'):
    if getattr(rolling_obj, '_selection') is None:
        values = _rolling_apply(rolling_obj.obj, indexes, func, *args, **kwargs)
    else:
        values = _rolling_apply(rolling_obj.obj[rolling_obj._selection], indexes, func, *args, **kwargs)
else:
    values = _rolling_apply(rolling_obj.obj, indexes, func, *args, **kwargs)
return values
def _rolling_apply_custom_agg_numpy_jit(df, indexes, func):
'''
applies some aggregation function over groups defined by index.
groups are numpy arrays
'''
dfv = df.values
# template of output to create empty array
#use this for jit version
shape = np.array(func(dfv[:1])).shape
#d = [np.empty(*shape) for _ in range(len(indexes))]
result_array = np.empty((len(indexes),*shape))
@numba.jit(forceobj=True)
def _roll_apply(dfv, indexes, func, result_array):
for i in np.arange(len(indexes)):
data = dfv[indexes[i]]
if len(data) > 0:
result = func(data)
result_array[i] = result
else:
result = np.empty(shape)
return result_array
return _roll_apply(dfv, indexes, func, result_array)
def _rolling_apply_custom_agg_numpy(df, indexes, func, *args, **kwargs):
'''
applies some aggregation function over groups defined by index.
groups are numpy arrays
'''
dfv = df.values
d = [[] for _ in range(len(indexes))]
for i in tqdm(range(len(indexes))):
data = dfv[indexes[i]]
if len(data) > 0:
result = func(data, *args, **kwargs)
d[i] = result
return d
def _rolling_apply_custom_agg_pandas(df, indexes, func, *args, **kwargs):
'''
applies some aggregation function over groups defined by index.
groups are pandas dataframes
'''
# template of output to create empty array
d = [[] for _ in range(len(indexes))]
for i in tqdm(range(len(indexes))):
data = df.iloc[indexes[i]]
if len(data) > 0:
result = func(data, *args, **kwargs)
d[i] = result
return
|
pd.concat(d)
|
pandas.concat
|
# -*- coding: utf-8 -*-
#from pyramid.arima import auto_arima
import numpy as np
import logging
import sys
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf
import matplotlib.pylab as plt
#from fbprophet import Prophet
#from tbats import BATS, TBATS
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
def getClosest_DateCode(ref_data_seg, current_dateCode, last_year_date):
"""
This function returns the closest day with the same code according to last year reference data
:param ref_data_seg: reference (last year) data for the current segment
:param current_dateCode: code of the forecast date
:param last_year_date: last year date (reference data)
:return: day (date): closest day with the same code
"""
i = 0
first = True
code1 = 0
code2 = 0
day_plus = None
day_minus = None
if last_year_date.year == ref_data_seg['FECHA'].iloc[0].year:
while((code1!=current_dateCode) & (code2 != current_dateCode)):
if first: # TODO: refactor this part of the code and move it to the beginning of the function
code1 = ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == last_year_date]['COD_LABORALIDAD'].item()
first = False
i = i+1
day_plus = pd.to_datetime(last_year_date)
day_minus = pd.to_datetime(last_year_date)
else:
try:
day_plus = day_plus + timedelta(days=i)
if (day_plus.year == last_year_date.year):
if len(ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == day_plus]['COD_LABORALIDAD'].index) == 0:
code1 = current_dateCode
else:
code1 = ref_data_seg.loc[
|
pd.to_datetime(ref_data_seg['FECHA'])
|
pandas.to_datetime
|
#!/usr/bin/env python
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.datasets
def load_data(cols=['CRIM', 'ZN', 'INDUS', 'CHAS', 'MEDV']):
"""
Loads to Boston Housing Dataset
"""
boston = sklearn.datasets.load_boston(return_X_y=False)
df1 =
|
pd.DataFrame(boston['data'], columns=boston['feature_names'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Remaining To-Do**
# - save 6x intermediate results (separate base & drought scenario files for all 3 case studies)
# - figure generation as a separate area of code
# - note that it might have been cleaner to concatenate all of these various scenario results, and process as a single data frame...
# # Overview
# **DayCent regional simulation results post-processing**
#
# This Jupyter Notebook is designed to facilitate post-processing and analysis of sets of raw DayCent results from a regional scale simulation. For more information, contact author [<NAME>](https://johnlfield.weebly.com/) at <<EMAIL>>
# ## DayCent background
# DayCent is a process-based model that simulates agro-ecosystem net primary production, soil organic matter dynamics, and nitrogen (M) cycling and trace gas emissions. DayCent is a daily-timestep version of the older CENTURY model. Both models were created and are currently maintained at the Colorado State University [Natural Resource Ecology Laboratory](https://www.nrel.colostate.edu/) (CSU-NREL), and source code is available upon request. DayCent model homepage: [https://www2.nrel.colostate.edu/projects/daycent/](https://www2.nrel.colostate.edu/projects/daycent/)
#
# 
# ## Regional workflow
# The primary spatial data inputs to DayCent are:
# * soil texture as a function of depth
# * historic daily weather (Tmin, Tmax, precip)
#
# Our DayCent spatial modeling workflow is based on a national-scale GIS database of current land use ([NLCD](https://www.mrlc.gov/national-land-cover-database-nlcd-2016)), soil ([SSURGO](https://www.nrcs.usda.gov/wps/portal/nrcs/detail/soils/survey/?cid=nrcs142p2_053627)), and weather ([NARR](https://www.ncdc.noaa.gov/data-access/model-data/model-datasets/north-american-regional-reanalysis-narr)) data layers housed at CSU-NREL. The python-based workflow consists of a collection of scripts that perform the following:
# 1. Selection of area to be simulated, specified based on current land cover and/or land biophysical factors (i.e., soil texutre, slope, land capability class rating, etc.)
# 2. Determination of individual unique DayCent model runs (i.e., **"strata"**) necessary to cover the heterogenity of soils and climate across the simulation area
# 3. Parallel execution of simulations on the CSU-NREL computing cluster
# 4. Results analysis and mapping (this routine)
# # Code & data imports
# ## Load python modules
import constants as con
from IPython.display import Image, display
import json
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import os
import pandas as pd
import plotly.express as px
import sys
from urllib.request import urlopen
# ## Load runtable
# Individual DayCent strata are specified via a .csv format "runtable" file, which contains the following information:
# * unique identifier (strata_no)
# * ID for DayCent spin-up archive (runno)
# * SSURGO soil map unit ID (mukey_int)
# * NARR climate grid ID (gridx & gridy)
# * county FIPS code (fips)
# * DayCent-format schedule file to simulate (sch_file)
# * latitude of the county centroid, used to set perennial grass phenology (latitude)
# * for simulations on abandoned agricultural land, year of peak ag land extent (peak_year)
# * land area represented by that strata (tot_ha)
# The code below loads the relevant runtable to a Pandas dataframe.
runtable = "eastern_US_runtable_incl81.csv"
run_df = pd.read_csv(runtable, skiprows=[1]) # skip SQL datatype row
run_df
# ## Load DayCent results
# Raw DayCent model output is spread across two files:
# * .lis files contain information related to per-area biomass harvest and soil carbon
# * year_summary.out contains per-area trace gas emissions
#
# Reading these DayCent results data directly from the NREL network via Pulse Secure is ideal for avoiding clutter on my local machine. However, that is only practical for smaller datasets; multi-GB data takes hours to load. I had also tried to develop code in this notebook to sub-set the large DayCent raw results files on the NREL network (see code_scraps.py), to minimize the volume of data being read over the network or downloaded locally. However, it seems that reading network data over Pulse Secure is the bottle-neck (not necessarily loading it into a large Pandas DataFrame), so running that sub-setting routine from this notebook on my own machine was similarly slow.
#
# I eventually found it quicker & more flexible to download the big raw data files locally via a shell Secure Copy command (with Pulse Secure DISabled), and process via a more normal linear workflow. The scp step takes approximately 2 min per GB. After that, reading the local data to memory is reasonable (~2 min), and merging & filtering steps are quick enough (usually ~1 min each) that there is no need to change my workflow (designed more for code length & clarity than memory management). Here's an example shell Secure Copy command, for reference:
# ```console
# scp <EMAIL>:/data/paustian/AFRI/simulations/results/2019-11-01,00.37__eastern_US_runtable_incl81__90__drought_sensitivity/year_summary.out /Users/johnfield/Desktop/2019-11-01,00.37__eastern_US_runtable_incl81__90__drought_sensitivity_year_summary.out
# ```
# specify simulation names for both baseline & drought tolerance results
base_sims = '2019-09-16,13.26__eastern_US_runtable_incl81__79__CBI_baseline'
drought_sims = '2019-11-01,00.37__eastern_US_runtable_incl81__90__drought_sensitivity'
# +
# # inspect head of raw data files over Pulse Secure
# results_path = '/Volumes/wcnr-network/Research/Paustian/AFRI/simulations/results/'
# base_fpath = os.path.join(results_path, base_sims, 'X.lis')
# drought_fpath = os.path.join(results_path, drought_sims, 'X.lis')
# # base python equivalent to a bash 'head' command
# with open(base_fpath) as myfile:
# head = [next(myfile) for x in range(5)]
# print(head)
# print()
# with open(drought_fpath) as myfile:
# head = [next(myfile) for x in range(5)]
# print(head)
# -
# First, we load and concatenate the .lis output for the relevant switchgrass variety scenarios.
# +
# %%time
results_path = '/Users/johnfield/Desktop/'
# .lis data import, skipping SQL datatype rows
base_lis_fpath = os.path.join(results_path, base_sims+'_X.lis')
base_lis_df = pd.read_csv(base_lis_fpath, skiprows=[1]) # 3.7 GB
base_lis_df['variety'] = 'base'
drought_lis_fpath = os.path.join(results_path, drought_sims+'_X.lis')
drought_lis_df = pd.read_csv(drought_lis_fpath, skiprows=[1]) # 3.7 GB
drought_lis_df['variety'] = 'drought_tol'
# concatenate scenario results
lis_df =
|
pd.concat([base_lis_df, drought_lis_df], axis=0)
|
pandas.concat
|
"""Module is for data (time series and anomaly list) processing.
"""
from typing import Dict, List, Optional, Tuple, Union, overload
import numpy as np
import pandas as pd
def validate_series(
ts: Union[pd.Series, pd.DataFrame],
check_freq: bool = True,
check_categorical: bool = False,
) -> Union[pd.Series, pd.DataFrame]:
"""Validate time series.
This function will check some common critical issues of time series that
may cause problems if anomaly detection is performed without fixing them.
The function will automatically fix some of them and raise errors for the
others.
Issues that will be checked and automatically fixed include:
- Time index is not monotonically increasing;
- Time index contains duplicated time stamps (fix by keeping first values);
- (optional) Time index attribute `freq` is missing while the index follows
a frequency;
- (optional) Time series include categorical (non-binary) label columns
(fix by converting categorical labels into binary indicators).
Issues that will be checked and raise an error include:
- Wrong type of time series object (must be pandas Series or DataFrame);
- Wrong type of time index object (must be pandas DatetimeIndex).
Parameters
----------
ts: pandas Series or DataFrame
Time series to be validated.
check_freq: bool, optional
Whether to check if the time index attribute `freq` is missing. Default: True.
check_categorical: bool, optional
Whether to check if the time series includes categorical (non-binary) label
columns. Default: False.
Returns
-------
pandas Series or DataFrame
Validated time series.
"""
ts = ts.copy()
# check input type
if not isinstance(ts, (pd.Series, pd.DataFrame)):
raise TypeError("Input is not a pandas Series or DataFrame object")
# check index type
if not isinstance(ts.index, pd.DatetimeIndex):
raise TypeError(
"Index of time series must be a pandas DatetimeIndex object."
)
# check duplicated
if any(ts.index.duplicated(keep="first")):
ts = ts[ts.index.duplicated(keep="first") == False]
# check sorted
if not ts.index.is_monotonic_increasing:
ts.sort_index(inplace=True)
# check time step frequency
if check_freq:
if (ts.index.freq is None) and (ts.index.inferred_freq is not None):
ts = ts.asfreq(ts.index.inferred_freq)
# convert categorical labels into binary indicators
if check_categorical:
if isinstance(ts, pd.DataFrame):
ts = pd.get_dummies(ts)
if isinstance(ts, pd.Series):
seriesName = ts.name
ts = pd.get_dummies(
ts.to_frame(),
prefix="" if seriesName is None else seriesName,
prefix_sep="" if seriesName is None else "_",
)
if len(ts.columns) == 1:
ts = ts[ts.columns[0]]
ts.name = seriesName
return ts
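# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The toy series below is hypothetical: its index is unsorted and contains a duplicated
# time stamp, so validate_series should drop the duplicate (keeping the first value),
# sort the index, and infer the daily frequency.
#
#   raw = pd.Series(
#       [2.0, 1.0, 1.5, 3.0],
#       index=pd.to_datetime(["2021-01-02", "2021-01-01", "2021-01-01", "2021-01-03"]),
#   )
#   clean = validate_series(raw)
#   # clean.index is monotonically increasing, duplicate-free, and has freq="D"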
def validate_events(
event_list: List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
point_as_interval: bool = False,
) -> List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]:
"""Validate event list.
This function will check and fix some common issues in an event list (a
list of time windows), including invalid time window, overlapped time
windows, unsorted events, etc.
Parameters
----------
event_list: list
A list of events, where an event is a pandas Timestamp if it is
instantaneous or a 2-tuple of pandas Timestamps if it is a closed time
interval.
point_as_interval: bool, optional
Whether to return each instantaneous event as a closed interval with
identical start and end points. Default: False.
Returns
-------
list:
A validated list of events.
"""
if not isinstance(event_list, list):
raise TypeError("Argument `event_list` must be a list.")
for event in event_list:
if not (
isinstance(event, pd.Timestamp)
or (
isinstance(event, tuple)
and (len(event) == 2)
and all([isinstance(event[i], pd.Timestamp) for i in [0, 1]])
)
):
raise TypeError(
"Every event in the list must be a pandas Timestamp, "
"or a 2-tuple of Timestamps."
)
time_window_ends = [] # type: List[pd.Timestamp]
time_window_type = [] # type: List[int]
for time_window in event_list:
if isinstance(time_window, tuple):
if time_window[0] <= time_window[1]:
time_window_ends.append(time_window[0])
time_window_type.append(+1)
time_window_ends.append(time_window[1])
time_window_type.append(-1)
else:
time_window_ends.append(time_window)
time_window_type.append(+1)
time_window_ends.append(time_window)
time_window_type.append(-1)
time_window_end_series = pd.Series(
time_window_type, index=pd.DatetimeIndex(time_window_ends), dtype=int
) # type: pd.Series
time_window_end_series.sort_index(kind="mergesort", inplace=True)
time_window_end_series = time_window_end_series.cumsum()
status = 0
merged_event_list = (
[]
) # type: List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]
for t, v in time_window_end_series.items():  # type: pd.Timestamp, int
if (status == 0) and (v > 0):
start = t # type: pd.Timestamp
status = 1
if (status == 1) and (v <= 0):
end = t # type: pd.Timestamp
merged_event_list.append([start, end])
status = 0
for i in range(1, len(merged_event_list)):
this_start = merged_event_list[i][0] # type: pd.Timestamp
this_end = merged_event_list[i][1] # type: pd.Timestamp
last_start = merged_event_list[i - 1][0] # type: pd.Timestamp
last_end = merged_event_list[i - 1][1] # type: pd.Timestamp
if last_end + pd.Timedelta("1ns") >= this_start:
merged_event_list[i] = [last_start, this_end]
merged_event_list[i - 1] = None
merged_event_list = [
w[0] if (w[0] == w[1] and not point_as_interval) else tuple(w)
for w in merged_event_list
if w is not None
]
return merged_event_list
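# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Hypothetical input: two overlapping windows plus one instantaneous event.
# validate_events merges the overlap and returns the events sorted in time.
#
#   events = [
#       (pd.Timestamp("2021-01-01 00:00"), pd.Timestamp("2021-01-01 06:00")),
#       (pd.Timestamp("2021-01-01 03:00"), pd.Timestamp("2021-01-01 09:00")),
#       pd.Timestamp("2021-02-01 12:00"),
#   ]
#   validate_events(events)
#   # -> [(Timestamp('2021-01-01 00:00:00'), Timestamp('2021-01-01 09:00:00')),
#   #     Timestamp('2021-02-01 12:00:00')]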
@overload
def to_events(
labels: pd.Series,
freq_as_period: bool = True,
merge_consecutive: Optional[bool] = None,
) -> List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]:
...
@overload
def to_events( # type: ignore
labels: pd.DataFrame,
freq_as_period: bool = True,
merge_consecutive: Optional[bool] = None,
) -> Dict[str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]]:
...
def to_events(
labels: Union[pd.Series, pd.DataFrame],
freq_as_period: bool = True,
merge_consecutive: Optional[bool] = None,
) -> Union[
List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
Dict[str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]],
]:
"""Convert binary label series to event list.
Parameters
----------
labels: pandas Series or DataFrame
Binary series of anomaly labels. If a DataFrame, each column is
regarded as a type of anomaly independently.
freq_as_period: bool, optional
Whether to regard time index with regular frequency (i.e. attribute
`freq` of time index is not None) as time intervals.
For example, DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05'], dtype='datetime64[ns]', freq='D') has
daily frequency. If freq_as_period=True, each time point in the index
represents that day (24 hours). Otherwise, each time point represents
the instantaneous time instance of 00:00:00 on that day.
Default: True.
merge_consecutive: bool, optional
Whether to merge consecutive events into a single time window. If not
specified, it is on automatically if the input time index has a regular
frequency and freq_as_period=True, and it is off otherwise. Default:
None.
Returns
-------
list or dict
- If input is a Series, output is a list of events where an event is a
pandas Timestamp if it is instantaneous or a 2-tuple of pandas
Timestamps if it is a closed time interval.
- If input is a DataFrame, every column is treated as an independent
binary series, and output is a dict where keys are column names and
values are event lists.
"""
if isinstance(labels, pd.Series):
labels = validate_series(
labels, check_freq=False, check_categorical=False
)
labels = labels == 1
if merge_consecutive is None:
if freq_as_period and (labels.index.freq is not None):
merge_consecutive = True
else:
merge_consecutive = False
if not merge_consecutive:
if freq_as_period and (labels.index.freq is not None):
period_end = pd.date_range(
start=labels.index[1],
periods=len(labels.index),
freq=labels.index.freq,
) - pd.Timedelta(
"1ns"
) # type: pd.DatetimeIndex
return [
(start, end) if start != end else start
for start, end in zip(
list(labels.index[labels]), list(period_end[labels])
)
]
else:
return list(labels.index[labels])
else:
labels_values = labels.values.astype(int).reshape(
-1, 1
) # type: np.ndarray
mydiff = np.vstack(
[
labels_values[0, :] - 0,
np.diff(labels_values, axis=0),
0 - labels_values[-1, :],
]
) # type: np.ndarray
starts = np.argwhere(mydiff == 1) # type: np.ndarray
ends = np.argwhere(mydiff == -1) # type: np.ndarray
if freq_as_period and (labels.index.freq is not None):
period_end = pd.date_range(
start=labels.index[1],
periods=len(labels.index),
freq=labels.index.freq,
) - pd.Timedelta("1ns")
return [
(labels.index[start], period_end[end - 1])
if labels.index[start] != period_end[end - 1]
else labels.index[start]
for start, end in zip(starts[:, 0], ends[:, 0])
]
else:
return [
(labels.index[start], labels.index[end - 1])
if start != end - 1
else labels.index[start]
for start, end in zip(starts[:, 0], ends[:, 0])
]
else:
if labels.columns.duplicated().any():
raise ValueError("Input DataFrame must have unique column names.")
return {
col: to_events(labels[col], freq_as_period, merge_consecutive)
for col in labels.columns
}
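# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Hypothetical daily label series: with the default freq_as_period=True, consecutive
# positive labels are merged into one closed interval covering the full days.
#
#   labels = pd.Series(
#       [0, 1, 1, 0, 1],
#       index=pd.date_range("2021-01-01", periods=5, freq="D"),
#   )
#   to_events(labels)
#   # -> [(Timestamp('2021-01-02 00:00:00'), Timestamp('2021-01-03 23:59:59.999999999')),
#   #     (Timestamp('2021-01-05 00:00:00'), Timestamp('2021-01-05 23:59:59.999999999'))]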
@overload
def to_labels(
lists: List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
time_index: pd.DatetimeIndex,
freq_as_period: bool = True,
) -> pd.Series:
...
@overload
def to_labels(
lists: Dict[
str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]
],
time_index: pd.DatetimeIndex,
freq_as_period: bool = True,
) -> pd.DataFrame:
...
def to_labels(
lists: Union[
List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
Dict[
str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]
],
],
time_index: pd.DatetimeIndex,
freq_as_period: bool = True,
) -> Union[pd.Series, pd.DataFrame]:
"""Convert event list to binary series along a time index.
Parameters
----------
lists: list or dict
A list of events, or a dict of lists of events.
- If list, a list of events where an event is a pandas Timestamp if it
is instantaneous or a 2-tuple of pandas Timestamps if it is a closed
time interval.
- If dict, each key-value pair represents an independent list of
events.
time_index: pandas DatetimeIndex
Time index to build the label series.
freq_as_period: bool, optional
Whether to regard time index with regular frequency (i.e. attribute
`freq` of time index is not None) as time intervals.
For example, DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05'], dtype='datetime64[ns]', freq='D') has
daily frequency. If freq_as_period=True, each time point represents
that day, and that day will be marked positive if an event in the event
list overlaps with the period of that day (24 hours). Otherwise, each
time point represents the instantaneous time instance of 00:00:00 on
that day, and that time point will be marked positive if an event in
the event list covers it.
Default: True.
Returns
-------
pandas Series or DataFrame
Series of binary labels.
- If input is a single list, the output is a Series.
- If input is a dict of lists, the output is a DataFrame where each
column corresponds to a list in the dict.
"""
if not isinstance(time_index, pd.DatetimeIndex):
raise TypeError("Time index must be a pandas DatetimeIndex object.")
if not time_index.is_monotonic_increasing:
raise ValueError("Time index must be monotoic increasing.")
if isinstance(lists, list):
labels = pd.Series(False, index=time_index) # type: pd.Series
lists = validate_events(lists)
if freq_as_period and (time_index.freq is not None):
period_end = pd.date_range(
start=time_index[1],
periods=len(time_index),
freq=time_index.freq,
) - pd.Timedelta(
"1ns"
) # type: pd.DatetimeIndex
for event in lists:
isOverlapped = pd.Series(
index=time_index, dtype=bool
) # type: pd.Series
if isinstance(event, tuple):
isOverlapped = (time_index <= event[1]) & (
period_end >= event[0]
)
else:
isOverlapped = (time_index <= event) & (
period_end >= event
)
labels.loc[isOverlapped] = True
else:
for event in lists:
if isinstance(event, tuple):
labels.loc[
(labels.index >= event[0]) & (labels.index <= event[1])
] = True
else:
labels.loc[labels.index == event] = True
return labels
elif isinstance(lists, dict):
labels_df = pd.DataFrame(
False, index=time_index, columns=lists.keys()
) # pd.DataFrame
for col, key in zip(labels_df.columns, lists.keys()):
labels_df[col] = to_labels(lists[key], time_index, freq_as_period)
return labels_df
else:
raise TypeError("Argument `lists` must be a list or a dict.")
@overload
def expand_events(
lists: List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
left_expand: Union[pd.Timedelta, str, int] = 0,
right_expand: Union[pd.Timedelta, str, int] = 0,
freq_as_period: bool = True,
) -> List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]:
...
@overload
def expand_events(
lists: Dict[
str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]
],
left_expand: Union[pd.Timedelta, str, int] = 0,
right_expand: Union[pd.Timedelta, str, int] = 0,
freq_as_period: bool = True,
) -> Dict[str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]]:
...
@overload
def expand_events(
lists: pd.Series,
left_expand: Union[pd.Timedelta, str, int] = 0,
right_expand: Union[pd.Timedelta, str, int] = 0,
freq_as_period: bool = True,
) -> pd.Series:
...
@overload
def expand_events( # type:ignore
lists: pd.DataFrame,
left_expand: Union[pd.Timedelta, str, int] = 0,
right_expand: Union[pd.Timedelta, str, int] = 0,
freq_as_period: bool = True,
) -> pd.DataFrame:
...
def expand_events( # type:ignore
events: Union[
List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
Dict[
str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]
],
pd.Series,
pd.DataFrame,
],
left_expand: Union[pd.Timedelta, str, int] = 0,
right_expand: Union[pd.Timedelta, str, int] = 0,
freq_as_period: bool = True,
) -> Union[
List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]],
Dict[str, List[Union[Tuple[pd.Timestamp, pd.Timestamp], pd.Timestamp]]],
pd.Series,
pd.DataFrame,
]:
"""Expand duration of events.
Parameters
----------
events: list, dict, pandas Series, or pandas DataFrame
Events to be expanded.
- If list, a list of events where an event is a pandas Timestamp if it
is instantaneous or a 2-tuple of pandas Timestamps if it is a closed
time interval.
- If dict, each key-value pair represents an independent list of
events.
- If pandas Series, it is binary, where 1 indicates that an event covers
this time point.
- If pandas DataFrame, each column is treated as an independent Series.
left_expand: pandas Timedelta, str, or int, optional
Time range to expand backward.
- If str, it must be able to be converted into a pandas Timedelta
object.
- If int, it must be in nanoseconds.
Default: 0.
right_expand: pandas Timedelta, str, or int, optional
Time range to expand forward.
- If str, it must be able to be converted into a pandas Timedelta
object.
- If int, it must be in nanoseconds.
Default: 0.
freq_as_period: bool, optional
Whether to regard time index with regular frequency (i.e. attribute
`freq` of time index is not None) as time intervals. Only used when
input events is pandas Series or DataFrame.
For example, DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05'], dtype='datetime64[ns]', freq='D') has
daily frequency. If freq_as_period=True, each time point in the index
represents that day (24 hours). Otherwise, each time point represents
the instantaneous time instance of 00:00:00 on that day.
Default: True.
Returns
-------
list, dict, pandas Series, or pandas DataFrame
Expanded events.
"""
if not isinstance(left_expand, pd.Timedelta):
left_expand =
|
pd.Timedelta(left_expand)
|
pandas.Timedelta
|
import argparse
import os
import requests
import time
import random
from tqdm import tqdm
import pandas as pd
import tasking_manager_stats.data_management as dm
def get_args():
parser = argparse.ArgumentParser(description='Get users data from tasking manager API')
parser.add_argument('stats', type=str, help='Path of the stats CSV file containing contributors')
parser.add_argument('token', type=str, help='HOT tasking manager API token ')
return parser.parse_args()
def get_user_stats(user_list, token):
print('Download user data')
users = pd.DataFrame()
contributions_by_day = pd.DataFrame()
for user in tqdm(user_list):
url = 'https://tasking-manager-tm4-production-api.hotosm.org/api/v2/users/' + user + '/statistics/'
r = requests.get(url, headers={'Accept': 'application/json', 'Accept-Language': 'en',
'Authorization': 'Token ' + token})
data = r.json()
contrib_user = pd.DataFrame(data['contributionsByDay'])
contrib_user['Contributor'] = user
contributions_by_day = pd.concat([contributions_by_day, contrib_user], axis=0)
del data['countriesContributed']
del data['ContributionsByInterest']
del data['contributionsByDay']
df = pd.DataFrame(pd.Series(data)).transpose()
df.index = [user]
time.sleep(0.5 + random.random())
url = 'https://tasking-manager-tm4-production-api.hotosm.org/api/v2/users/' + user + '/openstreetmap/'
r = requests.get(url, headers={'Accept': 'application/json', 'Accept-Language': 'en',
'Authorization': 'Token ' + token})
data2 = r.json()
for k in data2.keys():
df[k] = data2[k]
users = pd.concat([users, df], axis=0)
time.sleep(0.5 + random.random())
users['level'] = 'ADVANCED'
users.loc[users['changesetCount'] < 500, 'level'] = 'INTERMEDIATE'
users.loc[users['changesetCount'] < 250, 'level'] = 'BEGINNER'
return users, contributions_by_day
if __name__ == '__main__':
args = get_args()
stats =
|
pd.read_csv(args.stats, encoding='ISO-8859-1')
|
pandas.read_csv
|
import dash
from dash import dcc
import dash_bootstrap_components as dbc
from dash import html
from dash.dependencies import Input, Output, State
import pandas as pd
import random
import re
#######################
# Helper functions
#######################
# # convert a dataframe into a list of html.Tr rows (one header row plus one row
# # per dataframe record) for the html table
def make_table(df):
# table header
rows = [html.Tr([html.Th(col) for col in list(df.columns)])]
# loop through each row of the dataframe and create a list of the Html objects to make that row
for r in range(len(df.index)):
row = [html.Th(df.iloc[r,c]) for c in range(len(df.columns))]
rows.append(html.Tr(row))
return rows
def get_auto_picks(start_pick,end_pick,pl,n_teams,roster):
randweights = [0]*25+[1]*9+[2]*5+[3]*3+[4]*2+[5]*2+[6]+[7]+[8]+[9]
for pick_number in range(start_pick,end_pick):
# determine team needs
team = (teamnames[:n_teams+1]+teamnames[n_teams:0:-1])[pick_number % (2*n_teams)]
pln = remove_unneeded_players(pl, roster, team)
# use randomness to determine which player will be selected
pick_no = randweights[random.randrange(len(randweights))]
pick_idx = pln.sort_values('Rank',ascending=True).index[pick_no]
pos= pl.loc[pick_idx,'Position(s)']
# update players table
pl.loc[pick_idx,'Available'] = False
pl.loc[pick_idx,'Rd'] = (pick_number-1) // n_teams + 1
pl.loc[pick_idx,'Pick'] = (pick_number-1) % n_teams + 1
pl.loc[pick_idx,'Slot'] = determine_slot(pos,roster,pl.loc[pl.Team == team])
pl.loc[pick_idx,'Team'] = team
return pl
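# --- Clarifying sketch (added; not part of the original app) ---
# The team lookup above implements a snake-draft order. For a hypothetical n_teams=4,
# teamnames[:5] + teamnames[4:0:-1] == 'AABCD' + 'DCBA' == 'AABCDDCBA', so pick numbers
# 1..8 map via pick_number % 8 to teams A, B, C, D, D, C, B, A, and pick 9 wraps back
# to A for the next round:
#
#   [('AABCD' + 'DCBA')[p % 8] for p in range(1, 10)]
#   # -> ['A', 'B', 'C', 'D', 'D', 'C', 'B', 'A', 'A']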
def determine_slot(pos, ros, teampl):
m = ros.merge(teampl,on='Slot',how='left')
# add alternative positions
altpos = (['MI'] if '2B' in pos or 'SS' in pos else []) + (
['CI'] if '1B' in pos or '3B' in pos else []) + ['UT','BE']
for p in pos.split(', ') + altpos:
for a in m.loc[m.Player.isna()].sort_values('Num')['Slot']:
if p == re.sub(r'\d$', '', a):
return a
else:
return '-'
def remove_unneeded_players(pl,roster,team):
# Remove the players from pl that team doesn't need based on roster
teampl = pl.loc[pl.Team == team]
teamros = roster.merge(teampl,on = 'Slot',how='left')
needs = list(teamros.loc[teamros.Player.isna(),'Slot'].str.replace(r'\d+$','',regex=True))
# handle MI and CI
if 'MI' in needs:
needs = needs + ['SS','2B']
if 'CI' in needs:
needs = needs + ['1B','3B']
# filter players that don't match roster needs
if ('BE' not in needs) and ('UT' not in needs):
return pl.loc[pl['Position(s)'].str.match('|'.join(needs)) & pl['Available']]
else:
return pl.loc[pl['Available']]
#######################
# Initial Data Prep
#######################
players = pd.read_csv('players.csv')
players['Team'], players['Slot'], players['Rd'], players['Pick'] = (pd.NA, pd.NA, pd.NA, pd.NA)
teamnames = 'AABCDEFGHIJKLMNOPQRSTUVWXYZ'
#######################
# Dash app layout
#######################
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
# header for the app
header = [dbc.Row(html.H1('Draft Simulator')),
dbc.Row(html.Div(' ',style = {'height': "35px"}))
]
startsection = [
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(id='n-p-dropdown',options=list(range(5,16)),value=9),
html.Div(children='# of Pitchers')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-of-dropdown',options=list(range(3,8)),value=3),
html.Div(children='# of Outfielders')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-c-dropdown',options=list(range(1,4)),value=1),
html.Div(children='# of Catchers')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-ci-dropdown',options=list(range(0,6)),value=1),
html.Div(children='# of Corner IF')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-mi-dropdown',options=list(range(0,6)),value=1),
html.Div(children='# of Middle IF')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-ut-dropdown',options=list(range(0,21)),value=2),
html.Div(children='# of Utility Players')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-be-dropdown',options=list(range(0,21)),value=2),
html.Div(children='# of Bench Players')
],style = {'width':'15%'}), md=6)
],id = 'start-row-1'),
dbc.Row(html.Div(' ',style = {'height': "25px"})),
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(id='n-teams-dropdown',options=list(range(2,25)),value=10),
html.Div(children='Select number of teams')
],style = {'width':'75%'}), md=2),
dbc.Col(
html.Div([
dcc.Dropdown(id='position-dropdown'),
html.Div(children='Select your draft position')
],style = {'width':'75%'}), md=2),
dbc.Col(html.Button('Begin!',id='begin-button',style={'width': '25%'}),md=8)
],id = 'start-row-2')
]
# put the table of the sorted data in the left half of the screen
draftpanel = [
html.Div([
html.Div([
html.H3('Select Player'),
dbc.Row([
dbc.Col([
dcc.Dropdown(options = players.Rank.astype(str)+'. '+players.Name+' ('+players['Position(s)']+')'
,id = 'pick-dropdown'),
html.Button('Draft Player', id='draft-button', n_clicks=0)],md=5),
dbc.Col([
html.Table(make_table(pd.DataFrame({})),id='bat-proj-table',className='table'),
html.Table(make_table(pd.DataFrame({})),id='pit-proj-table',className='table')],md=7)
]),
html.Div(' ',style={'height':'20px'})
],id = 'draft-div'),
html.H3('Team Roster'),
dcc.Dropdown(id='team-roster-dropdown',options=['My-Team'], value = 'My-Team'),
html.Table(make_table(pd.DataFrame({})),id='roster-table',className='table')
],id='draft-panel',style={"width": "90%"})
]
pickspanel = [
html.Div([
html.H3('Last Picks'),
html.Table(make_table(pd.DataFrame({})),id='last-picks-table',className='table'),
html.Div(players.to_json(),id='players',style={'display': 'none'}),
html.Div(0,id='n-teams',style={'display': 'none'}),
html.Div(0,id='position',style={'display': 'none'}),
html.Div(0,id='pick-number',style={'display': 'none'}),
html.Div(0,id='roster',style={'display': 'none'})
],style = {"width": "90%"})
]
projpanel = [
html.Div([
html.H3('Projected Standings'),
dcc.RadioItems(['Stats','Ranks'],'Stats',id='proj-type-radioitems',style = {'width':'200%'}),
html.Table(make_table(pd.DataFrame({})),id='proj-standings-table',className='table')
])
]
# lay out the app based on the above panel definitions
app.layout = dbc.Container([
html.Div(header),
html.Div(startsection,id ='start-section'),
html.Div(dbc.Row([dbc.Col(draftpanel, md=5),
dbc.Col(projpanel, md=5),
dbc.Col(pickspanel, md=2)])
,id = 'main-section',style = {'display':'none'})
],fluid=True)
# #######################
# # Reactive callbacks
# #######################
@app.callback(
Output('roster','children'),
[Input('n-of-dropdown','value'),
Input('n-p-dropdown','value'),
Input('n-c-dropdown','value'),
Input('n-mi-dropdown','value'),
Input('n-ci-dropdown','value'),
Input('n-ut-dropdown','value'),
Input('n-be-dropdown','value'),
Input('begin-button','n_clicks')]
)
def update_roster(n_of,n_p,n_c,n_mi,n_ci,n_ut,n_be,n_clicks):
slots = (['C'+str(i+1) for i in range(n_c)] +
['1B','2B','3B','SS'] +
['OF'+str(i+1) for i in range(n_of)] +
['MI'+str(i+1) for i in range(n_mi)] +
['CI'+str(i+1) for i in range(n_ci)] +
['P'+str(i+1) for i in range(n_p)] +
['UT'+str(i+1) for i in range(n_ut)] +
['BE'+str(i+1) for i in range(n_be)])
roster = pd.DataFrame({'Slot': slots,'Num': list(range(len(slots)))})
return roster.to_json()
@app.callback(
Output('position-dropdown', 'options'),
[Input('n-teams-dropdown', 'value')]
)
def update_position_dropdown(num_teams):
return list(range(1,num_teams+1))
@app.callback(
[Output('pick-dropdown','options')],
[Input('players','children'),
Input('roster','children')]
)
def update_pick_options(players_json,roster_json):
pl = pd.read_json(players_json)
roster = pd.read_json(roster_json)
pln = remove_unneeded_players(pl, roster, 'My-Team')
return [list(pln.Rank.astype(str)+'. '+pln.Player+' ('+pln['Position(s)']+')')]
@app.callback(
Output('last-picks-table', 'children'),
[Input('players','children')],
[State('n-teams','children')]
)
def update_last_picks_table(players_json,n_teams):
pl = pd.read_json(players_json)
last_picks = pl.loc[~pl.Team.isna()].copy()
last_picks['Pick'] = (last_picks['Rd']-1)*n_teams + last_picks['Pick']
last_picks.loc[last_picks.Team == 'My-Team','Team'] = 'Me'
return make_table(last_picks.sort_values('Pick',ascending = False)
[['Pick','Team','Player']].iloc[0:3*n_teams])
@app.callback(
Output('roster-table', 'children'),
[Input('players','children'),
Input('team-roster-dropdown','value')],
[State('roster','children')]
)
def update_roster_table(players_json,teamchoice,roster_json):
ros = pd.read_json(roster_json)
pl = pd.read_json(players_json)
pl['AVG'] = (pl['H']/pl['AB']).round(3)
pl['ERA'] = (9*pl['ER']/pl['IP']).round(2)
pl['WHIP'] = ((pl['BB']+pl['H.P'])/pl['IP']).round(2)
teampl = pl.loc[pl.Team == teamchoice]
retcols = ['Slot','Player','Rd','AB','R','HR','RBI','SB','AVG',
'IP', 'ERA', 'W', 'SO', 'SV', 'WHIP']
ret = ros.merge(teampl,on='Slot',how='left').sort_values('Num')
return make_table(ret[retcols])
@app.callback(
Output('bat-proj-table', 'children'),
[Input('pick-dropdown','value')],
[State('players','children')]
)
def update_bat_proj_table(pick,players_json):
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pl['AVG'] = (pl['H']/pl['AB']).round(3)
if pl.loc[pick_idx,['AB']].count() > 0:
return make_table(pl.loc[[pick_idx],['AB', 'R', 'HR', 'RBI', 'SB','AVG']])
else:
return make_table(pd.DataFrame({}))
@app.callback(
Output('pit-proj-table', 'children'),
[Input('pick-dropdown','value')],
[State('players','children')]
)
def update_pit_proj_table(pick,players_json):
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pl['WHIP'] = ((pl['BB']+pl['H.P'])/pl['IP']).round(2)
pl['ERA'] = (9*pl['ER']/pl['IP']).round(2)
if pl.loc[pick_idx,['IP']].count() > 0:
return make_table(pl.loc[[pick_idx],['IP', 'ERA', 'W', 'SO', 'SV', 'WHIP']])
else:
return make_table(pd.DataFrame({}))
@app.callback(
Output('proj-standings-table','children'),
[Input('players','children'),
Input('proj-type-radioitems','value')]
)
def update_proj_standings(players_json,proj_type):
df = pd.read_json(players_json)
dfg=df.groupby('Team')[['AB', 'H', 'R', 'HR', 'RBI', 'SB', 'IP', 'ER', 'W',
'SO', 'SV', 'H.P','BB']].sum().reset_index().sort_values('Team')
dfg['AVG'] = (dfg['H']/dfg['AB']).round(3)
dfg['ERA'] = (9*dfg['ER']/dfg['IP']).round(2)
dfg['WHIP'] = ((dfg['BB']+dfg['H.P'])/dfg['IP']).round(2)
ranks = {'Team':dfg.Team}
for m in ['R', 'HR', 'RBI', 'SB','AVG', 'W','SO', 'SV']:
ranks.update({m: dfg[m].rank(ascending=False)})
for m in ['ERA','WHIP']:
ranks.update({m: dfg[m].rank()})
rdf = pd.DataFrame(ranks,index=dfg.index)
rdf['Score'] = rdf.sum(axis=1)
if proj_type == 'Ranks':
return make_table(rdf.sort_values('Score'))
else:
dfg['Score'] = rdf.Score
return make_table(dfg[rdf.columns].sort_values('Score'))
@app.callback(
[Output('n-teams','children'),
Output('position','children'),
Output('pick-number','children'),
Output('players','children'),
Output('begin-button','n_clicks'),
Output('draft-button','n_clicks'),
Output('team-roster-dropdown','options'),
Output('main-section','style'),
Output('start-section','style')],
[Input('begin-button', 'n_clicks'),
Input('n-teams-dropdown','value'),
Input('position-dropdown','value'),
Input('draft-button','n_clicks'),
Input('pick-dropdown','value')],
[State('n-teams','children'),
State('position','children'),
State('pick-number','children'),
State('team-roster-dropdown','options'),
State('main-section','style'),
State('start-section','style'),
State('players','children'),
State('roster','children')]
)
def update_data(begin_clicks,n_teams,position,draft_clicks,pick,
prev_n_teams,prev_position,pick_number,prev_opts,
prev_style1,prev_style2,players_json,roster_json):
if begin_clicks is not None:
# prepare data frames
pl = pd.read_json(players_json)
ros = pd.read_json(roster_json)
# initial autopicks
pl = get_auto_picks(1, position, pl, n_teams, ros)
# list of team names
opts = ['My-Team'] + [teamnames[i] for i in range(1,n_teams+1) if i != position]
return (n_teams, position, position, pl.to_json(),
None, None, opts, {'display':'block'}, {'display':'none'})
elif draft_clicks is not None:
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pos = pl.loc[pick_idx,'Position(s)']
ros = pd.read_json(roster_json)
pl.loc[pick_idx,'Available'] = False
pl.loc[pick_idx,'Rd'] = (pick_number-1) // n_teams + 1
pl.loc[pick_idx,'Pick'] = (pick_number-1) % n_teams + 1
pl.loc[pick_idx,'Slot'] = determine_slot(pos,ros,pl.loc[pl.Team == 'My-Team'])
pl.loc[pick_idx,'Team'] = 'My-Team'
# auto draft to next human pick or end of draft
human_picks = [position%(2*n_teams), (2*n_teams+1-position)%(2*n_teams)]
end_pick = pick_number+1
while (end_pick % (n_teams*2) not in human_picks) & (end_pick <= len(ros.Num)*n_teams):
end_pick += 1
pl = get_auto_picks(pick_number+1,end_pick,pl,n_teams,ros)
return (n_teams, position, end_pick, pl.to_json(),
None, None, prev_opts, prev_style1, prev_style2)
else:
return (prev_n_teams, prev_position, pick_number, players_json,
None, None, prev_opts, prev_style1, prev_style2)
@app.callback(
Output('draft-div','style'),
[Input('pick-number','children')],
[State('n-teams','children'),
State('roster','children'),
State('draft-div','style')]
)
def end_draft(pick_num,n_teams,roster_json,prev_style):
ros =
|
pd.read_json(roster_json)
|
pandas.read_json
|
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
def Process_HBonds_File(filename):
df =
|
pd.read_csv(filename,delim_whitespace=True)
|
pandas.read_csv
|
import pandas as pd
from pandas.testing import assert_frame_equal
from dask_sql._compat import INT_NAN_IMPLEMENTED
def test_filter(c, df):
return_df = c.sql("SELECT * FROM df WHERE a < 2")
return_df = return_df.compute()
expected_df = df[df["a"] < 2]
assert_frame_equal(return_df, expected_df)
def test_filter_scalar(c, df):
return_df = c.sql("SELECT * FROM df WHERE True")
return_df = return_df.compute()
expected_df = df
assert_frame_equal(return_df, expected_df)
return_df = c.sql("SELECT * FROM df WHERE False")
return_df = return_df.compute()
expected_df = df.head(0)
assert_frame_equal(return_df, expected_df, check_index_type=False)
return_df = c.sql("SELECT * FROM df WHERE (1 = 1)")
return_df = return_df.compute()
expected_df = df
|
assert_frame_equal(return_df, expected_df)
|
pandas.testing.assert_frame_equal
|
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_singlerow_slice_categoricaldtype_gives_series(self):
# GH29521
df = pd.DataFrame({"x": pd.Categorical("a b c d e".split())})
result = df.iloc[0]
raw_cat = pd.Categorical(["a"], categories=["a", "b", "c", "d", "e"])
expected = pd.Series(raw_cat, index=["x"], name=0, dtype="category")
tm.assert_series_equal(result, expected)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
""" Parsing source data into simple tsv datasets.
To parse Bgl3 and GB1, ENRICH2 MUST BE INSTALLED IN A SEPARATE CONDA ENVIRONMENT NAMED 'enrich2' """
from os.path import isfile, join
import collections
import numpy as np
import pandas as pd
import enrich2
import utils
def parse_avgfp():
""" create the gfp dataset from raw source data """
source_fn = "source_data/avgfp/amino_acid_genotypes_to_brightness.tsv"
out_fn = "data/avgfp/avgfp.tsv"
if isfile(out_fn):
print("err: parsed avgfp dataset already exists: {}".format(out_fn))
return
# load the source data
data = pd.read_csv(source_fn, sep="\t")
# remove the wild-type entry
data = data.loc[1:]
# create columns for variants, number of mutations, and score
variants = data["aaMutations"].apply(lambda x: ",".join([x[1:] for x in x.split(":")]))
num_mutations = variants.apply(lambda x: len(x.split(",")))
score = data["medianBrightness"]
# create the dataframe
cols = ["variant", "num_mutations", "score"]
data_dict = {"variant": variants.values, "num_mutations": num_mutations.values, "score": score.values}
df = pd.DataFrame(data_dict, columns=cols)
# now add a normalized score column - these scores have the wild-type score subtracted from them
df["score_wt_norm"] = df["score"].apply(lambda x: x - 3.7192121319)
df.to_csv(out_fn, sep="\t", index=False)
def filter_dataset(df, threshold):
""" filter out variants that do not meet the required threshold for number of reads """
df = df[(df["inp"] + df["sel"]) >= threshold]
return df
def parse_bgl3_variant_list(ml, col_name):
""" creates a dataframe from the given list of variants """
# filter wild-type counts out, add to dataframe at the end
ml_no_wt = []
wt = []
for variant in ml:
if variant.startswith("WTcount"):
wt.append(int(variant.split(",")[-1].strip()))
else:
ml_no_wt.append(variant)
count_dict = collections.Counter(ml_no_wt)
frame = pd.DataFrame(index=count_dict.keys(), data=count_dict.values())
frame.columns = [col_name]
# add wild-type counts back in to the dataframe
frame.loc["_wt"] = sum(wt)
return frame
def get_bgl3_count_df(output_dir=None):
""" combines the inp and sel variant lists into a single dataframe with counts """
inp_fn = "source_data/bgl3/unlabeled_Bgl3_mutations.txt"
sel_fn = "source_data/bgl3/positive_Bgl3_mutations.txt"
cache_fn = "bgl3_raw_counts.tsv"
if output_dir is None or not isfile(join(output_dir, cache_fn)):
print("Computing bgl3 count df from raw counts")
inp_variant_list = utils.load_lines(inp_fn)
sel_variant_list = utils.load_lines(sel_fn)
df = pd.concat([parse_bgl3_variant_list(inp_variant_list, "inp"),
parse_bgl3_variant_list(sel_variant_list, "sel")], axis=1, sort=True).fillna(0)
if output_dir is not None:
df.to_csv(join(output_dir, cache_fn), sep="\t")
return df
print("Loading cached count df from file: {}".format(join(output_dir, cache_fn)))
return pd.read_csv(join(output_dir, cache_fn), sep="\t", index_col=0)
def parse_bgl3():
""" create the bgl3 dataset from raw source data """
out_dir = "data/bgl3"
out_fn = "bgl3.tsv"
if isfile(join(out_dir, out_fn)):
print("err: parsed bgl3 dataset already exists: {}".format(join(out_dir, out_fn)))
return
# creates a single dataframe with counts from the given mutations lists
df = get_bgl3_count_df(output_dir=out_dir)
# filter the variants based on count threshold
threshold = 5
df = filter_dataset(df, threshold=threshold)
enrich2.create_e2_dataset(df, output_dir=out_dir, output_fn=out_fn)
def get_gb1_count_df(output_dir=None):
""" creates a single dataframe with raw counts for all gb1 variants """
cache_fn = "gb1_raw_counts.tsv"
if output_dir is None or not isfile(join(output_dir, cache_fn)):
print("Computing gb1 count df from raw counts")
single = pd.read_csv("source_data/gb1/single_mutants.csv")
double = pd.read_csv("source_data/gb1/double_mutants.csv")
wt = pd.read_csv("source_data/gb1/wild_type.csv")
# change position to a 0-index instead of the current 1-index
single["Position"] = single["Position"] - 1
double["Mut1 Position"] = double["Mut1 Position"] - 1
double["Mut2 Position"] = double["Mut2 Position"] - 1
single_strings = single.apply(lambda row: "".join(map(str, row[0:3])), axis=1)
double_strings = double.apply(lambda row: "{}{}{},{}{}{}".format(*row[0:6]), axis=1)
wt_string = pd.Series(["_wt"])
combined_strings = pd.concat([single_strings, double_strings, wt_string], axis=0).reset_index(drop=True)
combined_input_count = pd.concat([single["Input Count"], double["Input Count"], wt["Input Count"]], axis=0).reset_index(drop=True)
combined_selection_count = pd.concat([single["Selection Count"], double["Selection Count"], wt["Selection Count"]], axis=0).reset_index(drop=True)
# save a combined all variants file with variant and counts
cols = ["variant", "inp", "sel"]
data = {"variant": combined_strings.values, "inp": combined_input_count.values, "sel": combined_selection_count.values}
df = pd.DataFrame(data, columns=cols)
df = df.set_index("variant")
if output_dir is not None:
df.to_csv(join(output_dir, cache_fn), sep="\t")
return df
print("Loading cached count df from file: {}".format(join(output_dir, cache_fn)))
return pd.read_csv(join(output_dir, cache_fn), sep="\t", index_col=0)
def parse_gb1():
""" create the gb1 dataset from raw source data """
out_dir = "data/gb1"
out_fn = "gb1.tsv"
if isfile(join(out_dir, out_fn)):
print("err: parsed gb1 dataset already exists: {}".format(join(out_dir, out_fn)))
return
df = get_gb1_count_df(output_dir=out_dir)
threshold = 5
df = filter_dataset(df, threshold)
enrich2.create_e2_dataset(df, output_dir=out_dir, output_fn=out_fn)
def parse_pab1():
""" create the pab1 dataset from raw source data """
# Pab1 sequence starts at 126, but for simplicity in encoding and array access we will offset it to zero
pab1_wt_offset = 126
single_mutants_fn = "source_data/pab1/single_mutants_linear.csv"
double_mutants_fn = "source_data/pab1/double_mutants.csv"
out_fn = "data/pab1/pab1.tsv"
if isfile(out_fn):
print("err: parsed pab1 dataset already exists: {}".format(out_fn))
return
single = pd.read_csv(single_mutants_fn, skiprows=1)
single = single.dropna(how='all') # remove rows where all values are missing
double = pd.read_csv(double_mutants_fn)
# NOTE: Using the LINEAR scores here, these are not log ratios
# build up the wild type sequence when looking through single mutants
wt_seq = []
# single mutants and scores
single_variants = []
single_scores = []
aa_order = single.columns.values[2:]
for row in single.itertuples():
wt = row.Residue
wt_seq.append(wt)
pos = int(row.position)
for rep, score in zip(aa_order, row[3:]):
if not
|
pd.isnull(score)
|
pandas.isnull
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_targetdata.ipynb (unless otherwise specified).
__all__ = ['ensembl_post', 'chunks', 'post_transcript_sequence_chunk', 'post_transcript_sequence',
'build_transcript_aa_seq_df', 'ensembl_get', 'get_translation_overlap', 'build_translation_overlap_df',
'write_transcript_data', 'get_transcript_info', 'get_conservation', 'get_exon_conservation',
'get_transcript_conservation', 'get_transcript_conservation_safe', 'build_conservation_df',
'write_conservation_data']
# Cell
import requests
import json
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
import warnings
import os
from scipy import stats
import multiprocessing
# Cell
def ensembl_post(ext, data, headers=None, params=None):
"""Generic wrapper for using POST requests to the ensembl rest API
:param ext: str, url extension
:param data: dict, query data
:param headers: dict or None, meta-information for query
:param params: dict or None, parameters for query
:return: Response object
"""
if params is None:
params = {}
if headers is None:
headers = {}
data = json.dumps(data)
r = requests.post("https://rest.ensembl.org"+ext, headers=headers, data=data, params=params)
if not r.ok:
r.raise_for_status()
else:
return r
# Cell
def chunks(lst, n):
"""Yield successive n-sized chunks from lst.
lst: list
n: int
returns: generator of list chunks
"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
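# Illustrative usage sketch (added; not part of the original module):
# chunks() is what post_transcript_sequence below uses to split IDs into POST-sized
# batches, e.g.
#
#   list(chunks(list(range(5)), 2))  # -> [[0, 1], [2, 3], [4]]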
def post_transcript_sequence_chunk(ids, params, headers):
"""Helper function for post_transcript_sequence
:param ids: list
:param params: dict
:param headers: dict
:return: dict
"""
data = {'ids': ids}
r = ensembl_post("/sequence/id/", data=data, params=params,
headers=headers)
seq = r.json()
return seq
def post_transcript_sequence(ensembl_ids, seq_type='protein', max_queries=50,
n_jobs=1, **kwargs):
"""Request multiple types of sequence by stable identifier. Supports feature masking and expand options.
Uses https://rest.ensembl.org/documentation/info/sequence_id_post
:param ensembl_ids: list of str
:param seq_type: str, one of [genomic, cds, cdna, protein]
:param max_queries: int, maximum number of queries for post
:param n_jobs: int, number of jobs to run in parallel
:param kwargs: additional parameter arguments
:return: list, dict of sequences 5' to 3' in the same orientation as the input transcript
"""
headers={"content-type" : "application/json", "accept" : "application/json"}
params = {'type': seq_type, **kwargs}
id_chunks = list(chunks(ensembl_ids, max_queries))
seqs = Parallel(n_jobs=n_jobs)(delayed(post_transcript_sequence_chunk)
(ids, params, headers) for ids in tqdm(id_chunks))
# flatten list
seqs = [item for sublist in seqs for item in sublist]
return seqs
# Cell
def build_transcript_aa_seq_df(design_df, transcript_id_col='Target Transcript',
transcript_len_col='Target Total Length', n_jobs=1):
"""Get amino acid sequence for transcripts of interest
:param design_df: DataFrame
:param transcript_id_col: str, column with ensembl transcript id
:param transcript_len_col: str, column with length of transcript
:param n_jobs: int, number of jobs to use to query transcripts
:return: DataFrame
"""
unique_transcripts = design_df[[transcript_id_col, transcript_len_col]].drop_duplicates()
unique_transcripts['Transcript Base'] = unique_transcripts[transcript_id_col].str.split('.', expand=True)[0]
print("Getting amino acid sequences")
aa_seqs = post_transcript_sequence(unique_transcripts['Transcript Base'].to_list(),
n_jobs=n_jobs)
aa_seq_df = (pd.DataFrame(aa_seqs)
.rename({'query': 'Transcript Base'}, axis=1))
missing_seqs = (unique_transcripts['Transcript Base'][~unique_transcripts['Transcript Base'].isin(
aa_seq_df['Transcript Base']
)])
if len(missing_seqs) > 0:
warnings.warn('Unable to find translations for the following transcripts: ' + ', '.join(missing_seqs))
aa_seq_len_df = (unique_transcripts.merge(aa_seq_df, on='Transcript Base'))
aa_seq_len_df['AA len'] = aa_seq_len_df['seq'].str.len()
filtered_aa_seq_len_df = (aa_seq_len_df[aa_seq_len_df[transcript_len_col] ==
(aa_seq_len_df['AA len'] + 1)*3 ]
.reset_index(drop=True))
filtered_seqs = (aa_seq_len_df['Transcript Base'][~aa_seq_len_df['Transcript Base'].isin(
filtered_aa_seq_len_df['Transcript Base']
)])
if len(filtered_seqs) > 0:
warnings.warn('Filtered transcripts where the transcript length and amino acid ' +
'sequence length did not agree: ' + ', '.join(filtered_seqs))
return filtered_aa_seq_len_df
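# Illustrative usage sketch (added; not part of the original module). The transcript ID
# and length below are hypothetical, and the call queries the live Ensembl REST API:
#
#   design_df = pd.DataFrame({
#       'Target Transcript': ['ENST00000000000.1'],
#       'Target Total Length': [303],
#   })
#   aa_df = build_transcript_aa_seq_df(design_df, n_jobs=1)
#   # aa_df keeps only rows where Target Total Length == (AA len + 1) * 3, i.e. the
#   # amino acid sequence plus a stop codon matches the reported transcript length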
# Cell
def ensembl_get(ext, query=None, headers=None, params=None):
"""Generic wrapper for using GET requests to the ensembl rest API
ext: str, url extension |
query: str or None, end of url extension specifying species, taxon, ensembl_id etc |
headers: dict or None, meta-information for query |
params: dict or None, parameters for query |
returns: Response object
"""
if query is None:
query = ''
if params is None:
params = {}
if headers is None:
headers = {}
r = requests.get("https://rest.ensembl.org"+ext+query, params=params, headers=headers)
if not r.ok:
r.raise_for_status()
else:
return r
def get_translation_overlap(ensembl_id):
"""Get features that overlap with translation, such as protein domains
:param ensembl_id: str
:return: DataFrame
"""
headers = {'content-type': 'application/json'}
ext = '/overlap/translation/' + ensembl_id
r = ensembl_get(ext, headers=headers)
decoded = r.json()
return decoded
# Cell
def build_translation_overlap_df(protein_ids, n_jobs=1):
"""Get protein domain information
:param protein_ids: list of str, ensemble protein IDs
:param n_jobs: int
:return: DataFrame
"""
print('Getting protein domains')
translation_overlap_list = Parallel(n_jobs=n_jobs)(delayed(get_translation_overlap)
(id) for id in tqdm(protein_ids))
# flatten list
translation_overlap_list = [item for sublist in translation_overlap_list for item in sublist]
translation_overlap_df = pd.DataFrame(translation_overlap_list).rename({'Parent': 'Transcript Base'}, axis=1)
return translation_overlap_df
# Cell
def write_transcript_data(design_df, transcript_id_col='Target Transcript',
transcript_len_col='Target Total Length', n_jobs=1,
overwrite=True, filepath='./data/target_data/',
aa_seq_name='aa_seqs.pq',
protein_domain_name='protein_domains.pq'):
"""Write amino acid sequences and protein domain information to parquet files
:param design_df: DataFrame
:param transcript_id_col: str
:param transcript_len_col: str
:param n_jobs: int
:param overwrite: bool, whether to overwrite existing file
:param filepath: str, directory for output sequences
:param aa_seq_name: str, name of amino acid sequence file
:param protein_domain_name: str, name of protein domain file
"""
if (os.path.isfile(filepath + aa_seq_name) or os.path.isfile(filepath + protein_domain_name)) and (not overwrite):
raise ValueError('Transcript data already exits and cannot be overwritten')
else:
transcript_aa_seq_df = build_transcript_aa_seq_df(design_df, transcript_id_col=transcript_id_col,
transcript_len_col=transcript_len_col,
n_jobs=n_jobs)
translation_overlap_df = build_translation_overlap_df(transcript_aa_seq_df['id'],
n_jobs=n_jobs)
if not os.path.isdir(filepath):
print('Creating new directory ' + filepath)
os.makedirs(filepath)
transcript_aa_seq_df.to_parquet(path=filepath + aa_seq_name, engine='pyarrow',
index=False)
translation_overlap_df.to_parquet(path=filepath + protein_domain_name, engine='pyarrow',
index=False)
# Cell
def get_transcript_info(base_transcript):
"""Using an ensembl transcript ID, get
:param base_transcript: str
:return: (exon_df, trans_sr, chr)
exon_df: DataFrame, with global exon start and end position
trans_sr: Series, with global translation start and stop positions for CDS and translation length
chr: str
"""
r = ensembl_get("/lookup/id/" + base_transcript + "?expand=1",
headers={"Content-Type": "application/json"}, params={'expand': '1'})
decoded = r.json()
exon_df = pd.DataFrame(decoded['Exon'])
trans_sr = pd.Series(decoded['Translation'])
chr = decoded['seq_region_name']
return exon_df, trans_sr, chr
# Cell
def get_conservation(chr, start, end, genome):
"""Get conservation scores for a given region of a genome
:param chr: str, chromosome number
:param start: int
:param end: int
:param genome: str
:return: DataFrame
"""
api_url = 'http://api.genome.ucsc.edu/getData/track'
if genome == 'hg38':
track = 'phyloP100way'
elif genome == 'mm39':
track = 'phyloP35way'
else:
raise ValueError('Genome not recognized')
chrom = 'chr' + chr
params = {
'genome': genome,
'track': track,
'start': start,
'end': end,
'chrom': chrom
}
results = requests.get(api_url, data=params)
if results.ok:
value_df = (pd.DataFrame([pd.Series(x) for x in pd.read_json(results.content.decode('utf8'))[chrom].values])
.rename(columns={'value': 'conservation'}))
else:
raise ValueError(results.reason)
return value_df
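# Illustrative usage sketch (added; not part of the original module). The coordinates
# below are hypothetical; the call queries the public UCSC API and returns a DataFrame
# with one row per base, including a 'conservation' (phyloP) column:
#
#   cons = get_conservation(chr='1', start=1_000_000, end=1_000_010, genome='hg38')
#   # cons['conservation'] holds the per-base phyloP scores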
# Cell
def get_exon_conservation(exon_df, chr, genome):
"""Get conservation scores for each exon
:param exon_df: DataFrame
:param chr: str
:param genome: str
:return: DataFrame
"""
conservation_dict = {}
for i, row in exon_df.set_index('id').iterrows():
# subtract one since the nucleotide conservation corresponds to the "end" index
conservation_dict[i] = get_conservation(chr, row['start'] - 1, row['end'], genome)
# get the conservation of i
conservation_df = (
|
pd.concat(conservation_dict)
|
pandas.concat
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation via a multiplication-based form, since a naive division can overflow to infinity or produce NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0)  # accumulator for the sum of squared deviations from the mean
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
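# --- Hedged sanity-check sketch (not part of the original class) ---
# The loop above computes a sample standard deviation (ddof = 1). On ordinary float64 data it
# should agree with numpy.std(..., ddof=1); the array below is made up purely for illustration
# and relies on the module's existing numpy import.
_sd_check_sample = numpy.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
_sd_check_manual = numpy.sqrt(((_sd_check_sample - _sd_check_sample.mean()) ** 2).sum() / (_sd_check_sample.size - 1))
assert abs(_sd_check_manual - numpy.std(_sd_check_sample, ddof=1)) < 1e-12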
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
    meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
    sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
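# --- Hedged illustration of the sigma cutoff (synthetic, assumed data) ---
# The upper bound above is mean + multiplierSigma * sigma; with the default of 3 sigma it keeps
# roughly 99.7% of normally distributed values. Standalone demo only, not part of the class API.
_demo_values = numpy.random.default_rng(0).normal(loc=100.0, scale=10.0, size=10000)
_demo_cutoff = _demo_values.mean() + 3.0 * _demo_values.std()
print("Share of demo values below mean + 3*sigma:", (_demo_values < _demo_cutoff).mean())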
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans rows of the data frame whose values are invalid, e.g. inf or NaN.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# Drop rows that timed out, i.e. values beyond mean + multiplierSigma * sigma.
logicVector = (dataAnalysisCleaned[columnName] < topValue)  # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Replaces invalid values (NaT, infinities, string sentinels) with NaN and drops them, optionally range-cleaning one column first.
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
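# --- Hedged miniature example of the replace/dropna cleanup (made-up frame) ---
# Sentinel strings and infinities are mapped to NaN and the affected rows are dropped, mirroring
# the strategy in _cleanFrame above. Standalone sketch only, using the module's pandas/numpy imports.
_demo_frame = pandas.DataFrame({"cycles": ["10", "inf", "25", "NULL"], "temp": [30.0, 31.0, numpy.inf, 33.0]})
_demo_frame = _demo_frame.replace(to_replace=["inf", "-inf", "NULL", numpy.inf, -numpy.inf], value=numpy.nan)
print(_demo_frame.dropna())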
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten":
|
pandas.StringDtype()
|
pandas.StringDtype
|
import pandas as pd
from pathlib import Path
dataset_dir = Path("Dataset/self-built-masked-face-recognition-dataset")
mask_dir = dataset_dir/"AFDB_masked_face_dataset"
mask_less_dir = dataset_dir/"AFDB_face_dataset"
# creating a dataframe object to store the masked and no masked images with their labels
df_object =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import glob
from numpy import tile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
methods = ['vanilla', 'BMS_in', 'BAS_in', 'baseline', 'baseline_na']
target_datasets = ['EuroSAT', 'CropDisease', 'ISIC', 'ChestX']
def get_logs_path(method, target):
root = './logs'
method_path = root + '/' + method
if not os.path.isdir(method_path):
    print("The method {}'s path doesn't exist".format(method))
log_path = method_path + '/' + target
if not os.path.isdir(log_path):
    log_path = method_path
return log_path
def plot_all():
for method in methods:
for target in target_datasets:
log_path = get_logs_path(method, target)
best_check = log_path + '/' + 'checkpoint_best.pkl'
train_log = glob.glob(log_path + '/' + 'train_*.csv')
val_log = glob.glob(log_path + '/' + 'val_*.csv')
if (len(train_log) == 0) or (len(val_log) == 0):
    raise ValueError('The path {} does not contain logs'.format(log_path))
elif (len(train_log) > 1) or (len(val_log) > 1):
    raise ValueError('The path {} contains extra logs'.format(log_path))
else:
train_log = train_log[0].replace('\\', '/')
val_log = val_log[0].replace('\\', '/')
df = pd.read_csv(val_log)
columns = df.columns
df = pd.DataFrame(np.repeat(df.values,2,axis=0))
df.columns = columns
df['Loss_train'] = pd.read_csv(train_log)['Loss']
df.plot( y=["Loss_train", 'Loss_test'])
df.plot( y=["top1_base_test"] )
plt.title('{0}_{1}'.format(method, target))
plt.show()
def compare_baselines():
# Compare the 'baseline' and 'baseline_na' runs.
baseline_method = 'baseline'
baseline_na_method = 'baseline_na'
root = './logs'
baseline_path = root + '/' + baseline_method
baseline_na_path = root + '/' + baseline_na_method
baseline_check = baseline_path + '/' + 'checkpoint_best.pkl'
baseline_na_check = baseline_na_path + '/' + 'checkpoint_best.pkl'
baseline_train_log = glob.glob(baseline_path + '/' + 'train_*.csv')
baseline_val_log = glob.glob(baseline_path + '/' + 'val_*.csv')
baseline_na_train_log = glob.glob(baseline_na_path + '/' + 'train_*.csv')
baseline_na_val_log = glob.glob(baseline_na_path + '/' + 'val_*.csv')
baseline_train_log = baseline_train_log[0].replace('\\', '/')
baseline_val_log = baseline_val_log[0].replace('\\', '/')
baseline_na_train_log = baseline_na_train_log[0].replace('\\', '/')
baseline_na_val_log = baseline_na_val_log[0].replace('\\', '/')
df_baseline_train = pd.read_csv(baseline_train_log)
df_baseline_val = pd.read_csv(baseline_val_log)
df_baseline_na_train = pd.read_csv(baseline_na_train_log)
df_baseline_na_val = pd.read_csv(baseline_na_val_log)
columns = df_baseline_val.columns
df_baseline_val = pd.DataFrame(np.repeat(df_baseline_val.values,2,axis=0))
df_baseline_na_val = pd.DataFrame(np.repeat(df_baseline_na_val.values,2,axis=0))
df_baseline_val.columns = columns
df_baseline_na_val.columns = columns
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Data preparation script for GNN tracking.
This script processes h5 files of the ntuple and produces graph data on disk.
Will also save a csv file of the time to build each graph
The differences between Savannah's code is:
- CMS geometry instead of TrackML geometry (main difference is just the layers that connect to each other and their numbering)
- Intersecting lines capability removed (should be added back in)
"""
# System
import os
import sys
import time
import argparse
import logging
import multiprocessing as mp
from functools import partial
sys.path.append("../")
# Externals
import yaml
import pickle
import numpy as np
import pandas as pd
import csv
# Locals
from collections import namedtuple
Graph = namedtuple('Graph', ['x', 'edge_attr', 'edge_index', 'y', 'pid', 'pt', 'eta'])
# the following will create a list of accepted layer transitions
# there are 4 inner barrel layers
l = np.arange(1,5)
# creates combinations (1,2), (2,3), etc.
layer_pairs = np.stack([l[:-1], l[1:]], axis=1)
n_det_layers = 18
# left_side endcap, creates (5,6), (6,7) etc.
EC_L = np.arange(5, 17)
EC_L_pairs = np.stack([EC_L[:-1], EC_L[1:]], axis=1)
layer_pairs = np.concatenate((layer_pairs, EC_L_pairs), axis=0)
# right side endcap
EC_R = np.arange(17, 29)
EC_R_pairs = np.stack([EC_R[:-1], EC_R[1:]], axis=1)
layer_pairs = np.concatenate((layer_pairs, EC_R_pairs), axis=0)
# transitions between any barrel layer and nearest endcap layer also allowed
barrel_EC_L_pairs = np.array([(1,5), (2,5), (3,5), (4,5)])
barrel_EC_R_pairs = np.array([(1,17), (2,17), (3,17), (4,17)])
layer_pairs = np.concatenate((layer_pairs, barrel_EC_L_pairs), axis=0)
layer_pairs = np.concatenate((layer_pairs, barrel_EC_R_pairs), axis=0)
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser('prepare.py')
add_arg = parser.add_argument
add_arg('config', nargs='?', default='configs/geometric.yaml')
add_arg('--n-workers', type=int, default=1)
add_arg('--task', type=int, default=0)
add_arg('--n-tasks', type=int, default=1)
add_arg('-v', '--verbose', action='store_true')
add_arg('--show-config', action='store_true')
add_arg('--interactive', action='store_true')
add_arg('--start-evt', type=int, default=1000)
add_arg('--end-evt', type=int, default=3000)
return parser.parse_args()
# Construct the graph
def calc_dphi(phi1, phi2):
"""Computes phi2-phi1 given in range [-pi,pi]"""
dphi = phi2 - phi1
dphi[dphi > np.pi] -= 2*np.pi
dphi[dphi < -np.pi] += 2*np.pi
return dphi
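# Hedged mini-example (made-up angles, not event data): a raw difference of -6.0 rad wraps to
# about +0.28 rad, keeping dphi inside [-pi, pi].
_dphi_demo = calc_dphi(np.array([3.0]), np.array([-3.0]))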
def calc_eta(r, z):
theta = np.arctan2(r, z)
return -1. * np.log(np.tan(theta / 2.))
def select_segments(hits1, hits2, phi_slope_max, z0_max,
layer1, layer2):
"""
Constructs a list of selected segments from the pairings
between hits1 and hits2, filtered with the specified
phi slope and z0 criteria.
Returns: pd DataFrame of (index_1, index_2), corresponding to the
DataFrame hit label-indices in hits1 and hits2, respectively.
"""
# Start with all possible pairs of hits
hit_pairs = hits1.reset_index().merge(hits2.reset_index(), on='evt', suffixes=('_1', '_2'))
#print(hit_pairs)
# Compute line through the points
dphi = calc_dphi(hit_pairs.phi_1, hit_pairs.phi_2)
dz = hit_pairs.z_2 - hit_pairs.z_1
dr = hit_pairs.r_2 - hit_pairs.r_1
eta_1 = calc_eta(hit_pairs.r_1, hit_pairs.z_1)
eta_2 = calc_eta(hit_pairs.r_2, hit_pairs.z_2)
deta = eta_2 - eta_1
dR = np.sqrt(deta**2 + dphi**2)
phi_slope = dphi / dr
z0 = hit_pairs.z_1 - hit_pairs.r_1 * dz / dr
# Filter segments according to phi slope and z0 criteria
good_seg_mask = ((phi_slope.abs() < phi_slope_max) &
(z0.abs() < z0_max))
dr = dr[good_seg_mask]
dphi = dphi[good_seg_mask]
dz = dz[good_seg_mask]
dR = dR[good_seg_mask]
return hit_pairs[good_seg_mask], dr, dphi, dz, dR
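# Hedged miniature usage of select_segments (synthetic single-hit layers, loose cuts): the pair
# below passes both the phi-slope and z0 criteria, so exactly one segment survives. Column names
# follow what the function expects (evt, phi, z, r); the numbers are assumptions for illustration.
_demo_hits1 = pd.DataFrame({"evt": [0], "phi": [0.10], "z": [5.0], "r": [30.0]})
_demo_hits2 = pd.DataFrame({"evt": [0], "phi": [0.12], "z": [9.0], "r": [60.0]})
_demo_selected, _ddr, _ddphi, _ddz, _ddR = select_segments(_demo_hits1, _demo_hits2,
    phi_slope_max=0.01, z0_max=200.0, layer1=1, layer2=2)
print("demo segments kept:", len(_demo_selected))  # expected: 1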
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max,
feature_names, feature_scale, evt="-1"):
"""Construct one graph (i.e. from one event)
The graph contains:
- Node information: r,
- Edge information: dr, dR, dz, dphi
- Particle: Particle id, momentum and eta
- y label: 1 if a true edge, 0 otherwise
"""
t0 = time.time()
# Loop over layer pairs and construct segments
segments = []
seg_dr, seg_dphi, seg_dz, seg_dR = [], [], [], []
# for all accepted layer combinations construct segments
for (layer1, layer2) in layer_pairs:
# Find and join all hit pairs for one combo of layers at a time
try:
hits1 = hits[hits['layer_id']==layer1]
hits2 = hits[hits['layer_id']==layer2]
# If an event has no hits on a layer, we get a KeyError.
# In that case we just skip to the next layer pair
except KeyError as e:
logging.info('skipping empty layer: %s' % e)
continue
# Construct the segments
selected, dr, dphi, dz, dR = select_segments(hits1, hits2, phi_slope_max, z0_max,
layer1, layer2)
segments.append(selected)
seg_dr.append(dr)
seg_dphi.append(dphi)
seg_dz.append(dz)
seg_dR.append(dR)
# Combine segments from all layer pairs
# segments contains the index in the hit DataFrame of the two hits that may be connected
segments = pd.concat(segments)
seg_dr, seg_dphi = pd.concat(seg_dr), pd.concat(seg_dphi)
seg_dz, seg_dR =
|
pd.concat(seg_dz)
|
pandas.concat
|
# Load the dataset
# Select the string-typed columns
# Convert them to the categorical dtype
# Iterate over the categorical columns and replace them with their cat.codes
from scipy.io import arff
import numpy as np
import pandas as pd
data_name = "asia"
missing_percentage = 0.2
data_types = "cccccccc"
i = 1
percentage_string = "0" + str(int(missing_percentage * 10))
base_path = "../vbsem_data/discrete/" + data_name + "/" + percentage_string + "/"
missing_data_path = base_path + data_name + "_" + percentage_string + "_" + str(i) + ".arff"
missing_data = arff.loadarff(missing_data_path)
missing_data =
|
pd.DataFrame(missing_data[0])
|
pandas.DataFrame
|
"""This module contains code to calculate the NCP for heterogeneous datasets"""
import logging
import math
import pandas as pd
from anytree import AnyNode
from pandas.api.types import is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype
from kernel.util import flatten_set_valued_series, is_node, is_token_list, must_be_flattened
from kernel.recoding import recode_range_hierarchical
logger = logging.getLogger(__name__)
def calculate_normalized_certainty_penalty(original, anonymized, relational_attributes, textual_attributes_mapping):
"""
Takes the original dataset, the anonymized dataset, a list of relational quasi-identifying attributes,
and a mapping of textual attributes, and calculates the Normalized Certainty Penalty (NCP).
Parameters
----------
original: DataFrame
The original dataframe.
anonymized: DataFrame
The anonymized dataframe.
relational_attributes: list
List containing relational attributes.
textual_attributes_mapping: dict
Mapping of textual attributes and their helper attributes.
Returns
-------
Tuple
Tuple with total information loss, relational information loss, and detailed textual information loss.
"""
# Calculate relational information loss
relational_information_loss = 0
for attribute in [attr for attr in original if attr in relational_attributes]:
ncp = __calculate_ncp_attribute(original[attribute], anonymized[attribute])
relational_information_loss = relational_information_loss + ncp
logger.debug("Information loss for attribute %s is %4.4f", attribute, ncp)
relational_information_loss = relational_information_loss / len(relational_attributes)
# Calculate textual information loss
if len(textual_attributes_mapping) > 0:
textual_information_loss = {}
total_loss = 0
# For each original textual attribute
for mapping in textual_attributes_mapping:
textual_attributes = textual_attributes_mapping[mapping]
textual_information_loss[mapping] = {}
original_textual_tokens = []
anonymized_textual_tokens = []
# Individual textual information loss per attribute and entity
for attribute in textual_attributes:
attribute_loss = __calculate_ncp_attribute(original[attribute], anonymized[attribute])
textual_information_loss[mapping][attribute] = attribute_loss
logger.debug("Information loss for entity type %s is %4.4f", attribute, attribute_loss)
# Total textual information loss per attribute
for index in original.index:
original_container = []
anonymized_container = []
for attribute in textual_attributes:
original_for_col = original.at[index, attribute]
anonymized_for_col = anonymized.at[index, attribute]
if original_for_col:
original_container += original_for_col
if anonymized_for_col:
anonymized_container += anonymized_for_col
if len(original_container) > 0:
original_textual_tokens.append(original_container)
else:
original_textual_tokens.append(None)
if len(anonymized_container) > 0:
anonymized_textual_tokens.append(anonymized_container)
else:
anonymized_textual_tokens.append(None)
attribute_total_loss = __calculate_ncp_attribute(pd.Series(original_textual_tokens, index=original.index), pd.Series(anonymized_textual_tokens, index=anonymized.index))
# Set total information loss for a single attribute
textual_information_loss[mapping]["total"] = attribute_total_loss
logger.debug("Information loss for attribute %s is %4.4f", mapping, attribute_total_loss)
total_loss += attribute_total_loss
# Set total information loss for all textual attributes
textual_information_loss["total"] = total_loss / len(textual_attributes_mapping)
return (relational_information_loss + textual_information_loss["total"]) / 2, relational_information_loss, textual_information_loss
return relational_information_loss, relational_information_loss, None
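# Hedged, simplified illustration (this is NOT the module's private __calculate_ncp_attribute):
# for a single numeric attribute, a textbook NCP charges each record the width of its generalized
# interval relative to the attribute's full domain range, then averages over records. The toy
# helper and numbers below are assumptions for illustration only.
def _toy_numeric_ncp(original_values, generalized_intervals):
    domain_range = max(original_values) - min(original_values)
    if domain_range == 0:
        return 0.0
    widths = [(upper - lower) / domain_range for lower, upper in generalized_intervals]
    return sum(widths) / len(widths)
print(_toy_numeric_ncp([20, 25, 30, 40], [(20, 30), (20, 30), (20, 30), (40, 40)]))  # 0.375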
def __calculate_ncp_attribute(original_series, anonymized_series):
if must_be_flattened(original_series):
original_flattened, original_indexes, is_category = flatten_set_valued_series(original_series)
if is_categorical_dtype(original_series) or is_category:
original_flattened_series = pd.Series(original_flattened, index=original_indexes, dtype="category", name=original_series.name)
else:
original_flattened_series =
|
pd.Series(original_flattened, index=original_indexes, name=original_series.name)
|
pandas.Series
|
import pandas as pd
import numpy as np
import datetime
import json
import pickle
from pathlib import Path
from difflib import SequenceMatcher
from pickle_functions import *
from app_functions import *
from process_functions import write_log
path_input = Path.cwd() / 'input'
Path.mkdir(path_input, exist_ok = True)
path_life_table_BE = Path.cwd() / 'input' / 'sterftetafelsAE.xls'
path_geo_BE = Path.cwd() / 'input' / 'municipalities-belgium.geojson'
path_deaths_BE = Path.cwd() / 'input' / 'TF_DEATHS.xlsx'
path_pop_BE = Path.cwd() / 'input' / 'pop_muniBE.xlsx'
path_life_table_BE = Path.cwd() / 'input' / 'sterftetafelsAE.xls'
url_epistat = 'https://epistat.sciensano.be/Data/COVID19BE.xlsx'
BE_data_cases = clean_data_be(url_epistat, cases = True, hosp = False, deaths = False)
BE_data_hosp = clean_data_be(url_epistat, cases = False, hosp = True, deaths = False)
BE_data_cases['CASES'] = BE_data_cases.groupby(['DATE', 'PROVINCE'])['CASES'].sum()
BE_data_cases = BE_data_cases.groupby(['DATE','PROVINCE']).first()
BE_data_cases = BE_data_cases[['CASES']]
BE_data_cases = BE_data_cases.rename(columns={"CASES": "Cases"})
BE_data_hosp['Released from hospital'] = BE_data_hosp.groupby(['PROVINCE'])['NEW_OUT'].cumsum()
BE_data_hosp['Total hospitalized'] = BE_data_hosp.groupby(['PROVINCE'])['NEW_IN'].cumsum()
BE_data_hosp = BE_data_hosp.rename(columns={"TOTAL_IN": "Hospitalized", 'TOTAL_IN_ICU': 'ICU', 'TOTAL_IN_RESP': 'Respiratory'})
BE_data_hosp = BE_data_hosp.reset_index()
BE_data_hosp = BE_data_hosp.rename(columns={"index": "DATE"})
BE_data_hosp['DATE'] = BE_data_hosp['DATE'].astype('str')
BE_data_hosp = BE_data_hosp.set_index(['DATE','PROVINCE'])
BE_total_prov = BE_data_cases.merge(BE_data_hosp, left_index = True, right_index = True, how='outer')
BE_total_prov['Cases'] = BE_total_prov['Cases'].fillna(0.0)
BE_total_prov.insert(loc = 2, column = 'Cumulative cases', value = BE_total_prov.groupby(['PROVINCE'])['Cases'].cumsum())
BE_total_prov_merged = BE_total_prov.reset_index('PROVINCE').copy()
BE_total_merged = BE_total_prov_merged.copy()
BE_total_merged['PROVINCE'] = 'Belgium'
BE_total_merged = BE_total_merged.groupby(level = 0).sum(min_count = 1)
BE_data_deaths = clean_data_be(url_epistat, cases = False, hosp = False, deaths = True)
BE_total_deaths = cum_deaths_by_date(BE_data_deaths)
BE_total_merged = BE_total_merged.merge(BE_total_deaths, left_index = True, right_index = True, how='outer')
for date in set(BE_total_prov_merged.index):
for var in ['Cumulative cases', 'Released from hospital', 'Total hospitalized']:
temp_data = BE_total_prov_merged[var].loc[date].reset_index()
for i in range(len(temp_data[var])):
if np.isnan(temp_data.iloc[i][var]):
BE_total_merged.at[date, var] = np.nan
available_provinces = ['Belgium']
for prov in sorted(set(BE_total_prov_merged['PROVINCE'])):
available_provinces.append(prov)
BE_reg_deaths = clean_data_be(url_epistat, cases = False, hosp = False, deaths = True)
BE_reg_cases = clean_data_be(url_epistat, cases = True, hosp = False, deaths = False)
BE_reg_pop = pd.read_excel(path_pop_BE, sheet_name = 'Bevolking in 2019', header = [1])
BE_reg_pop = BE_reg_pop.loc[(BE_reg_pop['Woonplaats'] == 'Vlaams Gewest') | (BE_reg_pop['Woonplaats'] == 'Waals Gewest') | (BE_reg_pop['Woonplaats'] == 'Brussels Hoofdstedelijk Gewest')]
BE_reg_pop = BE_reg_pop.rename(columns = {'Woonplaats': 'Region', 'Mannen': 'Male', 'Vrouwen': 'Female', 'Totaal': 'Total'})
BE_reg_pop.loc[BE_reg_pop['Region'] == 'Vlaams Gewest', 'Region'] = 'Flanders'
BE_reg_pop.loc[BE_reg_pop['Region'] == 'Waals Gewest', 'Region'] = 'Wallonia'
BE_reg_pop.loc[BE_reg_pop['Region'] == 'Brussels Hoofdstedelijk Gewest', 'Region'] = 'Brussels'
df_reg_male_deaths = BE_reg_deaths.loc[BE_reg_deaths['SEX'] == 'M'].copy()
df_reg_female_deaths = BE_reg_deaths.loc[BE_reg_deaths['SEX'] == 'F'].copy()
df_reg_male_cases = BE_reg_cases.loc[BE_reg_cases['SEX'] == 'M'].copy()
df_reg_female_cases = BE_reg_cases.loc[BE_reg_cases['SEX'] == 'F'].copy()
BE_reg_total_deaths = aggregate_regions(BE_reg_deaths, 'DEATHS')
BE_reg_total_cases = aggregate_regions(BE_reg_cases, 'CASES')
BE_reg_male_deaths = aggregate_regions(df_reg_male_deaths, 'DEATHS')
BE_reg_female_deaths = aggregate_regions(df_reg_female_deaths, 'DEATHS')
BE_reg_male_cases = aggregate_regions(df_reg_male_cases, 'CASES')
BE_reg_female_cases = aggregate_regions(df_reg_female_cases, 'CASES')
df_epistat_muni =
|
pd.read_excel(url_epistat, sheet_name = 'CASES_MUNI_CUM', usecols = ['CASES', 'TX_DESCR_FR', 'TX_DESCR_NL', 'NIS5'])
|
pandas.read_excel
|
import unittest
import pandas as pd
import numpy as np
import threading
import functools
import inspect
import math
import warnings
import traceback
from parameterized import parameterized
from scipy.io.arff import loadarff
from scipy.stats import ttest_1samp, ks_2samp
from sklearn.cluster import KMeans
class TestTimeoutException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# thanks to https://gist.github.com/vadimg/2902788
def timeout(duration, default=None):
def decorator(func):
class InterruptableThread(threading.Thread):
def __init__(self, args, kwargs):
threading.Thread.__init__(self)
self.args = args
self.kwargs = kwargs
self.result = default
self.daemon = True
self.exception = None
def run(self):
try:
self.result = func(*self.args, **self.kwargs)
except Exception as e:
self.exception = e
@functools.wraps(func)
def wrap(*args, **kwargs):
it = InterruptableThread(args, kwargs)
it.start()
it.join(duration)
if it.is_alive():
raise TestTimeoutException('timeout after %i seconds for test %s' % (duration, func))
if it.exception:
raise it.exception
return it.result
return wrap
return decorator
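# Hedged usage sketch (hypothetical function, not part of the test suite below): the decorator
# runs the wrapped call in a daemon thread and raises TestTimeoutException only if the call is
# still running after `duration` seconds.
@timeout(2, default=None)
def _quick_square(x):
    return x * x
print(_quick_square(3))  # finishes well inside the 2-second budget, prints 9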
class test_SKLEARN_KMeans(unittest.TestCase):
params = [("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':3,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':3,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':4,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':4,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'random','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'random','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':1,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':1,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':19,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':19,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':100,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':100,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':500,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':500,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.000001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.000001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.00001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.00001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.01,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.01,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':True,'verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':True,'verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':False,'verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':False,'verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':1,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':1,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':False,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':False,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':-1,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':-1,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':2,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':2,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'auto',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'full',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'full',}),
("{'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'elkan',}", {'n_clusters':2,'n_init':10,'tol':0.0001,'init':'k-means++','n_jobs':None,'max_iter':300,'copy_x':True,'precompute_distances':'auto','verbose':0,'algorithm':'elkan',}),
]
def assert_morphtest(self, evaluation_type, testcase_name, iteration, deviations_clust, pval_ttest, deviations_pvals, no_exception, exception_type, exception_message, exception_stacktrace):
if no_exception:
if evaluation_type=='clust_exact':
self.assertEqual(deviations_clust, 0)
elif evaluation_type=='clust_stat':
self.assertTrue(pval_ttest > 0.05)
elif evaluation_type=='score_stat':
self.assertEqual(deviations_pvals, 0)
else:
raise ValueError('invalid evaluation_type: %s (allowed: clust_exact, clust_stat, score_stat)' % evaluation_type)
else:
raise RuntimeError('%s encountered: %s %s' % (exception_type, exception_message, exception_stacktrace))
def flip_same_clusters(self, morph_clusters, expected_clusters):
flipped_clusters = {}
for morph_cluster in morph_clusters:
flipped = False
for exp_cluster in expected_clusters:
if morph_clusters[morph_cluster] == expected_clusters[exp_cluster]:
flipped_clusters[exp_cluster] = expected_clusters[exp_cluster]
flipped = True
break
if not flipped:
flipped_clusters[morph_cluster] = morph_clusters[morph_cluster]
return flipped_clusters
def create_cluster_map(self, data):
cluster_map = {}
for i, c in enumerate(data):
if c not in cluster_map:
cluster_map[c] = [i]
else:
cluster_map[c].append(i)
return cluster_map
def create_scores_map(self, cluster_map, scores):
scores_map = {}
for c in cluster_map:
for i in cluster_map[c]:
if c not in scores_map:
scores_map[c] = [scores[i]]
else:
scores_map[c].append(scores[i])
return scores_map
@parameterized.expand(params)
@timeout(21600)
def test_Uniform(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/Uniform_%i_training.arff' % iter)
data_df = pd.DataFrame(data)
data_df = pd.get_dummies(data_df)
clusterer = KMeans(**kwargs)
np.random.seed(42)
clusterer.fit_predict(data_df.values)
@parameterized.expand(params)
@timeout(21600)
def test_MinFloat(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/MinFloat_%i_training.arff' % iter)
data_df = pd.DataFrame(data)
data_df = pd.get_dummies(data_df)
clusterer = KMeans(**kwargs)
np.random.seed(42)
clusterer.fit_predict(data_df.values)
@parameterized.expand(params)
@timeout(21600)
def test_VerySmall(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/VerySmall_%i_training.arff' % iter)
data_df = pd.DataFrame(data)
data_df = pd.get_dummies(data_df)
clusterer = KMeans(**kwargs)
np.random.seed(42)
clusterer.fit_predict(data_df.values)
@parameterized.expand(params)
@timeout(21600)
def test_MinDouble(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/MinDouble_%i_training.arff' % iter)
data_df = pd.DataFrame(data)
data_df = pd.get_dummies(data_df)
clusterer = KMeans(**kwargs)
np.random.seed(42)
clusterer.fit_predict(data_df.values)
@parameterized.expand(params)
@timeout(21600)
def test_MaxFloat(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/MaxFloat_%i_training.arff' % iter)
data_df = pd.DataFrame(data)
data_df = pd.get_dummies(data_df)
clusterer = KMeans(**kwargs)
np.random.seed(42)
clusterer.fit_predict(data_df.values)
@parameterized.expand(params)
@timeout(21600)
def test_VeryLarge(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/VeryLarge_%i_training.arff' % iter)
data_df = pd.DataFrame(data)
data_df = pd.get_dummies(data_df)
clusterer = KMeans(**kwargs)
np.random.seed(42)
clusterer.fit_predict(data_df.values)
@parameterized.expand(params)
@timeout(21600)
def test_MaxDouble(self, name, kwargs):
for iter in range(1,1+1):
data, meta = loadarff('smokedata/MaxDouble_%i_training.arff' % iter)
data_df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 01 00:00:00 2020
@author: <NAME> (<EMAIL>)
Supervisors: <NAME> & <NAME>
Algorithm name:
Depth-Scanning Algorithm (DSA)
Framework:
1. Automatic generation of synthetic waveforms for all possible depth phases.
2. Match-filtering of all possible depth phases.
3. Preliminary determination of the focal depth.
4. Final solution based on travel time residuals.
Input:
1. Three-component waveforms.
Notice: SAC format. The header should at least contain corrected 'dist' and 'baz' values.
2. Velocity model.
Notice: TauP Toolkit format (see Section 5 in
https://www.seis.sc.edu/downloads/TauP/taup.pdf )
Output:
Focal depth (median)
For more details please refer to our paper below:
<NAME>., <NAME>., & <NAME>. (2020). Depth-Scanning Algorithm: Accurate, Automatic, and
Efficient Determination of Focal Depths for Local and Regional Earthquakes.
Journal of Geophysical Research: Solid Earth, 125, e2020JB019430.
https://doi.org/10.1029/2020JB019430
Any questions or advice? Please contact:
<EMAIL>
<EMAIL>
<EMAIL>
"""
from obspy.taup import TauPyModel, taup_create
import matplotlib.pyplot as plt
from obspy.geodetics.base import kilometer2degrees
from obspy.core import UTCDateTime
import matplotlib.pyplot as pltDebug
import math
import numpy as np
from scipy.signal import hilbert, find_peaks
from obspy import read, read_inventory
import pandas as pd
from scipy.stats import kurtosis as kurt
import scipy.stats as stats
from scipy import signal
import os, fnmatch, sys
import timeit
start = timeit.default_timer()
import shutil
import csv
plt.rcParams["font.family"] = "Times New Roman"
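# Hedged helper sketch (not part of the original DSA script; the check is an assumption drawn from
# the input requirements documented above): verify that a SAC trace carries the corrected 'dist'
# and 'baz' headers before it is handed to the scanning workflow.
def _has_required_sac_headers(sac_path):
    trace = read(sac_path)[0]
    sac_header = getattr(trace.stats, "sac", {})
    return ("dist" in sac_header) and ("baz" in sac_header)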
#%%-- subroutine: load input parameters from 'DSA_SETTINGS.txt'
def load_settings():
'''
PARAMETER DESCRIPTION
par1 data directory, including waveforms and velocity model
par2 velocity model name (this string should not include '.nd')
par3 tolerance between the observed and predicted differential travel times (second)
par4 cross-correlation coefficient threshold
par5 minimal frequency used for band-pass filter (Hz)
par6 maximal frequency used for band-pass filter (Hz)
par7 minimal scanning depth candidate (integer, km)
par8 maximal scanning depth candidate (integer, km)
par9 for monitoring: 1 -> active, 0 -> inactive
par10 plot Steps 1 and 2 of DSA: 1 -> active, 0 -> inactive
'''
try:
SETTINGS =
|
pd.read_csv('./DSA_SETTINGS.txt', delim_whitespace=True, index_col='PARAMETER')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 09:54:44 2020
@author: <NAME>
"""
import nlp as nlp
import pandas as pd
from textblob import TextBlob
import matplotlib.pyplot as plt
def get_sentence_from_array(array_of_word):
array_of_sentence = []
for elem in array_of_word:
sentence = ""
for word in elem :
sentence = sentence + " " + word
array_of_sentence.append(sentence)
df = pd.DataFrame(array_of_sentence, columns=["tweet"])
return df
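# Hedged mini-example (made-up tokens): token lists are joined back into sentences so that
# TextBlob can score them later.
_demo_tweets = get_sentence_from_array([["bitcoin", "is", "rising"], ["markets", "are", "calm"]])
print(_demo_tweets["tweet"].tolist())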
def get_sentiment_analyse(tweet_list,schema=True):
sentiments=[]
subjectivity=[]
for i in tweet_list["tweet"]:
textblb = TextBlob(i)
sentiments.append(textblb.polarity)
subjectivity.append(textblb.subjectivity)
pospol=0
negpol=0
nopol=0
subcount=0
for i in sentiments:
if (i ==0):
nopol+=1
elif(i>0):
pospol+=1
elif(i<0):
negpol+=1
for i in subjectivity:
if(i>0.5):
subcount+=1
print(' Zero polarities are '+ str(nopol))
print(' Positive polarities are '+ str(pospol))
print(' Negative polarities are '+str(negpol))
print('Number of subjective tweets are : '+ str(subcount))
Average_senti = sum(sentiments)/len(sentiments)
print("Average sentiment is: "+str(Average_senti))
if schema :
pols=['Neutral', 'Positive', 'Negative']
polcount=[nopol,pospol,negpol]
plt.pie(polcount, labels = pols,autopct='%1.2f%%')
tweet_list['Sentiment']=sentiments
tweet_list['Subjectivity']=subjectivity
print(tweet_list.head())
return (Average_senti)
if __name__ == "__main__":
df =
|
pd.read_csv("data/1001tweets_on_bitcoin.csv", sep="\\", names=['Content'])
|
pandas.read_csv
|
import numpy as np
import numpy.typing as npt
import pandas as pd
from fraud_detector.feature_engineering import feat_engineering_pipe
import optuna
from lightgbm import LGBMClassifier
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import recall_score, precision_score
from sklearn.model_selection import train_test_split
from functools import partial
from typing import Any, Tuple, Union
import pickle
def get_model() -> Pipeline:
"""Return a instance of the model's pipeline."""
lgbm_pipe = Pipeline(
[
("over", RandomOverSampler()),
("lgbm", LGBMClassifier(n_jobs=-1)),
]
)
return lgbm_pipe
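# Hedged smoke-test sketch (synthetic data, not the project's fraud dataset): the pipeline first
# oversamples the minority class with RandomOverSampler and then fits LightGBM.
_X_demo = pd.DataFrame({"f1": np.arange(60, dtype=float), "f2": np.arange(60, dtype=float) ** 2})
_y_demo = pd.Series([0] * 45 + [1] * 15)
_demo_pipeline = get_model().fit(_X_demo, _y_demo)
print(_demo_pipeline.predict(_X_demo.head(3)))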
def obj(
trial: optuna.trial.Trial,
train_data: Tuple[pd.DataFrame, pd.DataFrame],
eval_data: Tuple[pd.DataFrame, pd.DataFrame],
) -> Union[float, Any]:
"""Objective function of the hyperparameter optimization."""
params = {
"over__sampling_strategy": trial.suggest_float("over__sampling_strategy", 0.5, 1.0),
"lgbm__learning_rate": trial.suggest_float("lgbm__learning_rate", 1e-4, 5e-1, log=True),
"lgbm__reg_alpha": trial.suggest_float("lgbm__reg_alpha", 1e-3, 1.0, log=True),
"lgbm__reg_lambda": trial.suggest_float("lgbm__reg_lambda", 1e-3, 1.0, log=True),
"lgbm__subsample": trial.suggest_float("lgbm__subsample", 0.4, 1.0),
"lgbm__colsample_bytree": trial.suggest_float("lgbm__colsample_bytree", 0.4, 1.0),
"lgbm__min_child_samples": trial.suggest_int("lgbm__min_child_samples", 1, 100, 1),
"lgbm__num_leaves": trial.suggest_int("lgbm__num_leaves", 2, 50, 1),
"lgbm__subsample_freq": trial.suggest_int("lgbm__subsample_freq", 1, 10, 1),
"lgbm__n_estimators": trial.suggest_int("lgbm__n_estimators", 100, 5000, 1),
}
model = get_model().set_params(**params)
model.fit(train_data[0], train_data[1])
preds = model.predict(eval_data[0])
return recall_score(eval_data[1], preds)
def get_test_metrics(
y_true: Union[pd.Series, npt.NDArray[Any]], y_pred: Union[pd.Series, npt.NDArray[Any]]
) -> pd.DataFrame:
"""Compute performance metrics on the test set."""
recall1 = recall_score(y_true, y_pred)
recall0 = recall_score(y_true, y_pred, pos_label=0)
precision1 = precision_score(y_true, y_pred)
precision0 = precision_score(y_true, y_pred, pos_label=0)
if isinstance(y_true, np.ndarray):
y_true = pd.Series(y_true)
proportions = y_true.value_counts(normalize=True)
metrics = pd.DataFrame(
data={
"recall": [recall0, recall1],
"precision": [precision0, precision1],
"proportion": [proportions[0], proportions[1]],
},
index=["class_0", "class_1"],
)
return metrics
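# Hedged example with tiny made-up labels: get_test_metrics reports per-class recall, precision,
# and class proportions in a small DataFrame.
_demo_metrics = get_test_metrics(pd.Series([0, 0, 1, 1]), np.array([0, 1, 1, 1]))
print(_demo_metrics)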
def train_model(dfs: Tuple[pd.DataFrame, pd.DataFrame]) -> Tuple[Pipeline, pd.DataFrame]:
"""Train and validate the model."""
train_set, test_set = dfs
X = train_set.drop(columns=["id", "fraude"])
y = train_set["fraude"]
X_train, X_eval, y_train, y_eval = train_test_split(X, y, stratify=y, test_size=0.1, random_state=15)
study = optuna.create_study(direction="maximize")
study.optimize(partial(obj, train_data=(X_train, y_train), eval_data=(X_eval, y_eval)), n_trials=50)
model = get_model().set_params(**study.best_params)
model.fit(X, y)
test_preds = model.predict(test_set.drop(columns=["id", "fraude"]))
test_metrics = get_test_metrics(test_set["fraude"], test_preds)
return (model, test_metrics)
def model_training_pipe(df: pd.DataFrame) -> pd.DataFrame:
"""Execute the entire pipeline of model training, including the steps of feature engineering
and selection and hyperparameter tuning. The resulting model is saved in the file "model.pkl".
Args:
df (pandas DataFrame): Dataset to train and test the model.
Returns:
pandas DataFrame: metrics on the test set.
"""
train_data, test_data = feat_engineering_pipe(df)
model, test_metrics = train_model((train_data, test_data))
pickle.dump(model, open("./models/model.pkl", "wb"))
return test_metrics
if __name__ == "__main__":
df =
|
pd.read_csv("./data/dados_fraude.tsv", sep="\t")
|
pandas.read_csv
|
"""### Model Developing"""
# Importing Modules
import pandas as pd
import numpy as np
import time
from tqdm import tqdm
import matplotlib
import matplotlib.pyplot as plt
#libraries for preprocessing
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
#libraries for evaluation
from sklearn.metrics import mean_squared_log_error,r2_score,mean_squared_error
from sklearn.model_selection import train_test_split
#libraries for models
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import LassoCV,RidgeCV
#from yellowbrick.regressor import AlphaSelection
from sklearn.linear_model import Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
import xgboost as xgb
import warnings
warnings.filterwarnings('ignore')
#Loading Dataframe
df=pd.read_csv("data/vehicles_Manheim_Final.csv")
#df=df.drop('Unnamed: 0',axis=1)
df2=df.copy()
#df.head()
#defining numerical and categorical values
num_col=['Year', 'Odometer']
cat_cols=['Make','Model','Color','Trans','4x4','Top']
# Label Encoding
le=preprocessing.LabelEncoder()
df[cat_cols]=df[cat_cols].apply(le.fit_transform)
df.head(2)
# Scaling numerical data
norm = StandardScaler()
df['Price'] = np.log(df['Price'])
df['Odometer'] = norm.fit_transform(np.array(df['Odometer']).reshape(-1,1))
df['Year'] = norm.fit_transform(np.array(df['Year']).reshape(-1,1))
df['Model'] = norm.fit_transform(np.array(df['Model']).reshape(-1,1))
# Remove outliers from the target variable using the 1.5*IQR rule
q1,q3=(df['Price'].quantile([0.25,0.75]))
o1=q1-1.5*(q3-q1)
o2=q3+1.5*(q3-q1)
df=df[(df.Price>=o1) & (df.Price<=o2)]
#df.head(2)
# Function to split the dataset into training and test sets
def trainingData(df,n):
X = df.iloc[:,n]
y = df.iloc[:,-1:].values.T
y=y[0]
X_train,X_test,y_train,y_test=train_test_split(X,y,train_size=0.9,test_size=0.1,random_state=0)
return (X_train,X_test,y_train,y_test)
X_train,X_test,y_train,y_test=trainingData(df,list(range(len(list(df.columns))-1)))
# Some models predict negative values, so this function drops those entries from both vectors
def remove_neg(y_test,y_pred):
ind=[index for index in range(len(y_pred)) if(y_pred[index]>0)]
y_pred=y_pred[ind]
y_test=y_test[ind]
return (y_test,y_pred)
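# Hedged illustration (made-up arrays): entries whose prediction is not strictly positive are
# dropped from both vectors before the log-error metrics are computed.
_yt_demo, _yp_demo = remove_neg(np.array([3.0, 2.5, 4.0]), np.array([2.8, -0.1, 3.9]))
print(_yt_demo, _yp_demo)  # [3. 4.] [2.8 3.9]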
#function for evaluation of model
def result(y_test,y_pred):
r=[]
r.append(mean_squared_log_error(y_test, y_pred))
r.append(np.sqrt(r[0]))
r.append(r2_score(y_test,y_pred))
r.append(round(r2_score(y_test,y_pred)*100,4))
return (r)
# dataframe that stores the performance of each model
accu=pd.DataFrame(index=['MSLE', 'Root MSLE', 'R2 Score','Accuracy(%)'])
"""### Linear Regression"""
# Fitting model
LR=LinearRegression()
LR.fit(X_train,y_train)
y_pred=LR.predict(X_test)
# Calculating error/accuracy
y_test_1,y_pred_1=remove_neg(y_test,y_pred)
r1_lr=result(y_test_1,y_pred_1)
print("---------- Linear Regression ----------")
print('Coefficients: \n', LR.coef_)
print("MSLE : {}".format(r1_lr[0]))
print("Root MSLE : {}".format(r1_lr[1]))
print("R2 Score : {} or {}%".format(r1_lr[2],r1_lr[3]))
accu['Linear Regression']=r1_lr
# Plotting feature importance graph
coef = pd.Series(LR.coef_, index = X_train.columns)
imp_coef = coef.sort_values()
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
imp_coef.plot(kind = "barh")
plt.title("Feature importance using Linear Regression Model")
plt.savefig('plots/Linear-Regression-Feature-Importance.jpg')
#plt.show()
# Visualization of actual vs. predicted values
df_check =
|
pd.DataFrame({'Actual': y_test_1, 'Predicted': y_pred_1})
|
pandas.DataFrame
|
#https://realpython.com/beautiful-soup-web-scraper-python/
import requests
from bs4 import BeautifulSoup
import pandas
import sys
file =
|
pandas.read_csv('[Insert links csv file path]')
|
pandas.read_csv
|
"""
Builds the fundamental dataset for the top N market cap equities from WRDS.
Requires WRDS account. Enter username and password when prompted.
# N = number of securities sorted by market cap
# Exclude GICS codes
Features: active, date, gvkey, year, month, mom1m, mom3m, mom6m, mom9m,
mrkcap, entval, saleq_ttm, cogsq_ttm, xsgaq_ttm, oiadpq_ttm,
niq_ttm, cheq_mrq, rectq_mrq, invtq_mrq, acoq_mrq,
ppentq_mrq, aoq_mrq, dlcq_mrq, apq_mrq, txpq_mrq,
lcoq_mrq, ltq_mrq, csho_1yr_avg
It takes around 30 mins to build the dataset for N=100 and date starting from 1980-01-01
"""
import wrds
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import numpy as np
import pickle
from time import time
from wrds_data_processing import DataProcessing
from configparser import SafeConfigParser, NoOptionError
import argparse as ap
import sys
start_time = time()
# Parse arguments
parser = ap.ArgumentParser(description="Build Data from WRDS")
parser.add_argument("--N",default=10,type=int,
help="Number of equities sorted by market cap")
parser.add_argument("--exclude_gics", default=[],
help="Excludes the industries with list of GICS codes")
parser.add_argument("--filename", help="Name of the output data file",
required = True)
parser.add_argument("--test_mode",default='no',help="Test mode with small N")
args = vars(parser.parse_args())
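# Example invocation (hypothetical script name; flags as defined above):
#   python build_wrds_data.py --N 100 --exclude_gics 40,45 --filename fundamentals_top100 --test_mode no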
N = args['N']
try:
exclude_gics = args['exclude_gics'].split(',')
except AttributeError:
exclude_gics = args['exclude_gics']
out_filename = args['filename']
test_mode = args['test_mode']
# Connect to WRDS data engine
db = wrds.Connection()
#############################################################################
#### SQL Query-----------------------------------------------------------####
#############################################################################
# Initialize dictionary to store top N gvkeys for every month
top_gvkey_month = {}
top_N_eq_gvkey_list_all = set()
start_date = '2013-01-01'
curr_date = datetime.datetime.strptime(start_date,'%Y-%m-%d')
# Go through months starting with the start date and find the top N companies by market cap
# for that month.
# Reference df for primary security
q10 = ("select gvkey,primiss from compm.secm")
primiss_df = db.raw_sql(q10)
while curr_date < datetime.datetime.now():
# prev_date = curr_date + relativedelta(months=-3)
curr_date_string = curr_date.strftime('%Y-%m-%d')
# prev_date_string = prev_date.strftime('%Y-%m-%d')
print(curr_date.date())
# Query to get the list of the top N companies by market cap (cshoq*prccq) as of the given month
q1a = ("select distinct a.gvkey,a.latest,b.cshoq,b.prccq,b.mkvaltq,b.cshoq*b.prccq as market_cap,b.curcdq "
"from "
"(select gvkey,max(datadate) as latest "
"from "
"compm.fundq where datadate < '%s' "
"group by gvkey) a inner join "
"(select gvkey,datadate,mkvaltq,cshoq,prccq,curcdq "
"from compm.fundq where cshoq>0 and prccq>0 and curcdq='USD' and mkvaltq>0) b "
"on a.gvkey = b.gvkey and a.latest=b.datadate "
"order by market_cap desc "
"limit %i")%(curr_date_string,N)
mrk_df = db.raw_sql(q1a)
# merge the security flag
mrk_df = mrk_df.merge(primiss_df,on='gvkey',how='left')
gvkey_list_month = mrk_df['gvkey'][mrk_df['primiss']=='P'].values.tolist()
top_gvkey_month[curr_date.date()] = gvkey_list_month
top_N_eq_gvkey_list_all |= set(gvkey_list_month)
# increment the date for next month
curr_date = curr_date + relativedelta(months=1)
top_N_eq_gvkey_list_all = list(top_N_eq_gvkey_list_all)
# Query to get GIC codes and remove the exclude_gics list
q1b = ("select gvkey,gsector "
"from compa.company ")
df_gic = db.raw_sql(q1b)
exclude_gvkey_list = df_gic['gvkey'][df_gic['gsector'].isin(exclude_gics)].tolist()
# remove gvkey of associated gic code from the main list
top_N_eq_gvkey_list = [k for k in top_N_eq_gvkey_list_all if k not in exclude_gvkey_list]
# Check for continuation of companies and update their status (active or not)
# Compare the gvkey list with the most recent list if it exists
# Update the current gvkey list with the inactive ones
# Read the gvkey config file which contains the most recent list
config_gvkey = SafeConfigParser()
config_gvkey.read('gvkey-hist.dat')
config_gvkey.set('gvkey_list', '# Used to keep track of the most recent equity list. No need to edit', '')
# Initialize active dict
active = {key: 1 for key in top_N_eq_gvkey_list}
if test_mode != 'yes':
try:
mr_gvk_list = config_gvkey.get('gvkey_list', 'most_recent_list').split(',')
inactive_list = [k for k in mr_gvk_list if k not in top_N_eq_gvkey_list]
# Add inactive gvkey
for inactive_gvk in inactive_list:
active[inactive_gvk] = 0
# Update the current gvkey list with the inactive ones
top_N_eq_gvkey_list = list(set().union(top_N_eq_gvkey_list,inactive_list))
        # update the most recent list in the config file with the merged active and inactive gvkeys
config_gvkey.set('gvkey_list', 'most_recent_list', ','.join(top_N_eq_gvkey_list))
except NoOptionError:
# create the most recent list in the config file if it doesn't exist
config_gvkey.set('gvkey_list', 'most_recent_list', ','.join(top_N_eq_gvkey_list))
# save to a file
with open('gvkey-hist.dat', 'w') as configfile:
config_gvkey.write(configfile)
# change format to be compatible with sql query
top_N_eq_gvkey = tuple(["'%s'"%str(i) for i in top_N_eq_gvkey_list])
top_N_eq_gvkey = ",".join(top_N_eq_gvkey)
# Query to get fundamental Data
q2 = ("select datadate,gvkey,tic,saleq,cogsq,xsgaq,oiadpq,niq,"
"cheq, rectq, invtq, acoq, ppentq, aoq, dlcq, apq, txpq, lcoq, ltq, dlttq, cshoq, seqq, atq "
"from compm.fundq "
"where gvkey in (%s) and datadate > '%s' ")%(top_N_eq_gvkey,start_date)
fundq_df = db.raw_sql(q2)
# Add gics_sector as a column
fundq_df = pd.merge(fundq_df,df_gic,how='left',on=['gvkey'])
# Query to get price data
q3 = ("select gvkey,datadate,prccm,ajexm "
"from compm.secm "
"where gvkey in (%s) ")%top_N_eq_gvkey
price_df_all = db.raw_sql(q3).sort_values('datadate')
price_df_all.datadate = pd.to_datetime(price_df_all.datadate,format='%Y-%m-%d')
# Query to get stock_split data
q4 = ("select gvkey,datadate,split "
"from compm.sec_split "
"where gvkey in (%s) ")%top_N_eq_gvkey
stock_split_df_all = db.raw_sql(q4).sort_values('datadate')
stock_split_df_all.datadate =
|
pd.to_datetime(stock_split_df_all.datadate,format='%Y-%m-%d')
|
pandas.to_datetime
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# ### Connect to Postgres
# In[2]:
import psycopg2
conn=psycopg2.connect(database="ernie",user="wenxi",host="localhost",password="<PASSWORD>")
conn.set_client_encoding('UTF8')
conn.autocommit=True
curs=conn.cursor()
# In[3]:
curs.execute("SET SEARCH_PATH TO sb_plus;")
# In[4]:
df = pd.read_sql('SELECT * FROM sb_plus_complete_kinetics', conn)
# In[5]:
# Sort the dataframe by cited_1, cited_2, co_cited_year and reset the index so each pair's kinetics are in chronological order
df = df.sort_values(['cited_1', 'cited_2', 'co_cited_year'])
df = df.reset_index().drop(columns='index')
# In[271]:
df
# ### Filling in missing years between first_possible_year and last_co_cited_year
# In[7]:
# Distinct pairs
pairs = pd.DataFrame(df.groupby(['cited_1','cited_2']).size()).reset_index().drop(columns=0)
pairs
# In[8]:
# Number of pairs whose first year has to be filled in
len(pairs) - len(df[df['first_possible_year'] == df['co_cited_year']].groupby(['cited_1','cited_2']).size())
# In[9]:
x1 = pd.DataFrame(df.groupby(['cited_1','cited_2', 'first_possible_year'])['co_cited_year'].min()).reset_index()
x1
# In[10]:
# Add a first row for pairs whose first_possible_year is earlier than their first co_cited_year
l = list([x1['cited_1'], x1['cited_2'], x1['co_cited_year'], x1['first_possible_year']])
l_new = [[],[],[],[]]
for i in range(len(l[0])):
if l[2][i] > l[3][i]:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[3][i])
l_new[3].append(l[3][i])
# In[11]:
x2 = pd.DataFrame({'cited_1':l_new[0], 'cited_2':l_new[1], 'co_cited_year':l_new[2], 'first_possible_year':l_new[3]})
x2
# In[12]:
x3 = pd.concat([df, x2], axis=0)
x3
# In[13]:
# Sort by pair and co_cited_year, reset the index, fill zeros for frequency, and backfill all other columns
x3 = x3.sort_values(['cited_1','cited_2','co_cited_year']).reset_index().drop(columns='index')
x3['frequency'] = x3['frequency'].fillna(0)
x3 = x3.fillna(method='bfill')
# In[14]:
x3
# In[15]:
# Double check the number of pairs is correct
len(x3.groupby(['cited_1','cited_2']).size())
# In[16]:
# Double check that every pair now has a row where first_possible_year equals co_cited_year
len(x3[x3['first_possible_year'] == x3['co_cited_year']])
# In[17]:
l = list([x3['cited_1'], x3['cited_2'], x3['co_cited_year']])
# In[89]:
# Fill in all missing years
import timeit
start = timeit.default_timer()
l_new = [[],[],[]]
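# For consecutive rows belonging to the same (cited_1, cited_2) pair, keep the current year
# and, when the next co_cited_year is more than one year ahead, also append every intermediate
# year so the kinetics series has no gaps; the final row is appended after the loop.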
for i in range(len(l[0])-1):
if (l[0][i] == l[0][i+1]) & (l[1][i] == l[1][i+1]):
if l[2][i+1] - l[2][i] > 1:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i])
for j in range(1, (l[2][i+1] - l[2][i])):
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i]+j)
else:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i])
else:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i])
l_new[0].append(l[0][len(l[0])-1])
l_new[1].append(l[1][len(l[0])-1])
l_new[2].append(l[2][len(l[0])-1])
stop = timeit.default_timer()
# In[90]:
# How long it takes to finish filling in missing years
print(stop-start)
# 227s for 51,613 pairs
# In[91]:
# The number of rows increased because all missing years have been appended
len(l_new[2])
# In[92]:
df_new = pd.DataFrame({'cited_1':l_new[0], 'cited_2':l_new[1], 'co_cited_year':l_new[2]})
# In[93]:
df_new2 = df_new.merge(x3, how='left', on=['cited_1', 'cited_2', 'co_cited_year'])
# In[94]:
# Fill in zeros for frequency
df_new2['frequency'] = df_new2['frequency'].fillna(0)
# In[95]:
# Forward fill in values for all other columns
df_new2 = df_new2.fillna(method='ffill')
# In[96]:
df_new2
# In[97]:
# Recalculate the min_frequency for all pairs since filling missing years will change the min_frequency to be 0
df_new3 = pd.DataFrame(df_new2.groupby(['cited_1', 'cited_2'])['frequency'].min()).reset_index()
x_new = df_new3.merge(df_new2, how='left', on=['cited_1','cited_2']).drop(columns='min_frequency').rename(columns={'frequency_x':'min_frequency','frequency_y':'frequency'})
# In[98]:
x_new
# ### Find Sleeping Beauty Pairs
#
# (1) Minimum sleeping duration of 10 years, starting from the first_possible_year
#
# (2) During the sleeping duration, the average co-citation frequency is <= 1 and each year's co-citation frequency is <= 2
#
# (3) Calculate the slope between the first year after the sleeping duration and the first peak year
#
# (4) Calculate the B_index for each pair
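#
# The per-row b_index computed further below mirrors the usual beauty-coefficient term
# (read directly from the code, not an external citation): with c_t the co-citation frequency
# in year t, c_0 the frequency in the first possible year t_0, c_max the peak frequency and
# t_m the peak year, each year contributes
#   b_t = ((c_max - c_0) / (t_m - t_0) * (t - t_0) + c_0 - c_t) / max(1, c_t)
# and B is the sum of b_t over all years up to and including the peak year.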
# In[99]:
# Calculate B_index on 51,613 Pairs
x_new
# In[100]:
# add a column of first_possible_year_frequency
pub_freq = x_new[['cited_1', 'cited_2','frequency']][x_new['co_cited_year'] == x_new['first_possible_year']].rename(columns={'frequency':'pub_year_frequency'})
x_new = x_new.merge(pub_freq, how='left', on=['cited_1', 'cited_2'])
# In[101]:
# add a column of max{1, ct}
x_new['max_1_ct'] = np.where(x_new['frequency'] > 1, x_new['frequency'], 1)
# In[102]:
# Calculate the per-year b_index contribution for each row of x_new
x_new['b_index'] = (((x_new['max_frequency'] - x_new['pub_year_frequency'])/(x_new['peak_year'] - x_new['first_possible_year'])) * (x_new['co_cited_year'] - x_new['first_possible_year']) + x_new['pub_year_frequency'] - x_new['frequency'])/(x_new['max_1_ct'])
# In[103]:
# Sum across years until peak_year
sb_index = pd.DataFrame(x_new[x_new['co_cited_year'] <= x_new['peak_year']].groupby(['cited_1','cited_2'])['b_index'].sum())
sb_index = sb_index.sort_values('b_index', ascending=False).reset_index()
# In[104]:
sb_index
# In[105]:
# Statistical summary of b_index for all pairs
sb_index['b_index'].describe()
# In[106]:
# Extract sb pairs by applying van Raan's conditions
import warnings
warnings.filterwarnings('ignore')
start = timeit.default_timer()
z = pd.DataFrame(columns=x_new.columns)
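# A pair qualifies when the first year with frequency > 2 occurs at row index >= 10
# (i.e., at least ten years of sleeping from the first possible year) and the mean frequency
# over that sleeping period is <= 1; that first year is flagged via awake_year_index.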
for i in range(len(pairs)):
g = x_new[(x_new['cited_1'] == pairs['cited_1'][i]) & (x_new['cited_2'] == pairs['cited_2'][i])]
g = g.reset_index().drop(columns='index')
g['awake_year_index'] = ''
if g.index[g['frequency'] > 2].min() >= 10:
g['awake_year_index'][g.index[g['frequency'] > 2].min()] = 1
if g[0:g.index[g['frequency'] > 2].min()]['frequency'].mean() <= 1:
z = pd.concat([z,g], ignore_index=True)
stop = timeit.default_timer()
# In[107]:
# How long it takes to find sb pairs
print(stop-start)
# 341s for 51,613 pairs
# In[108]:
z1 = z.copy()
# In[109]:
z1
# In[110]:
# Number of pairs that satisfy our stringent conditions for being identified as sleeping beauty co-citation pairs
pairs1 = pd.DataFrame(z1.groupby(['cited_1','cited_2']).size()).reset_index().drop(columns=0)
pairs1
# In[111]:
#zz1 = pd.DataFrame(z1.groupby(['cited_1','cited_2'])['frequency'].sum()).reset_index()
#zz1.to_csv('1196_pairs_frequency.csv')
# In[112]:
# Statistical summary of sb pairs extracted
ss = pairs1.merge(sb_index, how='left', on=['cited_1','cited_2']).sort_values('b_index', ascending=False).reset_index().drop(columns='index')
ss['b_index'].describe()
# In[113]:
z2 = pd.DataFrame(columns=z1.columns)
for i in range(len(pairs1)):
g = z1[(z1['cited_1'] == pairs1['cited_1'][i]) & (z1['cited_2'] == pairs1['cited_2'][i])]
g = g.reset_index().drop(columns='index')
g['awake_year'] = ''
g['awake_frequency'] = ''
tmp1 = g.loc[g['awake_year_index'] == 1, 'co_cited_year'].iloc[0]
tmp2 = g.loc[g['awake_year_index'] == 1, 'frequency'].iloc[0]
g['awake_year'] = tmp1
g['awake_frequency'] = tmp2
z2 = pd.concat([z2,g], ignore_index=True)
# In[114]:
z2 = z2.drop(columns='awake_year_index')
# In[115]:
z2['awake_duration'] = z2['peak_year'] - z2['awake_year']
z2['sleep_duration'] = z2['awake_year'] - z2['first_possible_year']
# In[116]:
# Calculate slope for sb pairs
z2['slope'] = ''
for i in range(len(z2)):
if z2['awake_duration'][i] == 0:
z2['slope'][i] = np.nan
else:
z2['slope'][i] = (z2['max_frequency'][i] - z2['awake_frequency'][i])/z2['awake_duration'][i]
# In[117]:
z2
# In[118]:
# Statistical summary of the slope column
zz = pd.DataFrame(z2.groupby(['cited_1','cited_2','slope'])['frequency'].sum()).sort_values('slope', ascending=False).reset_index()
zz
# In[119]:
zz['slope'].describe()
# In[120]:
#zz.to_csv('slope_frequency.csv')
# In[259]:
zz2 = pd.DataFrame(ss.merge(z2, how='outer', on=['cited_1','cited_2']).groupby(['cited_1','cited_2','max_frequency','sleep_duration'])['slope'].max()).reset_index()
zz2
# In[260]:
zz2 = zz2.merge(ss, how='left', on=['cited_1','cited_2'])
zz2.to_csv('1196_pairs_all_values.csv')
# In[137]:
intersect_sb = sb_index.merge(zz2, how='right', on=['cited_1','cited_2'])
intersect_sb
# In[143]:
len(intersect_sb[intersect_sb['b_index'] >= intersect_sb['b_index'].quantile(0.50)])
# In[150]:
import matplotlib.pyplot as plt
#plt.plot(intersect_sb['b_index'])
plt.hist(intersect_sb['b_index'], density=True)
plt.xlabel('beauty coefficient')
# In[146]:
out = pd.DataFrame(z2.groupby(['cited_1','cited_2'])['frequency'].sum()).sort_values('frequency').reset_index()
out
# In[267]:
from sqlalchemy import create_engine
engine = create_engine('postgresql://wenxi:temp_ERNIE_1234@localhost:5432/ernie')
# In[268]:
zz2.head(0).to_sql('sb_1196_pairs_all_values', engine, if_exists='replace',index=False)
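# head(0).to_sql creates (or replaces) an empty target table with the right schema;
# the rows themselves are bulk-loaded below with COPY via copy_from, which is much
# faster than inserting them row by row through to_sql.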
# In[270]:
import io
conn = engine.raw_connection()
cur = conn.cursor()
#cur.execute("SET SEARCH_PATH TO sb_plus;")
output = io.StringIO()
zz2.to_csv(output, sep='\t', header=False, index=False)
output.seek(0)
contents = output.getvalue()
cur.copy_from(output, 'sb_1196_pairs_all_values', null="") # null values become ''
conn.commit()
# In[127]:
#zz2 = zz2.merge(ss, how='left', on=['cited_1','cited_2'])
#zz2.to_csv('1196_pairs_all_values.csv')
# In[128]:
# Pairs with slope = NA:
z2[z2['slope'].isna()].groupby(['cited_1', 'cited_2']).size()
# 10 pairs:
# 2580 5944246702 44
# 970456 2364893 33
# 1686592 2364893 31
# 2364893 33744566767 34
# 15231889 17138283 43
# 16262898 18934508 37
# 17769362 18485995 40
# 18039087 18513290 40
# 20909036 84944438568 34
# 41906953 58149417364 39
# In[129]:
# frequency of those pairs with slope = NA
z2[z2['slope'].isna()].groupby(['cited_1', 'cited_2'])['frequency'].sum()
# In[449]:
import matplotlib.pyplot as plt
for i in range(len(pairs1)):
g = z2[(z2['cited_1'] == pairs1['cited_1'][i]) & (z2['cited_2'] == pairs1['cited_2'][i])]
g = g.reset_index().drop(columns='index')
plt.title([(pairs1['cited_1'][i],pairs1['cited_2'][i]), i+1])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1970, 2019)
plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# ### Generate Plot: 3 Sleeping Beauty Pairs Based on Slope
# In[450]:
na = pd.DataFrame(z2[z2['slope'].isna()].groupby(['cited_1', 'cited_2']).size()).drop(columns=0).reset_index()
na
# In[451]:
# Plot for sb pairs with slope = NA
for i in range(len(na)):
g = z2[(z2['cited_1'] == na['cited_1'][i]) & (z2['cited_2'] == na['cited_2'][i])]
g = g.reset_index().drop(columns='index')
plt.title([(na['cited_1'][i],na['cited_2'][i])])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
#plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# In[452]:
# Plot for sb pairs with min slope
g = z2[z2['slope'] == z2['slope'].min()]
g = g.reset_index().drop(columns='index')
plt.title([(g['cited_1'][0],g['cited_2'][0])])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
#plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# In[453]:
mean = pd.DataFrame(z2[(z2['slope'] <= 2.38) & (z2['slope'] >= 2.35)].groupby(['cited_1', 'cited_2']).size()).drop(columns=0).reset_index()
mean
# In[454]:
# Plot for sb pairs with mean slope
for i in range(len(mean)):
g = z2[(z2['cited_1'] == mean['cited_1'][i]) & (z2['cited_2'] == mean['cited_2'][i])]
g = g.reset_index().drop(columns='index')
plt.title([(mean['cited_1'][i],mean['cited_2'][i])])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
#plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# In[258]:
ax_1 = z2[(z2['cited_1'] == 1686592) & (z2['cited_2'] == 2364893)].reset_index().drop(columns='index')
ax_2 = z2[(z2['cited_1'] == 4465903) & (z2['cited_2'] == 6073669)].reset_index().drop(columns='index')
ax_3 = z2[(z2['cited_1'] == 22465686) & (z2['cited_2'] == 22638979)].reset_index().drop(columns='index')
ax_1_awake_x = ax_1['awake_year'][0]
ax_1_awake_y = ax_1['peak_year'][0]
ax_1_peak_x = ax_1['frequency'][ax_1['awake_year'] == ax_1['co_cited_year']].item()
ax_1_peak_y = ax_1['frequency'][ax_1['peak_year'] == ax_1['co_cited_year']].item()
ax_2_awake_x = ax_2['awake_year'][0]
ax_2_awake_y = ax_2['peak_year'][0]
ax_2_peak_x = ax_2['frequency'][ax_2['awake_year'] == ax_2['co_cited_year']].item()
ax_2_peak_y = ax_2['frequency'][ax_2['peak_year'] == ax_2['co_cited_year']].item()
ax_3_awake_x = ax_3['awake_year'][0]
ax_3_awake_y = ax_3['peak_year'][0]
ax_3_peak_x = ax_3['frequency'][ax_3['awake_year'] == ax_3['co_cited_year']].item()
ax_3_peak_y = ax_3['frequency'][ax_3['peak_year'] == ax_3['co_cited_year']].item()
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12,7), sharex=True, sharey=True)
#ax1.set_title('Sleeping Beauty Pair with Largest Slope', fontsize=14)
ax1.plot(ax_1['co_cited_year'], ax_1['frequency'], color='black')
ax1.plot(ax_1['awake_year'][0], 0, marker='^', color='red')
ax1.plot([ax_1_awake_x, ax_1_awake_y], [ax_1_peak_x, ax_1_peak_y], 'blue', linestyle=':',marker='*')
ax1.axhline(y=2, linestyle='-',color='grey')
#ax1.set_xlabel('co_cited_year', fontsize=13)
#ax1.set_ylabel('frequency', fontsize=13)
#ax2.set_title('Sleeping Beauty Pair with Mean Slope', fontsize=14)
ax2.plot(ax_2['co_cited_year'], ax_2['frequency'], color='black')
ax2.plot(ax_2['awake_year'][0], 0, marker='^', color='red')
ax2.plot([ax_2_awake_x, ax_2_awake_y], [ax_2_peak_x, ax_2_peak_y], 'blue', linestyle=':',marker='*', linewidth=4.0)
ax2.axhline(y=2, linestyle='-',color='grey')
#ax2.set_xlabel('co_cited_year', fontsize=13)
#ax2.set_ylabel('frequency', fontsize=13)
#ax3.set_title('Sleeping Beauty Pair with Small Slope', fontsize=14)
ax3.plot(ax_3['co_cited_year'], ax_3['frequency'], color='black')
ax3.plot(ax_3['awake_year'][0], 0, marker='^', color='red')
ax3.plot([ax_3_awake_x, ax_3_awake_y], [ax_3_peak_x, ax_3_peak_y], 'blue', linestyle=':',marker='*', linewidth=4.0)
ax3.axhline(y=2, linestyle='-',color='grey')
#ax3.set_xlabel('co_cited_year', fontsize=13)
#ax3.set_ylabel('frequency', fontsize=13)
fig.text(0.5, 0.06, 'co_cited_year', ha='center', fontsize=15)
fig.text(0.08, 0.5, 'frequency', va='center', rotation='vertical', fontsize=15)
#fig.tight_layout()
fig.savefig('output.png', dpi=300)
fig.savefig('output.pdf', dpi=300)
fig.savefig('output.tiff', dpi=300)
# In[456]:
#output = pd.concat([ax_1,ax_2,ax_3])
#output.to_csv('sb_plot.csv')
# ### Find a Non-Sleeping Beauty Pair
# In[457]:
kk = x_new.merge(pairs1, how='right', on=['cited_1','cited_2'])
# In[458]:
kk1 = pd.concat([kk, x_new]).drop_duplicates(keep=False)
# In[459]:
kk1_pairs = pd.DataFrame(kk1.groupby(['cited_1','cited_2']).size()).drop(columns=0).reset_index()
kk1_pairs
# In[460]:
# Select one pair with a similar scale whose kinetics look reasonable
g = kk1[(kk1['cited_1'] == 20896) & (kk1['cited_2'] == 33845282341)]
g = g.reset_index().drop(columns='index')
plt.title([(20896, 33845282341)])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
plt.ylim(0, 25)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.show()
# In[461]:
#g.to_csv('non_sb_plot.csv')
# ### Generate Plot: 3 Sleeping Beauty Pairs and 3 Non-Sleeping Beauty Pairs Based on Frequency
# In[462]:
# Statistical summary of total frequency for sb pairs
z2.groupby(['cited_1','cited_2'])['frequency'].sum().describe()
# In[463]:
tt = pd.DataFrame(z2.groupby(['cited_1','cited_2'])['frequency'].sum())
tt
# In[464]:
# Max total frequency
tt[tt['frequency'] == tt['frequency'].max()]
# In[465]:
max_freq = z2[(z2['cited_1'] == 16823810) & (z2['cited_2'] == 84965520932)].reset_index().drop(columns='index')
# In[466]:
# Mean total frequency
# tt['frequency'].mean() = 285.947
tt[(tt['frequency'] <= 286) & (tt['frequency'] >= 285)]
# In[467]:
mean_freq = z2[(z2['cited_1'] == 14905513) & (z2['cited_2'] == 21344602)].reset_index().drop(columns='index')
# In[468]:
# Min total frequency
tt[tt['frequency'] == tt['frequency'].min()]
# In[469]:
min_freq = z2[(z2['cited_1'] == 23020183) & (z2['cited_2'] == 25752384)].reset_index().drop(columns='index')
# In[470]:
#sb_total_frequency = pd.concat([max_freq, mean_freq, min_freq])
#sb_total_frequency.to_csv('sb_total_frequency.csv')
# In[471]:
# Statistical summary of total frequency for non-sb pairs
kk1.groupby(['cited_1','cited_2'])['frequency'].sum().describe()
# In[472]:
tt1 = pd.DataFrame(kk1.groupby(['cited_1','cited_2'])['frequency'].sum())
tt1
# In[473]:
# Max total frequency
tt1[tt1['frequency'] == tt1['frequency'].max()]
# In[474]:
max_freq1 = kk1[(kk1['cited_1'] == 189651) & (kk1['cited_2'] == 345491105)].reset_index().drop(columns='index')
# In[475]:
# Mean total frequency
# tt1['frequency'].mean() = 265.128
tt1[(tt1['frequency'] <= 265.13) & (tt1['frequency'] >= 265)]
# In[476]:
mean_freq1 = kk1[(kk1['cited_1'] == 194808) & (kk1['cited_2'] == 33744541191)].reset_index().drop(columns='index')
# In[477]:
# Min total frequency
tt1[tt1['frequency'] == tt1['frequency'].min()]
# In[478]:
min_freq1 = kk1[(kk1['cited_1'] == 24444465579) & (kk1['cited_2'] == 33744749965)].reset_index().drop(columns='index')
# In[479]:
#non_sb_total_frequency = pd.concat([max_freq1, mean_freq1, min_freq1])
#non_sb_total_frequency.to_csv('non_sb_total_frequency.csv')
# In[480]:
# g = kk1[(kk1['cited_1'] == 45149145322) & (kk1['cited_2'] == 85011817347)]
# g = g.reset_index().drop(columns='index')
# plt.xlabel('co_cited_year')
# plt.ylabel('frequency')
# plt.xlim(1975, 2019)
# #plt.ylim(0, 25)
# plt.plot(g['co_cited_year'], g['frequency'], color='green')
# plt.show()
# ### Get Individual Publications of Sb Pairs
# In[481]:
z2
# In[482]:
single_pub = pd.DataFrame(set(z2['cited_1'].unique().tolist() + z2['cited_2'].unique().tolist()))
single_pub.columns = ['cited_paper']
# In[483]:
len(single_pub)
# In[484]:
# Calculate kinetics of individual publications by Neo4j: ERNIE-Neo4j-sb-plus-kinetics-single-pub
single_pub.to_csv('single_pub.csv', index=False)
# ### Read in Kinetics of Individual Publications of Sb Pairs that Were Calculated by Neo4j and Do the Pre-processing Step
# In[21]:
sp =
|
pd.read_csv('single_pub_kinetics.csv')
|
pandas.read_csv
|
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@pytest.fixture
def four_level_index_dataframe():
arr = np.array(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
]
)
index = MultiIndex(
levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
names=["one", "two", "three", "four"],
)
return DataFrame(arr, index=index, columns=list("ABCDE"))
@pytest.mark.parametrize(
"key, level, exp_arr, exp_index",
[
("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
],
)
def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
# see gh-2903
arr = np.random.randn(4, 4)
index = MultiIndex(
levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
names=["lvl0", "lvl1"],
)
df = DataFrame(arr, columns=index)
result = df.xs(key, level=level, axis=1)
expected = DataFrame(exp_arr(arr), columns=exp_index)
tm.assert_frame_equal(result, expected)
def test_xs_values(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two")).values
expected = df.values[4]
tm.assert_almost_equal(result, expected)
def test_xs_loc_equality(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two"))
expected = df.loc[("bar", "two")]
tm.assert_series_equal(result, expected)
def test_xs_missing_values_in_index():
# see gh-6574
# missing values in returned index should be preserved
acc = [
("a", "abcde", 1),
("b", "bbcde", 2),
("y", "yzcde", 25),
("z", "xbcde", 24),
("z", None, 26),
("z", "zbcde", 25),
("z", "ybcde", 26),
]
df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
expected = DataFrame(
{"cnt": [24, 26, 25, 26]},
index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
)
result = df.xs("z", level="a1")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):
# see gh-13719
frame = multiindex_dataframe_random_data
df = concat([frame] * 2)
assert df.index.is_unique is False
expected = concat([frame.xs("one", level="second")] * 2)
result = df.xs(key, level=level)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
import pandas as pd
TRAIN_PATH = 'data/multinli_1.0/multinli_1.0_train.txt'
DEV_PATH = 'data/multinli_1.0/multinli_1.0_dev_matched.txt'
#things get a bit unusual here as we use the matched dev set as the test set
#and carve a validation (dev) set out of the train set
train_df = pd.read_csv(TRAIN_PATH, sep='\t', error_bad_lines=False, keep_default_na=False)
test_df = pd.read_csv(DEV_PATH, sep='\t', keep_default_na=False)
def df_to_list(df):
return list(zip(df['sentence1'], df['sentence2'], df['gold_label']))
train_data = df_to_list(train_df)
test_data = df_to_list(test_df)
def filter_no_consensus(data):
return [(sent1, sent2, label) for (sent1, sent2, label) in data if label != '-']
print('Examples before filtering:', len(train_data), len(test_data))
train_data = filter_no_consensus(train_data)
test_data = filter_no_consensus(test_data)
# Carve the dev set out of the train set before truncating it, so the two splits do not overlap
dev_data = train_data[-10000:]
train_data = train_data[:-10000]
print('Examples after filtering:', len(train_data), len(dev_data), len(test_data))
import spacy
nlp = spacy.load('en')
example_sentence = train_data[12345][0]
print(f'Before tokenization: {example_sentence}')
tokenized_sentence = [token.text for token in nlp(example_sentence)]
print(f'Tokenized: {tokenized_sentence}')
from tqdm import tqdm
def tokenize(string):
return ' '.join([token.text for token in nlp.tokenizer(string)])
def tokenize_data(data):
return [(tokenize(sent1), tokenize(sent2), label) for (sent1, sent2, label) in tqdm(data)]
train_data = tokenize_data(train_data)
dev_data = tokenize_data(dev_data)
test_data = tokenize_data(test_data)
train_df = pd.DataFrame.from_records(train_data)
dev_df = pd.DataFrame.from_records(dev_data)
test_df =
|
pd.DataFrame.from_records(test_data)
|
pandas.DataFrame.from_records
|
import pandas as pd
import numpy as np
import seaborn as sns
from scipy.io import mmread
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from os.path import join as pjoin
import os
### Load control data ###
data = mmread(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396857_dc_0hr.mtx.txt"
)
gene_names = (
pd.read_csv(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396857_dc_0hr_genenames.csv",
index_col=0,
)
.iloc[:, 0]
.values
)
cell_names = (
pd.read_csv(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396857_dc_0hr_cellnames.csv",
index_col=0,
)
.iloc[:, 0]
.values
)
data_dense = pd.DataFrame(data.toarray(), columns=cell_names, index=gene_names)
print("Loaded {} cells and {} genes".format(data_dense.shape[1], data_dense.shape[0]))
## Read in the metadata about which guides infected which genes
metadata = pd.read_csv(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396857_dc_0hr_cbc_gbc_dict.csv",
header=None,
names=["guide", "cells"],
)
metadata["targeted_gene"] = [x.split("_")[1] for x in metadata.guide.values]
## Pull out the cell barcodes for each guide
# barcode_dict maps guide names (keys) to cell barcodes (values)
cells_split = [y.split(", ") for y in metadata.cells.values]
barcode_dict = {}
for ii, guide in enumerate(metadata.guide.values):
barcode_dict[guide] = np.array(cells_split[ii])
# Get cells with only one guide
cells_unique, cells_counts = np.unique(
np.concatenate([x.split(", ") for x in metadata.cells.values]), return_counts=True
)
cells_with_one_guide = cells_unique[cells_counts == 1]
cells_with_one_guide = np.intersect1d(cells_with_one_guide, data_dense.columns.values)
data_dense = data_dense[cells_with_one_guide]
### Load treatment data ###
data = mmread(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396856_dc_3hr.mtx.txt"
)
# gene and cell names
gene_names = (
pd.read_csv(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396856_dc_3hr_genenames.csv",
index_col=0,
)
.iloc[:, 0]
.values
)
cell_names = (
pd.read_csv(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396856_dc_3hr_cellnames.csv",
index_col=0,
)
.iloc[:, 0]
.values
)
# format into dataframe
data_dense_3hr = pd.DataFrame(data.toarray(), columns=cell_names, index=gene_names)
print(
"Loaded {} cells and {} genes".format(
data_dense_3hr.shape[1], data_dense_3hr.shape[0]
)
)
## Get the guide data for the same guide as above
metadata_3hr = pd.read_csv(
"/Users/andrewjones/Documents/beehive/differential_covariance/perturb_seq/data/GSM2396856_dc_3hr_cbc_gbc_dict_strict.csv",
header=None,
names=["guide", "cells"],
)
## Pull out the cell barcodes for each guide
cells_split = [y.split(", ") for y in metadata_3hr.cells.values]
barcode_dict_3hr = {}
for ii, guide in enumerate(metadata_3hr.guide.values):
barcode_dict_3hr[guide] = np.array(cells_split[ii])
# Get cells with only one guide
cells_unique, cells_counts = np.unique(
np.concatenate([x.split(", ") for x in metadata_3hr.cells.values]),
return_counts=True,
)
cells_with_one_guide = cells_unique[cells_counts == 1]
cells_with_one_guide = np.intersect1d(
cells_with_one_guide, data_dense_3hr.columns.values
)
data_dense_3hr = data_dense_3hr[cells_with_one_guide]
# Only take guides that have data for both timepoints
guides_with_both_timepoints = np.array(
[
x
for x in metadata.guide.values
if (x in barcode_dict.keys()) and (x in barcode_dict_3hr.keys())
]
)
metadata_both_timepoints = metadata[metadata.guide.isin(guides_with_both_timepoints)]
# Get targeted genes with multiple guides
targeted_gene_counts = metadata_both_timepoints.targeted_gene.value_counts()
genes_with_multiple_guides = targeted_gene_counts.index.values[targeted_gene_counts > 1]
## Use scry (poisson deviance) to compute most variable genes
# Function for computing size factors
def compute_size_factors(m):
# given matrix m with samples in the columns
# compute size factors
sz = np.sum(m.values, axis=0) # column sums (sum of counts in each cell)
lsz = np.log(sz)
# make geometric mean of sz be 1 for poisson
sz_poisson = np.exp(lsz - np.mean(lsz))
return sz_poisson
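# The Poisson deviance per gene g computed below reduces to
#   D_g = 2 * [ sum_i x_gi * log(x_gi / s_i) - (sum_i x_gi) * log(sum_i x_gi / sum_i s_i) ]
# where x_gi is the count of gene g in cell i and s_i is the Poisson size factor;
# genes with larger deviance are treated as the most informative.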
def poisson_deviance(X, sz):
    LP = X / sz  # divide each cell's counts by its size factor (broadcast over columns)
LP[LP > 0] = np.log(LP[LP > 0]) # log transform nonzero elements only
# Transpose to make features in cols, observations in rows
X = X.T
ll_sat = np.sum(np.multiply(X, LP.T), axis=0)
feature_sums = np.sum(X, axis=0)
ll_null = feature_sums * np.log(feature_sums / np.sum(sz))
return 2 * (ll_sat - ll_null)
def deviance_feature_selection(X):
# Remove cells without any counts
X = X[np.sum(X, axis=1) > 0]
# Compute size factors
sz = compute_size_factors(X)
# Compute deviances
devs = poisson_deviance(X, sz)
# Get associated gene names
gene_names = X.index.values
assert gene_names.shape[0] == devs.values.shape[0]
return devs.values, gene_names
devs, gene_names = deviance_feature_selection(data_dense)
# Save data for all targeted genes and guides
NUM_GENES = 500
for one_gene in genes_with_multiple_guides:
save_dir = pjoin("../data/targeted_genes", one_gene)
print("Gene: {}".format(one_gene))
# top_genes = gene_names[np.argsort(-devs)[:NUM_GENES]]
# ------ Save data for this targeted gene (only genes with highest variance) -----------
one_gene_guides = metadata_both_timepoints[
metadata_both_timepoints.targeted_gene == one_gene
].guide.unique()
# loop over guides
all_data_0hr = []
all_data_3hr = []
for ii, one_guide in enumerate(one_gene_guides):
## 0hr data
corresponding_cells = barcode_dict[one_guide]
corresponding_cells = np.intersect1d(
corresponding_cells, data_dense.columns.values
)
data_one_guide = data_dense[corresponding_cells]
## 3hr data
corresponding_cells = barcode_dict_3hr[one_guide]
corresponding_cells_complete = np.intersect1d(
corresponding_cells, data_dense_3hr.columns.values
)
data_one_guide_3hr = data_dense_3hr[corresponding_cells_complete]
curr_shared_genes = np.intersect1d(
data_one_guide.index.values, data_one_guide_3hr.index.values
)
if ii == 0:
shared_genes = curr_shared_genes
else:
shared_genes = np.intersect1d(shared_genes, curr_shared_genes)
all_data_0hr = []
all_data_3hr = []
# loop over guides
for ii, one_guide in enumerate(one_gene_guides):
## 0hr data
corresponding_cells = barcode_dict[one_guide]
corresponding_cells = np.intersect1d(
corresponding_cells, data_dense.columns.values
)
data_one_guide = data_dense[corresponding_cells]
## 3hr data
corresponding_cells = barcode_dict_3hr[one_guide]
corresponding_cells_complete = np.intersect1d(
corresponding_cells, data_dense_3hr.columns.values
)
data_one_guide_3hr = data_dense_3hr[corresponding_cells_complete]
# Only take genes that exist across guides.
data_one_guide_0hr_aligned = data_one_guide.transpose()[shared_genes]
data_one_guide_3hr_aligned = data_one_guide_3hr.transpose()[shared_genes]
## Get only the most variable genes
# data_one_guide_0hr_aligned = data_one_guide#.transpose()#[top_genes]
# data_one_guide_3hr_aligned = data_one_guide_3hr#.transpose()#[top_genes]
assert (
data_one_guide_0hr_aligned.shape[1] == data_one_guide_3hr_aligned.shape[1]
)
# append to this gene's list of expression
all_data_0hr.append(data_one_guide_0hr_aligned)
all_data_3hr.append(data_one_guide_3hr_aligned)
all_data_0hr = pd.concat(all_data_0hr)
all_data_3hr = pd.concat(all_data_3hr)
all_data_curr_guide =
|
pd.concat([all_data_0hr, all_data_3hr])
|
pandas.concat
|
# -*- coding: utf-8 -*-
from datetime import datetime
from io import StringIO
import re
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
from pandas.util import testing as tm
import pandas.io.formats.format as fmt
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex"
" ea commodo consequat. Duis aute irure dolor in reprehenderit in"
" voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
" sint occaecat cupidatat non proident, sunt in culpa qui officia"
" deserunt mollit anim id est laborum.")
def expected_html(datapath, name):
"""
Read HTML file from formats data directory.
Parameters
----------
datapath : pytest fixture
The datapath fixture injected into a test by pytest.
name : str
The name of the HTML file without the suffix.
Returns
-------
str : contents of HTML file.
"""
filename = '.'.join([name, 'html'])
filepath = datapath('io', 'formats', 'data', 'html', filename)
with open(filepath, encoding='utf-8') as f:
html = f.read()
return html.rstrip()
@pytest.fixture(params=['mixed', 'empty'])
def biggie_df_fixture(request):
"""Fixture for a big mixed Dataframe and an empty Dataframe"""
if request.param == 'mixed':
df = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
df.loc[:20, 'A'] = np.nan
df.loc[:20, 'B'] = np.nan
return df
elif request.param == 'empty':
df = DataFrame(index=np.arange(200))
return df
@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
def justify(request):
return request.param
@pytest.mark.parametrize('col_space', [30, 50])
def test_to_html_with_col_space(col_space):
df = DataFrame(np.random.random(size=(1, 3)))
# check that col_space affects HTML generation
# and be very brittle about it.
result = df.to_html(col_space=col_space)
hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
assert len(hdrs) > 0
for h in hdrs:
assert "min-width" in h
assert str(col_space) in h
def test_to_html_with_empty_string_label():
# GH 3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
result = df.to_html()
assert "rowspan" not in result
@pytest.mark.parametrize('df,expected', [
(DataFrame({'\u03c3': np.arange(10.)}), 'unicode_1'),
(DataFrame({'A': ['\u03c3']}), 'unicode_2')
])
def test_to_html_unicode(df, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html()
assert result == expected
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({'A': [6.0, 3.1, 2.2]})
result = df.to_html(decimal=',')
expected = expected_html(datapath, 'gh12031_expected_output')
assert result == expected
@pytest.mark.parametrize('kwargs,string,expected', [
(dict(), "<type 'str'>", 'escaped'),
(dict(escape=False), "<b>bold</b>", 'escape_disabled')
])
def test_to_html_escaped(kwargs, string, expected, datapath):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: string,
b: string},
'co>l2': {a: string,
b: string}}
result = DataFrame(test_dict).to_html(**kwargs)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('index_is_named', [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
# GH 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)
})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
if index_is_named:
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh8452_expected_output')
assert result == expected
@pytest.mark.parametrize('multi_sparse,expected', [
(False, 'multiindex_sparsify_false_multi_sparse_1'),
(False, 'multiindex_sparsify_false_multi_sparse_2'),
(True, 'multiindex_sparsify_1'),
(True, 'multiindex_sparsify_2')
])
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
if expected.endswith('2'):
df.columns = index[::2]
with option_context('display.multi_sparse', multi_sparse):
result = df.to_html()
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('max_rows,expected', [
(60, 'gh14882_expected_output_1'),
# Test that ... appears in a middle level
(56, 'gh14882_expected_output_2')
])
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
# GH 14882 - Issue on truncation with odd length DataFrame
index = MultiIndex.from_product([[100, 200, 300],
[10, 20, 30],
[1, 2, 3, 4, 5, 6, 7]],
names=['a', 'b', 'c'])
df = DataFrame({'n': range(len(index))}, index=index)
result = df.to_html(max_rows=max_rows)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('df,formatters,expected', [
(DataFrame(
[[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4)),
{'__index__': lambda x: 'abcd' [x]},
'index_formatter'),
(DataFrame(
{'months': [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
{'months': lambda x: x.strftime('%Y-%m')},
'datetime64_monthformatter'),
(DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')}),
{'hod': lambda x: x.strftime('%H:%M')},
'datetime64_hourformatter')
])
def test_to_html_formatters(df, formatters, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html(formatters=formatters)
assert result == expected
def test_to_html_regression_GH6098():
df = DataFrame({
'clé1': ['a', 'a', 'b', 'b', 'a'],
'clé2': ['1er', '2ème', '1er', '2ème', '1er'],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=['clé1'], columns=['clé2'])._repr_html_()
def test_to_html_truncate(datapath):
index = pd.date_range(start='20010101', freq='D', periods=20)
df = DataFrame(index=index, columns=range(20))
result = df.to_html(max_rows=8, max_cols=4)
expected = expected_html(datapath, 'truncate')
assert result == expected
@pytest.mark.parametrize('sparsify,expected', [
(True, 'truncate_multi_index'),
(False, 'truncate_multi_index_sparse_off')
])
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('option,result,expected', [
(None, lambda df: df.to_html(), '1'),
(None, lambda df: df.to_html(border=0), '0'),
(0, lambda df: df.to_html(), '0'),
(0, lambda df: df._repr_html_(), '0'),
])
def test_to_html_border(option, result, expected):
df = DataFrame({'A': [1, 2]})
if option is None:
result = result(df)
else:
with option_context('display.html.border', option):
result = result(df)
expected = 'border="{}"'.format(expected)
assert expected in result
def test_display_option_warning():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.options.html.border
@pytest.mark.parametrize('biggie_df_fixture', ['mixed'], indirect=True)
def test_to_html(biggie_df_fixture):
# TODO: split this test
df = biggie_df_fixture
s = df.to_html()
buf = StringIO()
retval = df.to_html(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
df.to_html(columns=['B', 'A'], col_space=17)
df.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
df.to_html(columns=['B', 'A'], float_format=str)
df.to_html(columns=['B', 'A'], col_space=12, float_format=str)
@pytest.mark.parametrize('biggie_df_fixture', ['empty'], indirect=True)
def test_to_html_empty_dataframe(biggie_df_fixture):
df = biggie_df_fixture
df.to_html()
def test_to_html_filename(biggie_df_fixture, tmpdir):
df = biggie_df_fixture
expected = df.to_html()
path = tmpdir.join('test.html')
df.to_html(path)
result = path.read()
assert result == expected
def test_to_html_with_no_bold():
df = DataFrame({'x': np.random.randn(5)})
html = df.to_html(bold_rows=False)
result = html[html.find("</thead>")]
assert '<strong' not in result
def test_to_html_columns_arg():
df = DataFrame(tm.getSeriesData())
result = df.to_html(columns=['A'])
assert '<th>B</th>' not in result
@pytest.mark.parametrize('columns,justify,expected', [
(MultiIndex.from_tuples(
list(zip(np.arange(2).repeat(2), np.mod(lrange(4), 2))),
names=['CL0', 'CL1']),
'left',
'multiindex_1'),
(MultiIndex.from_tuples(
list(zip(range(4), np.mod(lrange(4), 2)))),
'right',
'multiindex_2')
])
def test_to_html_multiindex(columns, justify, expected, datapath):
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify=justify)
expected = expected_html(datapath, expected)
assert result == expected
def test_to_html_justify(justify, datapath):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify=justify)
expected = expected_html(datapath, 'justify').format(justify=justify)
assert result == expected
@pytest.mark.parametrize("justify", ["super-right", "small-left",
"noinherit", "tiny", "pandas"])
def test_to_html_invalid_justify(justify):
# GH 17527
df = DataFrame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
df.to_html(justify=justify)
def test_to_html_index(datapath):
# TODO: split this test
index = ['foo', 'bar', 'baz']
df = DataFrame({'A': [1, 2, 3],
'B': [1.2, 3.4, 5.6],
'C': ['one', 'two', np.nan]},
columns=['A', 'B', 'C'],
index=index)
expected_with_index = expected_html(datapath, 'index_1')
assert df.to_html() == expected_with_index
expected_without_index = expected_html(datapath, 'index_2')
result = df.to_html(index=False)
for i in index:
assert i not in result
assert result == expected_without_index
df.index = Index(['foo', 'bar', 'baz'], name='idx')
expected_with_index = expected_html(datapath, 'index_3')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = expected_html(datapath, 'index_4')
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
assert i not in result
# must be the same result as normal index
assert result == expected_without_index
df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
expected_with_index = expected_html(datapath, 'index_5')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
@pytest.mark.parametrize('classes', [
"sortable draggable",
["sortable", "draggable"]
])
def test_to_html_with_classes(classes, datapath):
df = DataFrame()
expected = expected_html(datapath, 'with_classes')
result = df.to_html(classes=classes)
assert result == expected
def test_to_html_no_index_max_rows(datapath):
# GH 14998
df = DataFrame({"A": [1, 2, 3, 4]})
result = df.to_html(index=False, max_rows=1)
expected = expected_html(datapath, 'gh14998_expected_output')
assert result == expected
def test_to_html_multiindex_max_cols(datapath):
# GH 6131
index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']],
codes=[[0, 1, 2], [0, 1, 2]],
names=['b', 'c'])
columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']],
codes=[[0, 0, 0], [0, 1, 2]],
names=[None, 'a'])
data = np.array(
[[1., np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]])
df =
|
DataFrame(data, index, columns)
|
pandas.DataFrame
|
"""Main application to run experiments"""
import logging
from logger.tqdm_logging_handler import TqdmLoggingHandler
logging.basicConfig(level=logging.INFO, handlers=[TqdmLoggingHandler()])
logger = logging.getLogger(__name__)
import sys
import getopt
import pandas as pd
import os
import json
from configuration.configuration_reader import ConfigurationReader
from evaluation.information_loss import calculate_normalized_certainty_penalty
from evaluation.partition import get_partition_lengths, calculate_mean_partition_size, calculate_std_partition_size, get_partition_split_share
from kernel.anonymization_kernel import AnonymizationKernel
from nlp.sensitive_terms_recognizer import SensitiveTermsRecognizer
from preprocessing.data_reader import DataReader
from preprocessing.preprocessor import Preprocessor
from pathlib import Path
def main(argv):
"""Main entrypoint for the anonymization tool"""
# Default parameters
configuration_file = ''
input_file = ''
use_cache = True
weight = 0.5
strategy = "gdf"
result_dir = None
# Read and set tool parameters
try:
opts, _ = getopt.getopt(argv, "c:i:r:w:v", ["config=", "input=", "weight=", "result_dir=", "verbose"])
except getopt.GetoptError:
logger.error('experiment_runner.py -c <config_file> -i <input_file> -w <relational_weight>')
sys.exit(2)
for opt, arg in opts:
if opt in ("-c", "--config"):
configuration_file = arg
if opt in ("-i", "--input"):
input_file = arg
base = os.path.basename(input_file)
if not result_dir:
result_dir = os.path.splitext(base)[0]
if opt in ("-w", "--weight"):
weight = float(arg)
strategy = "mondrian"
if opt in ("-r", "--result_dir"):
result_dir = arg
if opt in ("-v", "--verbose"):
logging.getLogger().setLevel(logging.DEBUG)
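    # Example invocation (hypothetical file names; flags as parsed above):
    #   python experiment_runner.py -c configs/example.json -i data/example.csv -w 0.5 -r example_results -v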
result_path = Path("experiment_results") / result_dir
result_path.mkdir(parents=True, exist_ok=True)
# Let's get started
logger.info("Anonymizing input file %s", input_file)
# Initialize and read configuration
configuration_reader = ConfigurationReader()
config = configuration_reader.read(configuration_file)
# Read data using data types defined in the configuration
data_reader = DataReader(config)
df = data_reader.read(input_file)
# Initialize the sensitive terms recognizer
sensitive_terms_recognizer = SensitiveTermsRecognizer(config, use_cache)
# Initialize the preprocessor (preprocessor is stateful, so pass df at the beginning)
pp = Preprocessor(sensitive_terms_recognizer, config, df)
# Run through preprocessing of dataframe: Data cleansing, analysis of textual attributes, resolving of redundant information, and compression
pp.clean_textual_attributes()
pp.analyze_textual_attributes()
pp.find_redundant_information()
pp.compress()
# Get sensitive terms dictionary and preprocessed dataframe
terms = pp.get_sensitive_terms()
df = pp.get_df()
# Initialize the anonymization kernel by providing the sensitive terms dictionary, the configuration, the sensitive terms recognizer, and the preprocessor
kernel = AnonymizationKernel(terms, config, sensitive_terms_recognizer, pp)
unanonymized = df
# Determine k values for experiment
k_values = [2, 3, 4, 5, 10, 20, 50]
biases = config.get_biases()
# Set strategy names
if strategy == "mondrian":
strategy_name = "mondrian-{}".format(weight)
elif strategy == "gdf":
strategy_name = strategy
# Parameters for calculating metrics
quasi_identifiers = config.get_quasi_identifiers()
textual_attribute_mapping = pp.get_textual_attribute_mapping()
# Prepare dataframes and json to store experiment results
total_information_loss = pd.DataFrame(index=k_values, columns=[strategy_name])
total_information_loss.index.name = 'k'
relational_information_loss = pd.DataFrame(index=k_values, columns=[strategy_name])
relational_information_loss.index.name = 'k'
textual_information_loss =
|
pd.DataFrame(index=k_values, columns=[strategy_name])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# --- target API: pandas.util.testing.assert_frame_equal ---
import pandas as pd
import pytest
from ploomber.validators import (Assert, data_frame_validator, validate_schema,
validate_values)
from ploomber.validators import string
def test_Assert():
assert_ = Assert()
assert_(False, 'Error message')
assert_(True, 'Another error message')
assert len(assert_) == 1
assert assert_.messages_error == ['Error message']
assert repr(assert_) == 'Assert oject with 1 error messages'
with pytest.raises(AssertionError) as excinfo:
assert_.check()
assert str(excinfo.value) == '1 error found:\nError message'
@pytest.fixture
def assert_():
assert_ = Assert()
assert_(False, '1')
assert_(False, '2')
assert_(False, '3')
return assert_
def test_Assert_iter(assert_):
assert list(assert_) == ['1', '2', '3']
def test_Assert_str_without_errors():
assert str(Assert()) == 'No errors found'
def test_Assert_str_with_errors(assert_):
assert '3 errors found' in str(assert_)
assert all(msg in str(assert_) for msg in ('1', '2', '3'))
def test_Assert_with_warning(assert_):
assert_.warn(False, '4')
assert '3 errors found' in str(assert_)
assert all(msg in str(assert_) for msg in ('1', '2', '3'))
assert '1 warning' in str(assert_)
assert '4' in str(assert_)
def test_Assert_with_warnings(assert_):
assert_.warn(False, '4')
assert_.warn(False, '5')
assert '3 errors found' in str(assert_)
assert all(msg in str(assert_) for msg in ('1', '2', '3'))
assert '2 warnings' in str(assert_)
assert all(msg in str(assert_) for msg in ('4', '5'))
def test_allows_optional_columns():
df = pd.DataFrame({'a': [0], 'b': [0]})
# --- target API: pandas.DataFrame ---
import os
import csv
import collections
import numpy as np
import statsmodels.stats.inter_rater
import pandas as pd
import json
def checkEqual(iterator):
return len(set(iterator)) <= 1
class HumanClassification:
def __init__(self, human_classifications_location,classs):
self.dataset = {}
self.classs = classs
self.human_names = []
for filename in os.listdir(human_classifications_location):
human_name = filename.split(".")[0]
self.human_names.append(human_name)
with open(human_classifications_location + filename, encoding="utf-8-sig", newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=";")
for row in reader:
idx = int(row["id"])
age = int(row["age"])
diagnostic = row["diagnostic"].strip().replace("\r","").replace("\n","")
classs = True if row[self.classs] == "True" else False
if idx not in self.dataset:
self.dataset[idx] = {}
self.dataset[idx]["diagnostic"] = diagnostic
self.dataset[idx]["age"] = age
self.dataset[idx][self.classs] = {}
self.dataset[idx][self.classs][human_name] = classs
else:
self.dataset[idx][self.classs][human_name] = classs
def calculate_fleiss(self):
self.matrix_data = []
for point in self.dataset.values():
classifications = point[self.classs].values()
counts = collections.Counter(classifications)
self.matrix_data.append((counts[True],counts[False]))
self.matrix_data = np.array(self.matrix_data)
self.fleiss = statsmodels.stats.inter_rater.fleiss_kappa(self.matrix_data)
def extract_disagreements(self,disagreements_file_location):
self.disagreements = {}
for idx,data in self.dataset.items():
classifications = data[self.classs].values()
if not checkEqual(classifications):
self.disagreements[idx] = {}
self.disagreements[idx]["diagnostic"] = data["diagnostic"]
self.disagreements[idx]["age"] = data["age"]
for name,classification in data[self.classs].items():
self.disagreements[idx][name] = classification
self.disagreements_df = pd.DataFrame.from_dict(self.disagreements, orient='index')
self.disagreements_df.to_csv(disagreements_file_location, index_label="id")
def calculate_venn(self):
self.venn_data={name:[] for name in self.human_names}
for idx,data in self.dataset.items():
classifications = data[self.classs]
for name,classification in classifications.items():
if classification:
self.venn_data[name].append(idx)
else:
self.venn_data[name].append(idx*-1)
def write_report(self,report_location):
self.report = {
"fleiss" : self.fleiss,
"dataset_n" : len(self.dataset),
"agreements_n": len(self.dataset) - len(self.disagreements),
"disagreements_n": len(self.disagreements),
"venn_data": self.venn_data,
"disagreements" : self.disagreements,
"dataset" : self.dataset
}
with open(report_location, 'w', encoding='utf-8') as json_file:
json.dump(self.report, json_file, indent=2, ensure_ascii=False)
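# Hedged usage sketch (paths and the class label are placeholders, not from the original
# script): each human rater contributes one semicolon-delimited CSV with id, age,
# diagnostic and a <class> column; the class aggregates them, computes Fleiss' kappa,
# and writes the disagreements table plus a JSON report.
#   hc = HumanClassification("raters/", "some_class")
#   hc.calculate_fleiss()
#   hc.extract_disagreements("disagreements.csv")
#   hc.calculate_venn()
#   hc.write_report("report.json")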
class GroundTruthGenerator:
def __init__(self, human_dataset, super_dataset,classs,delimiter):
self.classs = classs
with open(human_dataset, encoding="utf-8") as json_file:
self.dataset = json.load(json_file)["dataset"]
with open(super_dataset, encoding="utf-8-sig", newline='') as csvfile:
reader = csv.DictReader(csvfile,delimiter=delimiter)
for row in reader:
self.dataset[row["id"]][self.classs]["gt"] = True if row[self.classs] == "True" else False
def write_ground_truth(self, ground_truth_file_location):
self.ground_truth = {}
for idx,data in self.dataset.items():
self.ground_truth[idx] = {}
self.ground_truth[idx]["diagnostic"] = data["diagnostic"]
self.ground_truth[idx]["age"] = data["age"]
if "gt" not in data[self.classs]:
self.ground_truth[idx][self.classs] = list(data[self.classs].values())[0]
else:
self.ground_truth[idx][self.classs] = data[self.classs]["gt"]
self.ground_truth_df = pd.DataFrame.from_dict(self.ground_truth, orient='index')
# --- target API: pandas.DataFrame.from_dict ---
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.patheffects
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from decimal import Decimal
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.font_manager import FontProperties
from matplotlib import transforms
from scipy import stats
from scipy.spatial import distance
from scipy.cluster import hierarchy
from statsmodels.sandbox.stats import multicomp
mpl.rcParams['figure.dpi'] = 90
# ## style pre-sets
# In[4]:
NOTEBOOK_PRESET = {"style": "ticks", "font": "Helvetica", "font_scale": 1.2, "context": "notebook"}
NOTEBOOK_FONTSIZE = 10
# In[5]:
PAPER_PRESET = {"style": "ticks", "font": "Helvetica", "context": "paper",
"rc": {"font.size":8,"axes.titlesize":8,
"axes.labelsize":8, 'axes.linewidth':0.5,
"legend.fontsize":8, "xtick.labelsize":8,
"ytick.labelsize":8, "xtick.major.size": 3.0,
"ytick.major.size": 3.0, "axes.edgecolor": "black",
"xtick.major.pad": 3.0, "ytick.major.pad": 3.0}}
PAPER_FONTSIZE = 8
# ## palette pre-sets
# In[6]:
husl = sns.color_palette("husl", 9)
BETTER_TYPE_PALETTE = {"CONTROL": husl[3], "CONTROL_SNP": husl[4], "WILDTYPE": husl[5], "FLIPPED": husl[6],
"SNP": husl[7], "DELETION": husl[0], "SCRAMBLED": "lightgray", "RANDOM": "darkgray"}
# In[ ]:
TSS_CLASS_PALETTE = {"Enhancer": sns.color_palette("deep")[1],
"intergenic": sns.color_palette("deep")[2], "protein_coding": sns.color_palette("deep")[5],
"div_lnc": sns.color_palette("deep")[3], "div_pc": sns.color_palette("deep")[0]}
# In[ ]:
COLOR_DICT = {"A": "crimson", "C": "mediumblue", "G": "orange", "T": "forestgreen"}
# ## label pre-sets
# In[7]:
BETTER_TYPE_ORDER1 = ["CONTROL", "CONTROL_SNP", "WILDTYPE", "FLIPPED", "SNP", "SCRAMBLED", "RANDOM"]
BETTER_TYPE_ORDER2 = ["CONTROL", "CONTROL_SNP", "WILDTYPE", "FLIPPED", "SNP", "DELETION", "SCRAMBLED", "RANDOM"]
# In[ ]:
TSS_CLASS_ORDER = ["Enhancer", "intergenic", "div_lnc", "protein_coding", "div_pc"]
# ## class
# In[ ]:
class Scale(matplotlib.patheffects.RendererBase):
def __init__(self, sx, sy=None):
self._sx = sx
self._sy = sy
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
affine = affine.identity().scale(self._sx, self._sy)+affine
renderer.draw_path(gc, tpath, affine, rgbFace)
# ## plotting functions
# In[ ]:
def add_margin(ax,x=0.05,y=0.05):
# This will, by default, add 5% to the x and y margins. You
# can customise this using the x and y arguments when you call it.
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xmargin = (xlim[1]-xlim[0])*x
ymargin = (ylim[1]-ylim[0])*y
ax.set_xlim(xlim[0]-xmargin,xlim[1]+xmargin)
ax.set_ylim(ylim[0]-ymargin,ylim[1]+ymargin)
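# Hedged illustration (not in the original notebook): add_margin simply widens the current
# axis limits by a fraction of their span on each side, e.g.
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [0, 1])
#   add_margin(ax, x=0.1, y=0.05)   # 10% extra room on x, 5% on y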
# In[8]:
def mimic_r_boxplot(ax):
for i, patch in enumerate(ax.artists):
r, g, b, a = patch.get_facecolor()
col = (r, g, b, 1)
patch.set_facecolor((r, g, b, .5))
patch.set_edgecolor((r, g, b, 1))
# Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
# Loop over them here, and use the same colour as above
line_order = ["lower", "upper", "whisker_1", "whisker_2", "med", "fliers"]
for j in range(i*6,i*6+6):
elem = line_order[j%6]
line = ax.lines[j]
if "whisker" in elem:
line.set_visible(False)
line.set_color(col)
line.set_mfc(col)
line.set_mec(col)
if "fliers" in elem:
line.set_alpha(0.5)
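# Hedged usage note: mimic_r_boxplot is intended to be called right after drawing a seaborn
# boxplot on ax. It gives each box a translucent fill with a solid edge of the same colour,
# recolours the caps/median/fliers to match, hides the whisker lines and fades the fliers,
# approximating R's default boxplot look:
#   ax = sns.boxplot(data=df, x="group", y="value")   # df is hypothetical
#   mimic_r_boxplot(ax)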
# In[ ]:
def annotate_pval(ax, x1, x2, y, h, text_y, val, fontsize):
from decimal import Decimal
ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1, c="black", linewidth=0.5)
if val < 0.0004:
text = "{:.2e}".format(Decimal(val))
#text = "**"
elif val < 0.05:
text = "%.3f" % val
#text = "*"
else:
text = "%.2f" % val
ax.text((x1+x2)*.5, text_y, text, ha='center', va='bottom', color="black", size=fontsize)
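# Hedged usage sketch for annotate_pval (df_melt and the coordinates are hypothetical):
# draw a significance bracket between the categories at x=0 and x=1, with base height y,
# riser height h, and print the p-value just above it using the thresholds defined above.
#   fig, ax = plt.subplots()
#   sns.boxplot(data=df_melt, x="group", y="value", ax=ax)
#   annotate_pval(ax, x1=0, x2=1, y=2.0, h=0.1, text_y=2.15, val=0.032, fontsize=PAPER_FONTSIZE)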
# In[ ]:
def neg_control_plot(df, order, palette, fontsize, cell_type, ax, figsize, ylabel, sharey, title, save, plotname):
df_sub = df[df["better_type"].isin(order)].drop_duplicates()
if ax == None:
plt.figure(figsize=figsize)
ax = sns.boxplot(data=df_sub, x="better_type", y="overall_mean", order=order, palette=palette, linewidth=1,
saturation=1, flierprops = dict(marker='o', markersize=5))
else:
sns.boxplot(data=df_sub, x="better_type", y="overall_mean", order=order, palette=palette, linewidth=1,
saturation=1, flierprops = dict(marker='o', markersize=5), ax=ax)
ax.set_xticklabels(order, rotation=30)
mimic_r_boxplot(ax)
# calc p-vals b/w dists
rand_dist = np.asarray(df[df["better_type"] == "random"]["overall_mean"])
ctrl_dist = np.asarray(df[df["better_type"] == "control"]["overall_mean"])
rand_dist = rand_dist[~np.isnan(rand_dist)]
ctrl_dist = ctrl_dist[~np.isnan(ctrl_dist)]
rand_u, rand_pval = stats.mannwhitneyu(rand_dist, ctrl_dist, alternative="two-sided", use_continuity=False)
# assumed completion: the original referenced wt_dist and scram_pval below without defining
# them; these definitions follow the same pattern (wildtype distribution for the axis
# limits, scrambled-vs-control Mann-Whitney p-value)
wt_dist = np.asarray(df[df["better_type"] == "wildtype"]["overall_mean"].dropna())
scram_dist = np.asarray(df[df["better_type"] == "scrambled"]["overall_mean"].dropna())
scram_u, scram_pval = stats.mannwhitneyu(scram_dist, ctrl_dist, alternative="two-sided", use_continuity=False)
if sharey:
ax.set_ylim((-10, 10))
ax.yaxis.set_ticks(np.arange(-10, 11, 5))
y_1 = 8
y_2 = 6
text_y_1 = 7.5
text_y_2 = 5.5
else:
ax.set_ylim((np.min(rand_dist)-2, np.max(wt_dist)+3.5))
y_1 = np.max(wt_dist)+1.85
y_2 = np.max(wt_dist)+0.75
text_y_1 = np.max(wt_dist)+1.65
text_y_2 = np.max(wt_dist)+0.55
# statistical annotation
annotate_pval(ax, 0, 2, y_1, 0, text_y_1, rand_pval, fontsize)
annotate_pval(ax, 1, 2, y_2, 0, text_y_2, scram_pval, fontsize)
ax.set_ylabel(ylabel)
ax.set_xlabel("")
if title:
ax.set_title("%s" % (cell_type))
if save:
plt.savefig("%s/%s.pdf" % (figs_dir, plotname), dpi="figure", bbox_inches="tight")
# In[ ]:
def plot_activ_and_tiles(figsize, df, reps, color, palette, x_margin_percent, tss, x_tick_size, save, plotname):
fig = plt.figure(figsize=(figsize))
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1], hspace=0)
activ_ax = plt.subplot(gs[0])
tile_ax = plt.subplot(gs[1])
## plot activities ##
df["adjusted_tile_start"] = df["actual_start"] + ((df["actual_end"] - df["actual_start"])/2)
cols = list(reps)
cols.extend(["element_id", "element", "adjusted_tile_start", "combined_sig"])
df_sub = df[cols]
# sort and melt
df_sub = df_sub.sort_values(by="adjusted_tile_start")
df_melt = pd.melt(df_sub, id_vars=["element_id", "element", "adjusted_tile_start", "combined_sig"])
sns.swarmplot(data=df_melt, x="adjusted_tile_start", y="value", ax=activ_ax, color="lightslategrey", size=5,
hue="combined_sig", palette=palette)
sns.boxplot(data=df_melt, x="adjusted_tile_start", y="value", ax=activ_ax,
showcaps=False, showfliers=False, whiskerprops={'linewidth':0},
zorder=1, hue="combined_sig", palette=palette, dodge=False)
# fix boxplot colors
for i,artist in enumerate(activ_ax.artists):
# Set the linecolor on the artist to the facecolor, and set the facecolor to None
col = artist.get_facecolor()
artist.set_edgecolor(col)
artist.set_facecolor('None')
# Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
# Loop over them here, and use the same colour as above
for j in range(i*3,i*3+3):
line = activ_ax.lines[j]
line.set_color(col)
line.set_mfc(col)
line.set_mec(col)
add_margin(activ_ax, x=x_margin_percent, y=0)
activ_ax.xaxis.set_visible(False)
activ_ax.set_ylabel("MPRA activity")
activ_ax.legend_.remove()
## plot tiles ##
for i, elem_id in enumerate(df.sort_values(by="tile_number").element_id):
tile_num = df[df["element_id"] == elem_id]["tile_number"].iloc[0]
tile_start = df[df["element_id"] == elem_id]["actual_start"].iloc[0]
tile_end = df[df["element_id"] == elem_id]["actual_end"].iloc[0]
tile_strand = df[df["element_id"] == elem_id]["strand"].iloc[0]
if i % 2 == 0:
y = 0.5
else:
y = 0
tile_ax.plot((tile_start, tile_end), (y, y), color="black", linewidth=5, solid_capstyle="butt")
tile_ax.get_xaxis().get_major_formatter().set_useOffset(False)
tile_ax.get_xaxis().get_major_formatter().set_scientific(False)
tile_ax.plot((tss, tss), (0.75, 1.4), '-', color=color)
if tile_strand == "+":
tile_ax.arrow(tss, 1.4, 40, 0, fc=color, ec=color, head_width=0.45, head_length=30, linewidth=1)
else:
tile_ax.arrow(tss, 1.4, -40, 0, fc=color, ec=color, head_width=0.45, head_length=30, linewidth=1)
#tile_ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
tile_ax.set_xticks(np.arange(df.actual_start.min(), df.actual_start.max()+200, 144))
plt.setp(tile_ax.get_xticklabels(), fontsize=x_tick_size)
tile_ax.set_ylim((-0.5, 1.75))
tile_ax.yaxis.set_visible(False)
tile_ax.spines["left"].set_visible(False)
tile_ax.spines["right"].set_visible(False)
tile_ax.spines["bottom"].set_visible(False)
if save:
fig.savefig(plotname, dpi="figure", bbox_inches="tight")
plt.show()
# In[1]:
def plot_dendrogram(linkage, max_dist, title):
plt.figure(figsize=(25, 8))
dg = hierarchy.dendrogram(linkage, show_leaf_counts=True)
dists = []
for i, d, c in zip(dg['icoord'], dg['dcoord'], dg['color_list']):
x = 0.5 * sum(i[1:3])
y = d[1]
plt.plot(x, y, 'o', c=c)
if y > max_dist:
plt.annotate("%.3g" % y, (x, y), xytext=(0, -5),
textcoords='offset points',
va='top', ha='center')
dists.append(y)
plt.axhline(y=max_dist)
plt.title(title)
plt.show()
return dists
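# Hedged usage sketch: build a linkage matrix first, then plot it and collect the merge
# heights that exceed the cut-off (X below is a placeholder observation matrix):
#   Z = hierarchy.linkage(X, method="ward")
#   dists = plot_dendrogram(Z, max_dist=10, title="hierarchical clustering")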
# In[ ]:
def pearsonfunc(x, y, **kws):
r, p = stats.pearsonr(x, y)
ax = plt.gca()
ax.annotate("pearson r = {:.2f}\np = {:.2e}".format(r, Decimal(p)),
xy=(.1, .9), xycoords=ax.transAxes)
def spearmanfunc(x, y, **kws):
r, p = stats.spearmanr(x, y)
ax = plt.gca()
ax.annotate("spearman r = {:.2f}\np = {:.2e}".format(r, Decimal(p)),
xy=(.1, .9), xycoords=ax.transAxes)
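# Hedged note: pearsonfunc/spearmanfunc appear intended to be mapped over a seaborn
# PairGrid, where each panel's current axes receive the correlation annotation
# (tidy_df below is a placeholder DataFrame of numeric columns):
#   g = sns.PairGrid(tidy_df)
#   g.map(plt.scatter, s=5)
#   g.map(pearsonfunc)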
# In[ ]:
def plot_peaks_and_tfbs(figsize, seq_len, seq_name, cell, scores, yerrs, motif_vals, bases, plotname, save):
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(3, 1, height_ratios=[4, 3, 1], hspace=0.2)
peak_ax = plt.subplot(gs[0])
motif_ax = plt.subplot(gs[1])
# plot deletion values
xs = list(range(0, seq_len))
peak_ax.bar(xs, scores, yerr=yerrs, color="lightgray", edgecolor="gray", linewidth=0.5, ecolor="gray",
error_kw={"elinewidth": 0.75})
# labels
peak_ax.set_xlim((-0.5, seq_len))
peak_ax.set_xlabel("")
peak_ax.set_ylabel("log2(del/WT)", fontsize=5)
peak_ax.xaxis.set_visible(False)
peak_ax.set_title("filtered scores and peaks: %s (%s)" % (seq_name, cell))
# plot motif nums
xs = list(range(0, seq_len))
max_motif_val = np.nanmax(np.abs(motif_vals))
motif_ax.axhline(y=0, color="darkgrey", linewidth=0.5, linestyle="dashed")
motif_ax.plot(xs, motif_vals, color="black", linewidth=0.75, zorder=10)
# labels
motif_ax.set_xlim((-0.5, seq_len))
motif_ax.set_ylim((-max_motif_val-1, max_motif_val+1))
motif_ax.set_xlabel("nucleotide number")
motif_ax.set_ylabel(r'$\Delta$ motifs', fontsize=5)
motif_ax.xaxis.set_visible(False)
plt.show()
if save:
fig.savefig("%s.pdf" % (plotname), dpi="figure", bbox_inches="tight", transparent=True)
plt.close()
# In[ ]:
def paired_swarmplots_w_pval(n_rows, n_cols, figsize, snp_df, data_df, fontsize, figs_dir, plotname, save):
fig, axarr = plt.subplots(figsize=figsize, squeeze=False)
pal = {"ref": "grey", "alt": sns.color_palette()[2]}
median_width = 0.3
# make axes objects
axes = []
counter = 0
for r in range(n_rows):
for c in range(n_cols):
if counter < len(snp_df):
ax = plt.subplot2grid((n_rows, n_cols), (r, c))
axes.append(ax)
counter += 1
# add plots
counter = 0
for i, row in snp_df.iterrows():
ax = axes[counter]
wt_id = row.wt_id
snp_id = row.unique_id
df = data_df[data_df["unique_id"].isin([wt_id, snp_id])]
df = df.sort_values(by="wt_or_snp", ascending=False)
if not "NA" in str(row.combined_padj) and not
|
pd.isnull(row.combined_padj)
|
pandas.isnull
|
import joblib
import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import normalize
from pathlib import Path
import h5py
import sys
#import ogr, os
#import gdal, osr
import math
from math import pi
import random
import string
import warnings
#import arguments
#from the tile path, get the info needed for BRDF correction and append them to a dataframe
#full_path = "/orange/ewhite/NeonData/OSBS/DP3.30006.001/2018/FullSite/D03/2018_OSBS_4/L3/Spectrometer/Reflectance/NEON_D03_OSBS_DP3_405000_3285000_reflectance.h5"
#mod = joblib.load('/blue/ewhite/s.marconi/NeonSpeciesClassification/outputs/model_ALL_final_model.pkl')
#epsg = 32610
#year = 2018
#site= "BART"
pt_kld = "/blue/ewhite/s.marconi/NeonSpeciesClassification/data/0411.csv"
model_pkl= sys.argv[1]
pt_kld = sys.argv[2]
year = sys.argv[3]
site= sys.argv[4]
full_path = sys.argv[5]
epsg = 32610
ras_dir = "/orange/idtrees-collab/species_classification/"
#global_brdf_correction functions
def calculate_geom_kernel(df, sns_zn = 0, ross = "thick", li = "dense"):
relative_az = df["sns_az"] - df["sol_az"]
solar_zn_ = np.arctan(10*np.tan(df["sol_zn"]))
sensor_zn_ = np.arctan(10*np.tan(sns_zn))
D = np.sqrt((np.tan(solar_zn_)**2) + (np.tan(sensor_zn_)**2) - \
2*np.tan(solar_zn_)* np.tan(sensor_zn_)*np.cos(relative_az))
# Eq 49. Wanner et al. JGRA 1995
t_num = 2. * np.sqrt(D**2 + (np.tan(solar_zn_)*np.tan(sensor_zn_)* \
np.sin(relative_az))**2)
t_denom = (1/np.cos(solar_zn_)) + (1/np.cos(sensor_zn_))
t_ = np.minimum(1,np.maximum(t_num/t_denom, -1))
t = np.arccos(t_)
# Eq 33,48. Wanner et al. JGRA 1995
O = (1/np.pi) * (t - np.sin(t)*np.cos(t)) * t_denom
# Eq 51. Wanner et al. JGRA 1995
cosPhase_ = np.cos(solar_zn_)*np.cos(sensor_zn_) + \
np.sin(solar_zn_)* np.sin(sensor_zn_)* np.cos(relative_az)
#
if(li == 'sparse'):
# Eq 32. Wanner et al. JGRA 1995
k_geom = O - (1/np.cos(solar_zn_)) - (1/np.cos(sensor_zn_)) + \
0.5*(1+ cosPhase_) * (1/np.cos(sensor_zn_))
elif(li == 'dense'):
# Eq 47. Wanner et al. JGRA 1995
k_geom = (((1+cosPhase_) * (1/np.cos(sensor_zn_)))/ (t_denom - O)) - 2
#
return(k_geom)
def generate_volume_kernel(df, sns_zn = 0, ross = "thick", li = "dense"):
relative_az = df["sns_az"] - df["sol_az"]
#Ross kernels
############
# Eq 2. Schlapfer et al. IEEE-TGARS 2015
# (fix: the phase angle uses the solar zenith, not the solar azimuth, and the sensor
# zenith comes from the sns_zn argument so the nadir call with sns_zn=0 is honoured)
phase = np.arccos(np.cos(df["sol_zn"])*np.cos(sns_zn) + \
np.sin(df["sol_zn"])*np.sin(sns_zn)* np.cos(relative_az))
if(ross == 'thick'):
# Eq 13. Wanner et al. JGRA 1995
k_vol = ((np.pi/2 - phase)*np.cos(phase) + \
np.sin(phase))/(np.cos(sns_zn) * np.cos(df["sol_zn"])) - np.pi/4
elif(ross == 'thin'):
# Eq 13. Wanner et al. JGRA 1995
k_vol = ((np.pi/2 - phase)* np.cos(phase) + \
np.sin(phase))/(np.cos(sns_zn)*np.cos(df["sol_zn"])) - np.pi/2
return(k_vol)
def generate_topographic_coeffs(df):
relative_az = df["aspect"] - df["sol_az"]
cos_i = np.cos(df["sol_zn"]) * np.cos(df["slope"])+ np.sin(df["sol_zn"]) * \
np.sin(df["slope"])*np.cos(relative_az)
c1 = np.cos(df["sol_zn"])*np.cos(df["slope"])
#Ross kernels
############
return(cos_i, c1)
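# Hedged note on how these pieces combine downstream: for each band, calculate_brdf applies
# a kernel-based BRDF normalisation together with a C-style topographic correction, roughly
#   refl_corr = (band/10000) * (brdf_nadir / brdf_observed) * (c1*b + a) / (cos_i*b + a)
# where brdf_* = a0 + a1*k_vol + a2*k_geom from coeffs_brdf, and (a, b) are the per-band
# topographic coefficients in topo_coeff.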
#calculate brdf correction on the tile's DF
def calculate_brdf(hsi, coeffs_brdf, topo_coeff):
# calculate scattering kernels
k_geom = calculate_geom_kernel(hsi, sns_zn = hsi["sns_zn"])
k_vol = generate_volume_kernel(hsi, sns_zn = hsi["sns_zn"])
# calculate scattering kernels (at NADIR)
k_geom_nadir = calculate_geom_kernel(hsi)
k_vol_nadir = generate_volume_kernel(hsi)
#generate topographic coefficients
topo_coeffs = generate_topographic_coeffs(hsi)
#
# k_geom = calculate_geom_kernel(hsi, sns_zn = hsi["sns_zn"])
# k_vol = generate_volume_kernel(hsi, sns_zn = hsi["sns_zn"])
# calculate scattering kernels (at NADIR)
# k_geom_nadir = calculate_geom_kernel(hsi)
# k_vol_nadir = generate_volume_kernel(hsi)
#generate topographic coefficients
cos_i, c1 = generate_topographic_coeffs(hsi)
#metadata = hsi %>% select(!contains("band"))
#hsi = hsi %>% select(contains("band"))
topo = cos_i
#X = pd.concat([k_vol,k_geom, k_geom_nadir, k_vol_nadir, topo],axis=1)
#if hsi.shape[1] > 367:
hsi = hsi.rename(columns={"band_368": "CHM"})
#
#if hsi.shape[1] > 369:
hsi = hsi.filter(regex='band')
#
refl = np.zeros(hsi.shape, dtype=np.float)
for ii in range(refl.shape[1]):
y = hsi.iloc[:,ii]/10000
#apply coefficients to perform correction
#k_vol+k_geom
brdf = coeffs_brdf.iloc[ii, 1] * k_vol + \
coeffs_brdf.iloc[ii, 2] * k_geom + \
coeffs_brdf.iloc[ii, 0]
brdf_nd = coeffs_brdf.iloc[ii, 1] * k_vol_nadir + \
coeffs_brdf.iloc[ii, 2] * k_geom_nadir + \
coeffs_brdf.iloc[ii, 0]
#calculate BRDF correction
bdrf_cor = brdf_nd/brdf
#apply coefficients to perform topographic correction
topo_cor = (c1 * topo_coeff.iloc[ii, 1] +
topo_coeff.iloc[ii, 0]) / (topo * \
topo_coeff.iloc[ii, 1] + topo_coeff.iloc[ii, 0])
#bnd = bnd/10000
refl[..., ii] = y * bdrf_cor * topo_cor
#
return(refl)
def tile_solar_angle(full_path):
hdf5_file = h5py.File(full_path, 'r')
file_attrs_string = str(list(hdf5_file.items()))
file_attrs_string_split = file_attrs_string.split("'")
sitename = file_attrs_string_split[1]
flight_paths = hdf5_file[sitename]["Reflectance/Metadata/Ancillary_Imagery/Data_Selection_Index"].attrs["Data_Files"]
flight_paths=str(flight_paths).split(",")
which_paths = np.unique(hdf5_file[sitename]["Reflectance/Metadata/Ancillary_Imagery/Data_Selection_Index"].value)
solar_angle = []
for pt in which_paths:
#if pt is negative, get any from the available to avoid error(the pixel is blank anyway)
if pt < 0:
flight = (flight_paths)[ which_paths[-1]].split("_")[5]
else:
flight = (flight_paths)[pt].split("_")[5]
#
sol_az = hdf5_file[sitename]["Reflectance/Metadata/Logs/"][str(flight)]["Solar_Azimuth_Angle"].value
sol_zn = hdf5_file[sitename]["Reflectance/Metadata/Logs/"][str(flight)]["Solar_Zenith_Angle"].value
solar_angle.append([pt, sol_az, sol_zn])
return(solar_angle)
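# Hedged note: tile_solar_angle returns one [selection_index, solar_azimuth, solar_zenith]
# triple per flight line contributing to the mosaic; h5refl2array below broadcasts these
# onto per-pixel arrays via the Data_Selection_Index layer.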
def h5refl2array(full_path, epsg):
#refl, refl_md, wavelengths, sol_az, sol_zn, sns_az, sns_zn, slope, aspect = h5refl2array(full_path, epsg = epsg)
hdf5_file = h5py.File(full_path, 'r')
file_attrs_string = str(list(hdf5_file.items()))
file_attrs_string_split = file_attrs_string.split("'")
sitename = file_attrs_string_split[1]
epsg = hdf5_file[sitename]["Reflectance/Metadata/Coordinate_System/EPSG Code"].value
solar_angles = tile_solar_angle(full_path)
#Extract the reflectance & wavelength datasets
reflArray = hdf5_file[sitename]['Reflectance']
refl =reflArray['Reflectance_Data'].value
wavelengths = reflArray['Metadata']['Spectral_Data']['Wavelength'].value
# Create dictionary containing relevant metadata information
refl_md = {}
refl_md['mapInfo'] = reflArray['Metadata']['Coordinate_System']['Map_Info'].value
refl_md['wavelength'] = reflArray['Metadata']['Spectral_Data']['Wavelength'].value
refl_md['shape'] = refl.shape
#Extract no data value & scale factor
refl_md['noDataVal'] = float(reflArray['Reflectance_Data'].attrs['Data_Ignore_Value'])
refl_md['scaleFactor'] = float(reflArray['Reflectance_Data'].attrs['Scale_Factor'])
#metadata['interleave'] = reflData.attrs['Interleave']
refl_md['bad_band_window1'] = np.array([1340, 1445])
refl_md['bad_band_window2'] = np.array([1790, 1955])
refl_md['epsg'] = str(epsg).split("'")[1]
#
#get tiles for BRDF correction
sns_az = hdf5_file[sitename]['Reflectance/Metadata/to-sensor_azimuth_angle']
sns_zn = hdf5_file[sitename]['Reflectance/Metadata/to-sensor_zenith_angle']
slope = hdf5_file[sitename]['Reflectance/Metadata/Ancillary_Imagery/Slope']
aspect = hdf5_file[sitename]['Reflectance/Metadata/Ancillary_Imagery/Aspect']
elevation = hdf5_file[sitename]['Reflectance/Metadata/Ancillary_Imagery/Smooth_Surface_Elevation']
#
#get solar angles as array to leverage flightpaths mosaic
flightpaths = hdf5_file[sitename]['Reflectance/Metadata/Ancillary_Imagery/Data_Selection_Index'].value
sol_zn = hdf5_file[sitename]['Reflectance/Metadata/Ancillary_Imagery/Data_Selection_Index'].value
sol_az = hdf5_file[sitename]['Reflectance/Metadata/Ancillary_Imagery/Data_Selection_Index'].value
for pt in range(len(solar_angles)):
sol_az[flightpaths==solar_angles[pt][0]] = solar_angles[pt][1]
sol_zn[flightpaths==solar_angles[pt][0]] = solar_angles[pt][2]
#
    mapInfo_string = str(refl_md['mapInfo'])
    mapInfo_split = mapInfo_string.split(",")
#
# Extract the resolution & convert to floating decimal number
refl_md['res'] = {}
refl_md['res']['pixelWidth'] = float(mapInfo_split[5])
refl_md['res']['pixelHeight'] = float(mapInfo_split[6])
# Extract the upper left-hand corner coordinates from mapInfo
xMin = float(mapInfo_split[3]) # convert from string to floating point number
yMax = float(mapInfo_split[4])
#
# Calculate the xMax and yMin values from the dimensions
xMax = xMin + (refl_md['shape'][1] * refl_md['res']['pixelWidth']) # xMax = left edge + (# of columns * resolution)",
yMin = yMax - (refl_md['shape'][0] * refl_md['res']['pixelHeight']) # yMin = top edge - (# of rows * resolution)",
refl_md['extent'] = (xMin, xMax, yMin, yMax) # useful format for plotting
refl_md['ext_dict'] = {}
refl_md['ext_dict']['xMin'] = xMin
refl_md['ext_dict']['xMax'] = xMax
refl_md['ext_dict']['yMin'] = yMin
refl_md['ext_dict']['yMax'] = yMax
    hdf5_file.close()  # close the file handle (the bare attribute access was a no-op)
#
return refl, refl_md, sitename, wavelengths, sol_az, sol_zn, sns_az, sns_zn, slope, aspect, elevation
def stack_subset_bands(reflArray, reflArray_metadata, bands, clipIndex):
subArray_rows = clipIndex['yMax'] - clipIndex['yMin']
subArray_cols = clipIndex['xMax'] - clipIndex['xMin']
#
stackedArray = np.zeros((subArray_rows, subArray_cols, len(bands)), dtype=np.int16)
band_clean_dict = {}
band_clean_names = []
#
for i in range(len(bands)):
band_clean_names.append("b" + str(bands[i]) + "_refl_clean")
band_clean_dict[band_clean_names[i]] = subset_clean_band(reflArray, reflArray_metadata, clipIndex, bands[i])
stackedArray[..., i] = band_clean_dict[band_clean_names[i]]
#
return stackedArray
def subset_clean_band(reflArray, reflArray_metadata, clipIndex, bandIndex):
bandCleaned = reflArray[clipIndex['yMin']:clipIndex['yMax'], clipIndex['xMin']:clipIndex['xMax'],
bandIndex - 1].astype(np.int16)
#
return bandCleaned
def array2raster(newRaster, reflBandArray, reflArray_metadata, extent, ras_dir, epsg):
NP2GDAL_CONVERSION = {
"uint8": 1,
"int8": 1,
"uint16": 2,
"int16": 3,
"uint32": 4,
"int32": 5,
"float32": 6,
"float64": 7,
"complex64": 10,
"complex128": 11,
}
#
pwd = os.getcwd()
os.chdir(ras_dir)
cols = reflBandArray.shape[1]
rows = reflBandArray.shape[0]
bands = reflBandArray.shape[2]
pixelWidth = float(reflArray_metadata['res']['pixelWidth'])
pixelHeight = -float(reflArray_metadata['res']['pixelHeight'])
originX = extent['xMin']
originY = extent['yMax']
#
driver = gdal.GetDriverByName('GTiff')
gdaltype = NP2GDAL_CONVERSION[reflBandArray.dtype.name]
print(gdaltype)
print(newRaster)
print(cols, rows, bands)
outRaster = driver.Create(newRaster, cols, rows, bands, gdaltype)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
# outband = outRaster.GetRasterBand(1)
# outband.WriteArray(reflBandArray[:,:,x])
for band in range(bands):
outRaster.GetRasterBand(band + 1).WriteArray(reflBandArray[:, :, band])
#
outRasterSRS = osr.SpatialReference()
#outRasterSRS.ImportFromEPSG(reflArray_metadata['epsg'])
#outRasterSRS.ExportToWkt()
outRasterSRS.ImportFromEPSG(epsg)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outRaster.FlushCache()
os.chdir(pwd)
def calc_clip_index(clipExtent, h5Extent, xscale=1, yscale=1):
h5rows = h5Extent['yMax'] - h5Extent['yMin']
h5cols = h5Extent['xMax'] - h5Extent['xMin']
#
ind_ext = {}
ind_ext['xMin'] = round((clipExtent['xMin'] - h5Extent['xMin']) / xscale)
ind_ext['xMax'] = round((clipExtent['xMax'] - h5Extent['xMin']) / xscale)
ind_ext['yMax'] = round(h5rows - (clipExtent['yMin'] - h5Extent['yMin']) / yscale)
ind_ext['yMin'] = round(h5rows - (clipExtent['yMax'] - h5Extent['yMin']) / yscale)
#
return ind_ext
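# Hedged worked example (assumed values, not from any real tile): with a 1 m
# pixel grid, a clip window that starts 10 m east of the tile's west edge maps
# to column index 10, and row indices are counted down from the top edge.
def _example_calc_clip_index():
    h5_ext = {'xMin': 500000, 'xMax': 501000, 'yMin': 4000000, 'yMax': 4001000}
    clip = {'xMin': 500010, 'xMax': 500020, 'yMin': 4000980, 'yMax': 4000995}
    ind = calc_clip_index(clip, h5_ext)
    assert ind == {'xMin': 10, 'xMax': 20, 'yMin': 5, 'yMax': 20}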
def extract_hsi_and_brdf_data(full_path, epsg, ras_dir, year,site, ross="thick", li="dense"):
warnings.filterwarnings("ignore")
refl, refl_md, sitename, wavelengths, sol_az, sol_zn, sns_az, sns_zn, slope, aspect, elevation = h5refl2array(full_path, epsg = "326")
print(refl.shape)
rgb = np.r_[0:425]
rgb = np.delete(rgb, np.r_[419:425])
rgb = np.delete(rgb, np.r_[281:313])
rgb = np.delete(rgb, np.r_[191:211])
xmin, xmax, ymin, ymax = refl_md['extent']
#
clipExtent = {}
clipExtent['xMin'] = xmin
clipExtent['yMin'] = ymin
clipExtent['yMax'] = ymax
clipExtent['xMax'] = xmax
print(clipExtent)
subInd = calc_clip_index(clipExtent, refl_md['ext_dict'])
subInd['xMax'] = int(subInd['xMax'])
subInd['xMin'] = int(subInd['xMin'])
subInd['yMax'] = int(subInd['yMax'])
subInd['yMin'] = int(subInd['yMin'])
refl = refl[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax'],rgb]
sns_az = sns_az[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
sns_zn = sns_zn[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
sol_az = sol_az[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
sol_zn = sol_zn[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
slope = slope[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
aspect = aspect[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
elevation = elevation[(subInd['yMin']):subInd['yMax'], (subInd['xMin']):subInd['xMax']]
# mask away bad pixels
ndvi = (refl[:, :,90] - refl[:,:,58])/(refl[:, :,58] +refl[:, :,90]) > 0.5
nir860 = (refl[:, :,96] + refl[:, :,97])/20000 > 0.1
mask = (sns_zn < 10000) * (aspect < 10000) * (slope < 10000) * (sns_az < 10000) * ndvi * nir860
#
# convert degrees in radiants
slope = (slope * pi) / 180
aspect = (aspect * pi) / 180
sns_az = (sns_az * pi) / 180
sns_zn = (sns_zn * pi) / 180
sol_az = (sol_az * pi) / 180
sol_zn = (sol_zn * pi) / 180
#
subArray_rows = subInd['yMax'] - subInd['yMin']
subArray_cols = subInd['xMax'] - subInd['xMin']
hcp = np.zeros((subArray_rows, subArray_cols, len(rgb)), dtype=np.int16)
#load info in multi-layer array
band_clean_dict = {}
band_clean_names = []
for i in range(len(rgb)):
        band_clean_names.append("b" + str(i) + "_refl_clean")
band_clean_dict[band_clean_names[i]] = np.squeeze(refl[:, :, [i]].astype(np.int16))
hcp[..., i] = band_clean_dict[band_clean_names[i]]
#
del(refl, ndvi, nir860)
tmp = elevation.reshape([elevation.shape[0],elevation.shape[1],1])
del(elevation)
hcp = np.concatenate([tmp, hcp], -1)
#
external_dat = aspect.reshape([aspect.shape[0],aspect.shape[1],1])
del(aspect)
tmp = slope.reshape([slope.shape[0],slope.shape[1],1])
external_dat = np.concatenate([external_dat, tmp], -1)
del(slope)
tmp = sns_zn.reshape([sns_zn.shape[0],sns_zn.shape[1],1])
external_dat = np.concatenate([external_dat, tmp], -1)
del(sns_zn)
tmp = sns_az.reshape([sns_az.shape[0],sns_az.shape[1],1])
external_dat = np.concatenate([external_dat, tmp], -1)
del(sns_az)
tmp = sol_zn.reshape([sol_zn.shape[0],sol_zn.shape[1],1])
external_dat = np.concatenate([external_dat, tmp], -1)
del(sol_zn)
tmp = sol_az.reshape([sol_az.shape[0],sol_az.shape[1],1])
external_dat = np.concatenate([external_dat, tmp], -1)
del(sol_az, tmp)
hcp = np.concatenate([hcp, external_dat],-1)
del(external_dat)
#save hcp into a tiff file [reflectance]
#itc_id = str(int(year)) + "_" +site+"_" + str(int(xmin)) + "_" + str(int(ymin))
#ii = str(itc_id + ".tif")
#array2raster(str(ii), hcp, refl_md, clipExtent, ras_dir = str(ras_dir), epsg = int(refl_md['epsg']))
# output the dataframe
hcp = hcp.reshape(-1,hcp.shape[2])
hcp = pd.DataFrame(hcp)
cl_nm = ["band_"+ str(i).zfill(1) for i in range(1,368)]
hcp.columns = cl_nm + ["CHM", "aspect","slope","sns_zn","sns_az","sol_zn", "sol_az"]
return(hcp, refl_md)
def kld_transform(brick, pt_kld):
kld_groups = pd.read_csv(pt_kld, header=None)
kld_groups = kld_groups.rename(columns={0: "_kld_grp"})
all_data = np.zeros([brick.shape[0],1])
for jj in np.unique(kld_groups):
which_bands = kld_groups._kld_grp == jj
#min
new_col = np.apply_along_axis(min, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#mean
new_col = np.apply_along_axis(np.mean, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#max
new_col = np.apply_along_axis(max, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#
all_data = pd.DataFrame(all_data)
all_data = all_data.drop([0], axis=1)
return(all_data)
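# Hedged usage sketch (toy inputs, hypothetical group-file path): kld_transform
# expects a pixels-by-bands array and a single-column CSV assigning each band to
# a KLD group; it returns min/mean/max per group, i.e. three columns per group.
def _example_kld_transform(tmp_csv_path="/tmp/example_kld_groups.csv"):
    toy_brick = np.arange(12, dtype=float).reshape(3, 4)   # 3 pixels, 4 bands
    pd.DataFrame([1, 1, 2, 2]).to_csv(tmp_csv_path, header=False, index=False)
    reduced = kld_transform(toy_brick, tmp_csv_path)
    return reduced                                          # shape (3, 6): 2 groups x (min, mean, max)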
output_df, refl_md = extract_hsi_and_brdf_data(full_path, epsg, ras_dir, year,site)
coeffs_brdf = pd.read_csv("/blue/ewhite/s.marconi/NeonSpeciesClassification//data/corrections/angle_all_all_brdf_coeffs_1920.csv")
topo_coeff = pd.read_csv("/blue/ewhite/s.marconi/NeonSpeciesClassification//data/corrections/topo_all_all_brdf_coeffs_1920.csv")
ave_coords = pd.read_csv("/blue/ewhite/s.marconi/NeonSpeciesClassification//data/site_encoder.csv")
#transform into BRDF corrected tile
which_site = ave_coords.siteName == site
coords = ave_coords[which_site][['latitude', 'longitude', 'siteID']]
coords = pd.concat([coords]*output_df.shape[0])
brdf = calculate_brdf(output_df, coeffs_brdf, topo_coeff)
elevation = output_df[['CHM']]
#filter for greennes and shadows
ndvi = (brdf[:,89]- brdf[:,57])/(brdf[:,57] + brdf[:,89]) <0.5
nir860 = (brdf[:,95] + brdf[:,96])/2 <0.1
brdf[ndvi,:]= np.NAN
brdf[nir860,:]= np.NAN
# apply KLD reduction
brdf = brdf[:,10:357]
# map pixels with NAN
which_pixels_dropped = np.isnan(brdf).any(axis=1)
brdf = brdf[~np.isnan(brdf).any(axis=1)]
#remove bad pixels
nbands = brdf.shape[1]
brdf[:,1:nbands] = normalize(brdf[:,1:nbands])
brdf = normalize(brdf)
brdf = kld_transform(brdf, pt_kld)
coords = coords[~which_pixels_dropped]
elevation = output_df.CHM[~which_pixels_dropped]
#brdf = pd.DataFrame(brdf[:,1:46])
brdf.reset_index(drop=True, inplace=True)
elevation.reset_index(drop=True, inplace=True)
coords.reset_index(drop=True, inplace=True)
brdf = pd.concat([elevation, brdf, coords], axis=1)
# -*- coding: utf-8 -*-
import string
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.common_metadata import make_meta, store_schema_metadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.naming import DEFAULT_METADATA_VERSION
from kartothek.io_components.metapartition import (
MetaPartition,
_unique_label,
parse_input_to_metapartition,
partition_labels_from_mps,
)
from kartothek.serialization import DataFrameSerializer, ParquetSerializer
def test_store_single_dataframe_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
assert len(meta_partition.data) == 0
expected_key = "dataset_uuid/core/test_label.parquet"
assert meta_partition.files == {"core": expected_key}
assert meta_partition.label == "test_label"
files_in_store = list(store.keys())
expected_num_files = 1
assert len(files_in_store) == expected_num_files
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_key)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_key)
assert len(files_in_store) == expected_num_files - 1
def test_store_single_dataframe_as_partition_no_metadata(store, metadata_version):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=False,
)
assert len(partition.data) == 0
expected_file = "dataset_uuid/core/test_label.parquet"
assert partition.files == {"core": expected_file}
assert partition.label == "test_label"
    # Only the parquet file is written, since store_metadata=False
files_in_store = list(store.keys())
assert len(files_in_store) == 1
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
def test_load_dataframe_logical_conjunction(
store, meta_partitions_files_only, metadata_version, metadata_storage_format
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="cluster_1",
data={"core": df},
metadata_version=metadata_version,
logical_conjunction=[("P", ">", 4)],
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
predicates = None
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 7, 8, 9], "L": [5, 6, 7, 8, 9], "TARGET": [15, 16, 17, 18, 19]}
).set_index(np.arange(5, 10))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 6), ("TARGET", "<", 18)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame({"P": [7], "L": [7], "TARGET": [17]}).set_index(
np.array([7])
)
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 2), ("TARGET", "<", 17)], [("TARGET", "==", 19)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 9], "L": [5, 6, 9], "TARGET": [15, 16, 19]}
).set_index(np.array([5, 6, 9]))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
def test_store_multiple_dataframes_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_2 = pd.DataFrame({"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]})
mp = MetaPartition(
label="cluster_1",
data={"core": df, "helper": df_2},
metadata_version=metadata_version,
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
expected_file = "dataset_uuid/core/cluster_1.parquet"
expected_file_helper = "dataset_uuid/helper/cluster_1.parquet"
assert meta_partition.files == {
"core": expected_file,
"helper": expected_file_helper,
}
assert meta_partition.label == "cluster_1"
files_in_store = list(store.keys())
assert len(files_in_store) == 2
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_file)
stored_df = DataFrameSerializer.restore_dataframe(
store=store, key=expected_file_helper
)
pdt.assert_frame_equal(df_2, stored_df)
files_in_store.remove(expected_file_helper)
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_load_dataframes(
meta_partitions_files_only, store_session, predicate_pushdown_to_io
):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
expected_df_2 = pd.DataFrame(OrderedDict([("P", [1]), ("info", ["a"])]))
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert len(mp.data) == 2
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
pdt.assert_frame_equal(data["helper"], expected_df_2, check_dtype=False)
empty_mp = MetaPartition("empty_mp", metadata_version=mp.metadata_version)
empty_mp.load_dataframes(
store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert empty_mp.data == {}
def test_remove_dataframes(meta_partitions_files_only, store_session):
mp = meta_partitions_files_only[0].load_dataframes(store=store_session)
assert len(mp.data) == 2
mp = mp.remove_dataframes()
assert mp.data == {}
def test_load_dataframes_selective(meta_partitions_files_only, store_session):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, tables=["core"]
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
def test_load_dataframes_columns_projection(
meta_partitions_evaluation_files_only, store_session
):
expected_df = pd.DataFrame(OrderedDict([("P", [1]), ("L", [1]), ("HORIZON", [1])]))
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session, tables=["PRED"], columns={"PRED": ["P", "L", "HORIZON"]}
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["PRED"], expected_df, check_dtype=False)
def test_load_dataframes_columns_raises_missing(
meta_partitions_evaluation_files_only, store_session
):
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(ValueError) as e:
meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session,
tables=["PRED"],
columns={"PRED": ["P", "L", "HORIZON", "foo", "bar"]},
)
assert str(e.value) == "Columns cannot be found in stored dataframe: bar, foo"
def test_load_dataframes_columns_table_missing(
meta_partitions_evaluation_files_only, store_session
):
# test behavior of load_dataframes for columns argument given
# specifying table that doesn't exist
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(
ValueError,
match=r"You are trying to read columns from invalid table\(s\). .*PRED_typo.*",
):
mp.load_dataframes(
store=store_session,
columns={"PRED_typo": ["P", "L", "HORIZON", "foo", "bar"]},
)
# ensure typo in tables argument doesn't raise, as specified in docstring
dfs = mp.load_dataframes(store=store_session, tables=["PRED_typo"])
assert len(dfs) > 0
def test_from_dict():
df = pd.DataFrame({"a": [1]})
dct = {"data": {"core": df}, "label": "test_label"}
meta_partition = MetaPartition.from_dict(dct)
pdt.assert_frame_equal(meta_partition.data["core"], df)
assert meta_partition.metadata_version == DEFAULT_METADATA_VERSION
def test_eq():
df = pd.DataFrame({"a": [1]})
df_same = pd.DataFrame({"a": [1]})
df_other = pd.DataFrame({"a": [2]})
df_diff_col = pd.DataFrame({"b": [1]})
df_diff_type = pd.DataFrame({"b": [1.0]})
meta_partition = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df}}
)
assert meta_partition == meta_partition
meta_partition_same = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_same}}
)
assert meta_partition == meta_partition_same
meta_partition_diff_label = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}}
)
assert meta_partition != meta_partition_diff_label
assert meta_partition_diff_label != meta_partition
meta_partition_diff_files = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}, "files": {"core": "something"}}
)
assert meta_partition != meta_partition_diff_files
assert meta_partition_diff_files != meta_partition
meta_partition_diff_col = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_col}}
)
assert meta_partition != meta_partition_diff_col
assert meta_partition_diff_col != meta_partition
meta_partition_diff_type = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_type}}
)
assert meta_partition != meta_partition_diff_type
assert meta_partition_diff_type != meta_partition
meta_partition_diff_metadata = MetaPartition.from_dict(
{
"label": "test_label",
"data": {"core": df_diff_type},
"dataset_metadata": {"some": "metadata"},
}
)
assert meta_partition != meta_partition_diff_metadata
assert meta_partition_diff_metadata != meta_partition
meta_partition_different_df = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_other}}
)
assert not meta_partition == meta_partition_different_df
meta_partition_different_label = MetaPartition.from_dict(
{"label": "test_label", "data": {"not_core": df_same}}
)
assert not meta_partition == meta_partition_different_label
meta_partition_empty_data = MetaPartition.from_dict(
{"label": "test_label", "data": {}}
)
assert meta_partition_empty_data == meta_partition_empty_data
meta_partition_more_data = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df, "not_core": df}}
)
assert not (meta_partition == meta_partition_more_data)
assert not meta_partition == "abc"
def test_add_nested_to_plain():
mp = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
to_nest = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mp_nested = to_nest[0].add_metapartition(to_nest[1])
mp_add_nested = mp.add_metapartition(mp_nested)
mp_iter = mp.add_metapartition(to_nest[0]).add_metapartition(to_nest[1])
assert mp_add_nested == mp_iter
def test_add_nested_to_nested():
mps1 = [
MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
MetaPartition(
label="label_33",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
]
mpn_1 = mps1[0].add_metapartition(mps1[1])
mps2 = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mpn_2 = mps2[0].add_metapartition(mps2[1])
mp_nested_merge = mpn_1.add_metapartition(mpn_2)
mp_iter = mps1.pop()
for mp_ in [*mps1, *mps2]:
mp_iter = mp_iter.add_metapartition(mp_)
assert mp_nested_merge == mp_iter
def test_eq_nested():
mp_1 = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
)
mp = mp_1.add_metapartition(mp_2)
assert mp == mp
assert mp != mp_2
assert mp_2 != mp
mp_other = MetaPartition(
label="label_3", data={"core": pd.DataFrame({"test": [4, 5, 6]})}
)
mp_other = mp_1.add_metapartition(mp_other)
assert mp != mp_other
assert mp_other != mp
def test_nested_incompatible_meta():
mp = MetaPartition(
label="label_1",
data={"core": pd.DataFrame({"test": np.array([1, 2, 3], dtype=np.int8)})},
metadata_version=4,
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": np.array([4, 5, 6], dtype=np.float64)})},
metadata_version=4,
)
with pytest.raises(ValueError):
mp.add_metapartition(mp_2)
def test_concatenate_no_change():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"second": pd.DataFrame({"A": [3], "B": [3], "C": [3]}),
}
dct = {"label": "test_label", "data": input_dct}
meta_partition = MetaPartition.from_dict(dct)
result = meta_partition.concat_dataframes()
assert result == meta_partition
def test_concatenate_identical_col_df():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"first_1":
|
pd.DataFrame({"A": [2], "B": [2]})
|
pandas.DataFrame
|
import pandas as pd, numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train['none'] = 1-train[label_cols].max(axis=1)
COMMENT = 'comment_text'
train[COMMENT].fillna("unknown", inplace=True)
test[COMMENT].fillna("unknown", inplace=True)
import re, string
re_tok = re.compile(f'([{string.punctuation}])')  # f-string so the punctuation set is actually interpolated
def tokenize(s): return re_tok.sub(r' \1 ', s).split()
n = train.shape[0]
vec = TfidfVectorizer(ngram_range=(1,2), tokenizer=tokenize,
min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
smooth_idf=1, sublinear_tf=1 )
trn_term_doc = vec.fit_transform(train[COMMENT])
test_term_doc = vec.transform(test[COMMENT])
def pr(y_i, y):
p = x[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
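# Minimal, self-contained sketch of the naive-Bayes log-count ratio that
# get_mdl builds from pr() (toy dense matrix instead of the sparse
# term-document matrix; illustrative only):
def _example_log_count_ratio():
    toy_x = np.array([[1, 0], [0, 2], [3, 0]])   # 3 documents, 2 terms
    toy_y = np.array([1, 0, 1])
    p1 = (toy_x[toy_y == 1].sum(0) + 1) / ((toy_y == 1).sum() + 1)
    p0 = (toy_x[toy_y == 0].sum(0) + 1) / ((toy_y == 0).sum() + 1)
    return np.log(p1 / p0)                        # per-term weights r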
x = trn_term_doc
test_x = test_term_doc
def get_mdl(y):
    y = y.values
    r = np.log(pr(1,y) / pr(0,y))
    m = svm.SVC(probability=True)  # probability=True so predict_proba is available for the submission
    # m = LogisticRegression(C=4, dual=True)
    x_nb = x.multiply(r)           # NB-weighted features: fit on these rather than on the raw term counts
    return m.fit(x_nb, y), r
preds = np.zeros((len(test), len(label_cols)))
for i, j in enumerate(label_cols):
print('fit', j)
m,r = get_mdl(train[j])
    preds[:,i] = m.predict_proba(test_x.multiply(r))[:,1]
submid = pd.DataFrame({'id': subm["id"]})
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sys
import tweepy
import pandas as pd
import datetime
def tweeting(consumer_key, consumer_secret, my_access_token, my_access_token_secret, carrier):
# Authentication
my_auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
my_auth.set_access_token(my_access_token, my_access_token_secret)
my_api = tweepy.API(my_auth)
# tweet
if carrier == 'reportediario':
my_positividad = pd.read_csv('../output/producto49/Positividad_Diaria_Media_T.csv')
my_positividad_ag = pd.read_csv('../output/producto49/Positividad_Diaria_Media_Ag_T.csv')
my_mediamovil = pd.read_csv('../output/producto75/MediaMovil_casos_nuevos_T.csv')
my_casos_nuevos_totales = pd.read_csv('../output/producto5/TotalesNacionales_T.csv')
casos_nuevos_totales = int(pd.to_numeric(my_casos_nuevos_totales.iloc[my_casos_nuevos_totales.index.max()][7]))
casos_nuevos_antigeno = int(pd.to_numeric(my_casos_nuevos_totales.iloc[my_casos_nuevos_totales.index.max()][19]))
mediamovil_nacional = int(pd.to_numeric(my_mediamovil.iloc[my_mediamovil.index.max()][17]))
variacion_nacional = float(100*(pd.to_numeric(my_mediamovil.iloc[my_mediamovil.index.max()][17]) - pd.to_numeric(
my_mediamovil.iloc[my_mediamovil.index.max() - 7][17]))/pd.to_numeric(my_mediamovil.iloc[my_mediamovil.index.max()][17]))
positividad_nacional = float(100*pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][5]))
variacion_positividad = float(100*(pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][5]) - pd.to_numeric(
my_positividad.iloc[my_positividad.index.max() - 7][5]))/pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][5]))
positividad_nacional = ("%.2f" % positividad_nacional)
positividad = float(100*pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][4]))
positividad_hoy = ("%.2f" % positividad)
casos_nuevos = str(int(my_positividad.iloc[my_positividad.index.max()][2]))
muestras = str(int(my_positividad.iloc[my_positividad.index.max()][1]))
tests_antigeno = str(int(my_positividad_ag.iloc[my_positividad_ag.index.max()][1]))
positividad_ag = float(100 * pd.to_numeric(my_positividad_ag.iloc[my_positividad_ag.index.max()][4]))
positividad_ag_hoy = ("%.2f" % positividad_ag)
# create update elements
tweet_text = '🤖Actualicé el reporte diario del @ministeriosalud de hoy 💫 Gracias a la Subsecretaría de Salud Pública y de Redes Asistenciales. Hay '+str(mediamovil_nacional)+' casos nuevos promedio en los últimos 7 días, con positividad de '+str(positividad_nacional)+'%. Más detalles en los productos en la imagen. https://github.com/MinCiencia/Datos-COVID19'
reply2_text = '🤖El total de casos confirmados hoy es '+str(casos_nuevos_totales)+', de los cuales '+str(casos_nuevos_antigeno)+' fueron confirmados con test de antígeno y '+casos_nuevos+' con PCR+. De las '+muestras+' muestras que se analizaron en las últimas 24 horas en laboratorios nacionales, un '+positividad_hoy+'% resultó positivo.'
reply3_text = '🤖Además, de los '+str(tests_antigeno)+ ' tests de antígeno realizados en el territorio nacional durante las últimas 24h, un '+positividad_ag_hoy+'% resultó positivo.'
if variacion_nacional >= 0 and variacion_positividad >= 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos creció en '+str(variacion_nacional)+'% y la positividad en '+str(variacion_positividad)+'% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
elif variacion_nacional >= 0 and variacion_positividad < 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos creció en '+str(variacion_nacional)+'% y la positividad bajó en '+str(variacion_positividad)+'% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
elif variacion_nacional < 0 and variacion_positividad < 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos bajó en '+str(variacion_nacional)+'% y la positividad en '+str(variacion_positividad)+'% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
elif variacion_nacional < 0 and variacion_positividad >= 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos bajó en ' + str(
variacion_nacional) + '% y la positividad aumentó en ' + str(
variacion_positividad) + '% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
# Generate text tweet with media (image)
media1= my_api.media_upload('./img/Datos covid_Bot_A_g1.png')
media2= my_api.media_upload('./img/Datos covid_Bot_A_g2.png')
media3= my_api.media_upload('./img/Datos covid_Bot_A_g3.png')
media4= my_api.media_upload('./img/Datos covid_Bot_A_g4.png')
try:
tweet = my_api.update_status(status=tweet_text,
media_ids=[media1.media_id, media2.media_id, media3.media_id, media4.media_id])
tweet2 = my_api.update_status(status=reply1_text, in_reply_to_status_id=tweet.id)
tweet3 = my_api.update_status(status=reply2_text, in_reply_to_status_id=tweet2.id)
tweet3 = my_api.update_status(status=reply3_text, in_reply_to_status_id=tweet3.id)
except tweepy.TweepError as error:
if error.api_code == 187:
# Do something special
print('duplicate message')
elif carrier == 'mmamp':
# create update elements
tweet_text = '🤖Actualicé los datos de calidad del aire en todo el territorio nacional, desde las estaciones del SINCA del @MMAChile 💫. Mira específicamente qué actualicé en la imagen y clona el GitHub https://github.com/MinCiencia/Datos-COVID19'
media1= my_api.media_upload('./img/Datos covid_Bot_G_g1.png')
# media2= my_api.media_upload('./img/Datos covid_Bot_A_g2.png')
# media3= my_api.media_upload('./img/Datos covid_Bot_A_g3.png')
# media4= my_api.media_upload('./img/Datos covid_Bot_A_g4.png')
# Generate text tweet with media (image)
my_api.update_status(status=tweet_text, media_ids=[media1.media_id])
elif carrier == 'informeepi':
my_epi= pd.read_csv('../output/producto1/Covid-19_T.csv')
fecha_informe = my_epi.iloc[my_epi.index.max()-1][0]
# create update elements
tweet_text = '🤖Actualicé los datos del Informe Epidemiológico publicado por @ministeriosalud de hoy 💫, con los datos correspondientes al '+fecha_informe+'. Gracias al equipo de especialistas en epidemiología. Mira qué actualicé en la imagen y clona el GitHub https://github.com/MinCiencia/Datos-COVID19'
reply1_text = '🤖A partir de este momento, todas mis respuestas sobre comunas del país 🇨🇱, corresponden al último informe. Más detalles en https://github.com/MinCiencia/Datos-COVID19'
media1= my_api.media_upload('./img/Datos covid_Bot_B_g1.png')
media2= my_api.media_upload('./img/Datos covid_Bot_B_g2.png')
media3= my_api.media_upload('./img/Datos covid_Bot_B_g3.png')
media4= my_api.media_upload('./img/Datos covid_Bot_B_g4.png')
# Generate text tweet with media (image)
tweet = my_api.update_status(status=tweet_text, media_ids=[media1.media_id,media2.media_id,media3.media_id,media4.media_id])
my_api.update_status(status=reply1_text, in_reply_to_status_id=tweet.id)
elif carrier == 'vacunacion':
now = datetime.datetime.now()
my_vacunacion = pd.read_csv('../output/producto76/vacunacion_t.csv')
vacunados = int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][1]))
vacunados_pauta_completa = int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][2])) + int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][3]))
my_vacunacion_avance = 100*vacunados/16696002
my_vacunacion_avance_pauta_completa = 100*vacunados_pauta_completa/16696002
my_vacunacion_avance = ("%.2f" % my_vacunacion_avance)
my_vacunacion_avance_pauta_completa = ("%.2f" % my_vacunacion_avance_pauta_completa)
dosis_dia = vacunados + vacunados_pauta_completa + int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][4])) - (pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][1]) + pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][2]) + pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][3]) + pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][4]))
my_vacunacion = my_vacunacion[1:]
        my_vacunacion['total_dosis'] = pd.to_numeric(my_vacunacion['Total']) + pd.to_numeric(my_vacunacion['Total.1'])
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Statistical tests."""
import numpy as np
import pandas as pd
import scikit_posthocs as sp
import scipy.stats as ss
SIGNIFICANCE_THRESHOLD = 0.05
def _create_pairwise_table(benchmark_snapshot_df, key, statistical_test):
"""Given a benchmark snapshot data frame and a statistical test function,
    returns a p-value table. The alternative hypothesis (two-sided or
    one-tailed) is determined by the |statistical_test| callable that is
    passed in.
The p-value table is a square matrix where each row and column represents a
fuzzer, and each cell contains the resulting p-value of the pairwise
statistical test of the fuzzer in the row and column of the cell.
"""
groups = benchmark_snapshot_df.groupby('fuzzer')
samples = groups[key].apply(list)
fuzzers = samples.index
data = []
for f_i in fuzzers:
row = []
for f_j in fuzzers:
value = np.nan
if f_i != f_j and set(samples[f_i]) != set(samples[f_j]):
value = statistical_test(samples[f_i], samples[f_j])
row.append(value)
data.append(row)
    return pd.DataFrame(data, index=fuzzers, columns=fuzzers)
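# Hedged usage sketch (toy data, hypothetical column names): the snapshot frame
# needs a 'fuzzer' column plus the measurement column passed as |key|, and the
# |statistical_test| callable must return a p-value for two samples.
def _example_pairwise_table():
    toy_df = pd.DataFrame({
        'fuzzer': ['afl', 'afl', 'libfuzzer', 'libfuzzer'],
        'edges_covered': [100, 110, 90, 95],
    })
    test = lambda a, b: ss.mannwhitneyu(a, b).pvalue
    return _create_pairwise_table(toy_df, key='edges_covered', statistical_test=test)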
import os
import glob
import json
import typing
import shutil
try:
import torch
except ImportError:
torch = None # type: ignore
import cv2
import numpy as np
import pandas as pd
import typing_extensions as tx
if typing.TYPE_CHECKING:
from .detector import Detector
from .. import metrics, core
# pylint: disable=too-few-public-methods
class CallbackProtocol(tx.Protocol):
"""A protocol defining how we expect callbacks to behave."""
def __call__(
self,
detector: "Detector",
summaries: typing.List[typing.Dict[str, typing.Any]],
data_dir: str,
) -> typing.Dict[str, typing.Any]:
pass
def best_weights(
filepath, metric="loss", method="min", key="saved"
) -> CallbackProtocol:
"""A callback that saves the best model weights according to a metric.
Args:
metric: The metric to track. Use dot notation for nested attributes
(e.g., val_mAP.{class_name}).
method: How to handle the metric ("min" minimizes the metric while "max"
maximizes it).
key: What name to use for the saved flag.
"""
# pylint: disable=unused-argument
def callback(detector, summaries, data_dir):
saved = False
summaries_df = pd.json_normalize(summaries)
best_idx = (
summaries_df[metric].idxmax()
if method == "max"
else summaries_df[metric].idxmin()
)
if best_idx == len(summaries_df) - 1:
detector.save_weights(filepath)
saved = True
return {key: saved}
return callback
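# Hedged usage sketch: the callback is meant to be invoked by a training loop
# with (detector, summaries, data_dir). The stub detector below exists only to
# make the example self-contained; the real Detector class provides
# save_weights() itself, and the path used here is hypothetical.
def _example_best_weights(tmp_path="/tmp/example_best.pth"):
    class _StubDetector:
        def save_weights(self, filepath):
            with open(filepath, "w", encoding="utf8") as f:
                f.write("weights placeholder")
    callback = best_weights(tmp_path, metric="loss", method="min")
    summaries = [{"loss": 0.9}, {"loss": 0.4}]   # last epoch is the best so far
    return callback(_StubDetector(), summaries, data_dir=None)   # -> {"saved": True}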
def csv_logger(filepath) -> CallbackProtocol:
"""A callback that saves a CSV of the summaries to a specific
filepath.
Args:
filepath: The filepath where the logs will be saved.
"""
# pylint: disable=unused-argument
def callback(detector, summaries, data_dir):
pd.json_normalize(summaries).to_csv(filepath, index=False)
return {}
return callback
def load_json(filepath: str):
"""Load JSON from file"""
with open(filepath, "r", encoding="utf8") as f:
return json.loads(f.read())
def data_dir_to_collections(data_dir: str, threshold: float, detector: "Detector"):
"""Convert a temporary training artifact directory into a set
of train and validation (if present) true/predicted collections."""
return {
split: {
"collections": {
"true_collection": core.SceneCollection(
[
core.Scene(
image=filepath,
annotation_config=detector.annotation_config,
annotations=[
core.Annotation(
detector.annotation_config[cIdx], x1, y1, x2, y2
)
for x1, y1, x2, y2, cIdx in np.load(
filepath + ".bboxes.npz"
)["bboxes"]
],
)
for filepath in images
],
annotation_config=detector.annotation_config,
),
"pred_collection": core.SceneCollection(
[
core.Scene(
image=filepath,
annotation_config=detector.annotation_config,
annotations=annotations,
)
for filepath, annotations in zip(
images,
detector.invert_targets(
{
"output": [
{
k: torch.Tensor(v)
for k, v in np.load(
filepath + ".output.npz"
).items()
}
for filepath in images
]
},
threshold=threshold,
),
)
],
annotation_config=detector.annotation_config,
),
},
"transforms": transforms,
"metadata":
|
pd.json_normalize(metadatas)
|
pandas.json_normalize
|
from base64 import b64encode
from numpy import cumsum, diff, exp, true_divide, add, append, nan, concatenate, array, abs as npabs
from pandas import DataFrame, Series
from sklearn.metrics import mean_squared_error, SCORERS
from xgboost import XGBClassifier, XGBRegressor
from lightgbm import LGBMClassifier, LGBMRegressor
from catboost import CatBoostClassifier, CatBoostRegressor
from shap import TreeExplainer, summary_plot
from plotly.graph_objects import Figure, Waterfall
from plotly.io import to_image
from .base import InsolverBaseWrapper
from .extensions import InsolverCVHPExtension, InsolverPDPExtension, AUTO_SPACE_CONFIG
class InsolverGBMWrapper(InsolverBaseWrapper, InsolverCVHPExtension, InsolverPDPExtension):
"""Insolver wrapper for Gradient Boosting Machines.
Parameters:
backend (str): Framework for building GBM, 'xgboost', 'lightgbm' and 'catboost' are supported.
task (str): Task that GBM should solve: Classification or Regression. Values 'reg' and 'class' are supported.
n_estimators (:obj:`int`, optional): Number of boosting rounds. Equals 100 by default.
objective (:obj:`str` or :obj:`callable`): Objective function for GBM to optimize.
load_path (:obj:`str`, optional): Path to GBM model to load from disk.
**kwargs: Parameters for GBM estimators except `n_estimators` and `objective`. Will not be changed in hyperopt.
"""
def __init__(self, backend, task=None, objective=None, n_estimators=100, load_path=None, **kwargs):
super(InsolverGBMWrapper, self).__init__(backend)
self.init_args = self.get_init_args(vars())
self.algo, self._backends = 'gbm', ['xgboost', 'lightgbm', 'catboost']
self._tasks = ['class', 'reg']
self._back_load_dict = {'xgboost': self._pickle_load, 'lightgbm': self._pickle_load,
'catboost': self._pickle_load}
self._back_save_dict = {'xgboost': self._pickle_save, 'lightgbm': self._pickle_save,
'catboost': self._pickle_save}
self.n_estimators, self.objective, self.params = n_estimators, objective, None
if backend not in self._backends:
raise NotImplementedError(f'Error with the backend choice. Supported backends: {self._backends}')
if load_path is not None:
self.load_model(load_path)
else:
if task in self._tasks:
gbm_init = {
'class': {'xgboost': XGBClassifier, 'lightgbm': LGBMClassifier, 'catboost': CatBoostClassifier},
'reg': {'xgboost': XGBRegressor, 'lightgbm': LGBMRegressor, 'catboost': CatBoostRegressor}
}
objectives = {
'regression': {'xgboost': 'reg:squarederror', 'lightgbm': 'regression', 'catboost': 'RMSE'},
'binary': {'xgboost': 'binary:logistic', 'lightgbm': 'binary', 'catboost': 'Logloss'},
'multiclass': {'xgboost': 'multi:softmax', 'lightgbm': 'multiclass', 'catboost': 'MultiClass'},
'poisson': {'xgboost': 'count:poisson', 'lightgbm': 'poisson', 'catboost': 'Poisson'},
'gamma': {'xgboost': 'reg:gamma', 'lightgbm': 'gamma',
'catboost': 'Tweedie:variance_power=1.9999999'}
}
self.objective_ = (objectives[self.objective][self.backend] if self.objective in objectives.keys()
else self.objective)
kwargs.update({'objective': self.objective_, 'n_estimators': self.n_estimators})
self.model, self.params = gbm_init[task][self.backend](**(kwargs if kwargs is not None else {})), kwargs
def __params_gbm(**params):
params.update(self.params)
return gbm_init[task][self.backend](**params)
self.object = __params_gbm
else:
raise NotImplementedError(f'Task parameter supports values in {self._tasks}.')
self._update_meta()
def fit(self, X, y, report=None, **kwargs):
"""Fit a Gradient Boosting Machine.
Args:
X (:obj:`pd.DataFrame`, :obj:`pd.Series`): Training data.
y (:obj:`pd.DataFrame`, :obj:`pd.Series`): Training target values.
report (:obj:`list`, :obj:`tuple`, optional): A list of metrics to report after model fitting, optional.
**kwargs: Other parameters passed to Scikit-learn API .fit().
"""
self.model.fit(X, y, **kwargs)
if not hasattr(self.model, 'feature_name_'):
self.model.feature_name_ = X.columns if isinstance(X, DataFrame) else [X.name]
self._update_meta()
if report is not None:
if isinstance(report, (list, tuple)):
prediction = self.model.predict(X)
print(DataFrame([[x.__name__, x(y, prediction)] for x
in report]).rename({0: 'Metrics', 1: 'Value'}, axis=1).set_index('Metrics'))
def predict(self, X, **kwargs):
"""Predict using GBM with feature matrix X.
Args:
X (:obj:`pd.DataFrame`, :obj:`pd.Series`): Samples.
**kwargs: Other parameters passed to Scikit-learn API .predict().
Returns:
array: Returns predicted values.
"""
return self.model.predict(X if not hasattr(self.model, 'feature_name_')
else X[self.model.feature_name_], **kwargs)
def shap(self, X, show=False, plot_type='bar'):
"""Method for shap values calculation and corresponding plot of feature importances.
Args:
X (:obj:`pd.DataFrame`, :obj:`pd.Series`): Data for shap values calculation.
show (:obj:`boolean`, optional): Whether to plot a graph.
plot_type (:obj:`str`, optional): Type of feature importance graph, takes value in ['dot', 'bar'].
Returns:
JSON containing shap values.
"""
explainer = TreeExplainer(self.model)
        X = DataFrame(X)
from pipeline.feature_engineering.preprocessing.abstract_preprocessor import Preprocessor
from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy
from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy
from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy
from overrides import overrides
import traceback
import os
import pandas
from sklearn.decomposition import PCA
import numpy
class SussexHuaweiPreprocessor(Preprocessor):
def __init__(self):
super().__init__()
@overrides
def segment_data(self, data, mode, label_column=None, args=None):
"""
        Segments a time series based on a label column, by semantic segmentation, or into fixed-length intervals.
:param data:
:param mode:
:param label_column:
:param args:
:return:
"""
try:
if data is None or mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'semantic':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'labels':
# 1. Select all data with desired label value
data_segments = []
for target_label in args:
selected_data = data[data[label_column] == target_label]
# 2. Split by non-subsequent indices
# Source for next 3 lines after comment:
# https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index
non_sequence = pandas.Series(selected_data.index).diff() != 1
grouper = non_sequence.cumsum().values
selected_data_segments = [group for _, group in selected_data.groupby(grouper)]
for segment in selected_data_segments:
data_segments.append(segment)
return data_segments
if mode == 'fixed_interval':
segment_length = args[0]
aggregate = args[1]
exact_length = args[2]
segments_aggregated = []
split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0)
# 1. Ensure index is datetime index and standardize type
data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]'))
#2. Segment data
segments = split(data, segment_length)
if not exact_length:
for segment in segments:
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
return segments
#3. Remove segments that are too long or too short after splitting
min_length_subsegements = []
for segment in segments:
if segment.shape[0] == segment_length:
min_length_subsegements.append(segment)
if not aggregate:
for segment in min_length_subsegements:
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
return min_length_subsegements
#3. Resample and aggregate data
segments_combined = None
for segment in min_length_subsegements:
segment = segment.reset_index()
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
segment = self.resample_quantitative_data(segment,
freq="{}s".format(segment_length),
mode = 'mean')
if segments_combined is None:
segments_combined = segment
else:
segments_combined = pandas.concat([segments_combined, segment], axis=0)
if segments_combined is not None:
segments_combined = segments_combined.reset_index()
segments_combined.index = pandas.DatetimeIndex(
segments_combined.index.astype('datetime64[1s]'))
segments_aggregated.append(segments_combined)
return segments_aggregated
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def de_segment_data(self, data_segments, selected_columns=None, axis = 0):
"""
        De-segments a time series by concatenating its segments back together.
:param data_segments:
:param selected_columns:
:param axis:
:return:
"""
try:
data = None
for ind in range(len(data_segments)):
if data is None:
data = data_segments[ind][selected_columns]
else:
data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis)
data = data.reset_index(drop=True)
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_nans(self, data, replacement_mode, replacement_value=None):
"""
        Remove NaNs.
        :param data:
        :param replacement_mode: string, one of 'mean', 'replacement_val', 'del_row'
        :param replacement_value: any type, used as replacement if replacement_mode is 'replacement_val'
        :return: pandas.DataFrame
"""
try:
if data is None or replacement_mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if replacement_mode == 'mean':
return MeanReplacementStrategy().replace(data, 'NaN')
if replacement_mode == 'del_row':
return DelRowReplacementStrategy().replace(data, 'NaN')
if replacement_mode == 'replacement_val':
return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None):
"""
        Removes outliers based either on a quantile or on a threshold value.
:param data:
:param replacement_mode:
:param columns:
:param quantile:
:param threshold:
:return:
"""
try:
if data is None or replacement_mode is None or columns is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if len(columns) < 1:
raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if replacement_mode == 'quantile':
# Source for next 7 lines of code after comment:
# https://nextjournal.com/schmudde/how-to-remove-outliers-in-data
for column in columns:
not_outliers = data[column].between(
data[column].quantile(1.0 - quantile),
data[column].quantile(quantile)
)
data[column] = data[column][not_outliers]
index_names = data[~not_outliers].index
data.drop(index_names, inplace=True)
old_index = data.index
data = data.reset_index(drop=False)
data = data.set_index(old_index)
return data
if replacement_mode == 'threshold':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def resample_quantitative_data(self, data, freq, mode = None):
"""
Resamples quantitative data.
:param data:
:param freq:
:param mode:
:return:
"""
# Source:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html
# https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html
try:
if data is None or freq is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'mean' or mode is None:
return data.resample(freq).mean()
if mode == 'sum':
return data.resample(freq).sum()
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def convert_unix_to_datetime(self, data, column, unit):
"""
Converts unix time stamps to date time.
:param data:
:param column:
:param unit:
:return:
"""
# Source:
# https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe
# https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime
try:
if data is None or column is None or unit is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
data[column] = pandas.to_datetime(data[column], unit=unit)
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode):
"""
Remove rows that have an unwanted label.
:param data:
:param unwanted_labels:
:param replacement_mode:
:return:
"""
try:
if data is None or replacement_mode is None or unwanted_labels is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if replacement_mode == 'del_row':
return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None):
"""
Project accelerometer data from local vehicle coordinates to a global coordinate system.
:param data: pandas.DataFrame with the accelerometer columns.
:param target_columns: accelerometer columns to transform.
:param mode: one of 'mean_estimate_gravity', 'gyroscope', 'gravity' or 'orientation'.
:param args: helper columns, e.g. gravity components or orientation components (last entry being the homogeneous component).
:return: the DataFrame with the projected columns.
"""
try:
if data is None or target_columns is None or mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'mean_estimate_gravity':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'gyroscope':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'gravity':
if len(target_columns) != len(args):
raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)
for ind, column in enumerate(target_columns):
data[column] = data[column] - data[args[ind]]
return data
if mode == 'orientation':
if len(target_columns)+1 != len(args):
raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)
# Source for theory behind below calculation
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
# https://en.wikipedia.org/wiki/Homogeneous_coordinates
# #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it
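# How to read the loop below (an interpretation based on the sources above): args is
# expected to hold one orientation component per target axis plus a final homogeneous
# (w) component, so each axis is scaled by its component divided by w.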
for ind, column in enumerate(target_columns):
data[column] = data[column] * (data[args[ind]] / data[args[-1]])
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def label_data(self, labels, data):
"""
Combines labels vector and data matrix.
:param labels: pandas.DataFrame holding the label column(s), same length as data.
:param data: pandas.DataFrame holding the feature columns.
:return: a single DataFrame with labels and data concatenated column-wise.
"""
try:
if data is None or labels is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if (len(labels) != len(data)):
raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value)
return pandas.concat((labels, data), axis=1)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def znormalize_quantitative_data(self, data, columns = None, mean = None, std = None):
"""
Apply z-normalization to a data set.
:param data: pandas.DataFrame to normalize.
:param columns: optional subset of columns to normalize; all columns if None.
:param mean: optional precomputed mean (e.g. from a training set); computed from data if None.
:param std: optional precomputed standard deviation; computed from data if None.
:return: tuple of (normalized data, mean, std) so the statistics can be reused on other data.
"""
try:
if data is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if columns is not None and not all(column in data.keys() for column in columns):
raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if mean is None and std is None:
if columns is not None:
mean = data[columns].mean()
std = data[columns].std()
data[columns] = (data[columns] - data[columns].mean()) / data[columns].std()
else:
mean = data.mean()
std = data.std()
data = (data - data.mean()) / data.std()
elif mean is not None and std is not None:
if columns is not None:
data[columns] = (data[columns] - mean) / std
else:
data = (data - mean) / std
else:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
return data, mean, std
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def min_max_normalize_quantitative_data(self, data, columns=None):
"""
Apply min-max-normalization to a data set.
:param data: pandas.DataFrame to scale into the range [0, 1].
:param columns: optional subset of columns to scale; all columns if None.
:return: the normalized DataFrame.
"""
try:
if data is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if columns is not None and not all(column in data.keys() for column in columns):
raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if columns is not None:
data[columns] = (data[columns] - data[columns].min()) / (data[columns].max() - data[columns].min())  # to center around 0.0 subtract 0.5
else:
data = (data - data.min()) / (data.max() - data.min())  # to center around 0.0 subtract 0.5
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def re_represent_data(self, current_representation, target_representation, data):
"""
Change representation of a data set.
:param current_representation:
:param target_representation:
:param data:
:return:
"""
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
@overrides
def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None):
"""
Apply a dimensionality reduction technique to a data set.
:param data: pandas.DataFrame with the columns to reduce.
:param mode: 'euclidean' (L2 norm), 'manhatten' (L1 norm) or 'pca' (first principal component).
:param reduced_column_name: name of the new column holding the reduced values.
:param columns: columns that are combined into the single reduced column.
:return: the DataFrame with the reduced column appended.
"""
try:
if data is None or mode is None or reduced_column_name is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'euclidean':
# Source:
# https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/
# https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8
# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
reduced = numpy.sqrt(numpy.square(data[columns]).sum(axis=1))  # row-wise L2 norm; **(1/2) is an alternative to numpy.sqrt
old_index = data.index
data = pandas.concat([data, reduced], axis=1)
data = data.rename(columns={0: reduced_column_name})
data = data.reset_index(drop=True)
data = data.set_index(old_index)
return data
if mode == 'manhatten':
reduced = data[columns].abs().sum(axis=1)  # L1 norm: take absolute values first, then sum across the selected columns
old_index = data.index
data = pandas.concat([data, reduced], axis=1)
data = data.rename(columns={0: reduced_column_name})
data = data.reset_index(drop=True)
data = data.set_index(old_index)
return data
if mode == 'pca':
# Source:
# https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe
# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
# https://en.wikipedia.org/wiki/Principal_component_analysis
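# PCA(n_components=1) fits a single principal direction; dot(components_, X.T).T then
# gives each row's coordinate along that direction, i.e. the 1-D projection used below.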
pca = PCA(n_components=1)
pca.fit(data[columns])
reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T))
reduced = reduced.rename(columns={0:reduced_column_name})
reduced = reduced.reset_index(drop=True)
old_index = data.index
data = data.reset_index(drop=True)
data =
|
pandas.concat([data, reduced], axis=1)
|
pandas.concat
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
from datetime import datetime
from dateutil import parser
import time
from scipy.stats import gaussian_kde
from sklearn.gaussian_process import GaussianProcessClassifier
from pandas_datareader import data
import numexpr as ne
seaborn.set()
def numpy_learner_func():
# numpy counting
rng = np.random.RandomState(0)
x_data = rng.randint(10, size=(3, 4))
print("rng:{}".format(rng))
print("x_data{}".format(x_data))
# count the values smaller than 6 with np.count_nonzero, np.sum and np.where
num1 = np.count_nonzero(x_data < 6)
num2 = np.sum(x_data < 6)
num3 = np.any(x_data < 6)
num4 = np.all(x_data < 6)
num5 = np.where(x_data < 6)[0]
print(x_data < 6, num3, num4, num5, num5.shape[0])
print("num1 is {}".format(num1))
print("num2 is {}".format(num2))
print(x_data[x_data < 6])
print(9 and 0)
# np.newaxis adds a new dimension to an array
x = np.arange(3)
print(x, x.shape)
x1 = x[:, np.newaxis]
print(x1, x1.shape)
x2 = x[:, np.newaxis, np.newaxis]
print(x2, x2.shape)
x3 = np.zeros(10)
np.add.at(x3, [0, 1, 5], 1)
print(x3)
# print("x4 is {}".format(x4))
i = [2, 3, 3, 4, 4, 4]
x3[i] += 1
print(x3)
# np.random.seed(42)
x_np = np.random.randn(100)
bins = np.linspace(-5, 5, 20)
# zeros_like returns an array of zeros with the same shape (and dtype) as its argument
counts = np.zeros_like(bins)
print("counts is {}".format(counts))
# np.searchsorted returns, for each value of x_np, the index at which it would be inserted into the sorted bins
j = np.searchsorted(bins, x_np)
print("j is {}".format(j))
# np.searchsorted()
# ## numpy sorting: np.sort() returns a new, sorted array
srt_array = np.array([2, 1, 4, 3, 5])
print("sorted:{}".format(np.sort(srt_array)))
# ndarray.sort() sorts the array in place and returns None (so the print below shows None)
print("x.sort() is {}".format(srt_array.sort()))
sorted_arr = np.array([99, 0, 3, 1, 90])
# np.argsort() returns the indices that would sort the array
print("np.argsort(srt_array) is {}".format(np.argsort(sorted_arr)))
# np.sort with the axis argument sorts along the given dimension
axis_arr = np.random.RandomState(42).randint(0, 10, (4, 6))
print("the array is {}".format(axis_arr))
print("sort each column of axis_arr, returns {}".format(np.sort(axis_arr, axis=0)))
print("sort each row of axis_arr, returns {}".format(np.sort(axis_arr, axis=1)))
# partial / partition sorting
np_part = np.array([3, 8, 4, 99, 5, 1, 88])  # np.partition places the 3 smallest values (in arbitrary order) before index 3
print("np_part partition sorted is {}".format(np.partition(np_part, 3,)))
def K_nearest_neighbors_func():
X = np.random.RandomState(42).rand(10, 2) # 10X2 array
plt.scatter(X[:, 0], X[:, 1], s=100)
x_newaxis = X[:, np.newaxis, :]
print("X[:, np.newaxis, :]:", x_newaxis)
print(x_newaxis.shape)
x_newaxis_1 = X[np.newaxis, :, :]
print("x_newaxis_1:", x_newaxis_1)
print(x_newaxis_1.shape)
diff_newaxis = x_newaxis - x_newaxis_1
print("diff_newaxis:", diff_newaxis, diff_newaxis.shape)
sq_differences = diff_newaxis ** 2
dist_sq = sq_differences.sum(-1)  # sum over the last axis (the coordinate axis) to get pairwise squared distances
print("dist_sq:", dist_sq, sq_differences.shape, dist_sq.shape)
eye_dist_sq = dist_sq.diagonal()  # diagonal of the distance matrix: each point's distance to itself, so all zeros
print("eye_dist_sq is {}".format(eye_dist_sq))
nearest = np.argsort(dist_sq, axis=1)  # sort each row's distances in ascending order and return the indices
K = 2
nearest_partition = np.argpartition(dist_sq, K+1, axis=1)  # partial sort: the K+1 smallest distances per row come first; returns indices
# print("nearest_partition.shape is {}".format(nearest_partition.shape))
# #
# # dis_sq = np.sum((X[:, np.newaxis, :] - X[np.newaxis, :, :])**2, axis=-1)
for i in range(X.shape[0]):
for j in nearest_partition[i, :K+1]:
plt.plot(*zip(X[j], X[i]), color='black')
# k_nearest_neighbors_loop_func(X, K)
plt.show()
def k_nearest_neighbors_loop_func(X, K):
all_dist = {}
index_dict = {}
# compute the distance from each point to every other point and keep them sorted
for i in range(X.shape[0]):
start_point = X[i, :]
start_point_dis = {}
for j in range(X.shape[0]):
if i != j:
dis = np.sqrt((start_point[0] - X[j, 0])**2 + (start_point[1] - X[j, 1])**2)
# start_point_dis.append(dis)
start_point_dis[j] = dis
# sort the dict entries by value (and then by key)
sorted_start_point_dis = {}
# for item in dict_a.items():
# print(item)
# out.append((item[1], item[0]))
# print(out, sorted(out))
inter_list = sorted(start_point_dis.items(), key = lambda kv:(kv[1], kv[0]))
for each in inter_list:
sorted_start_point_dis[each[0]] = each[1]
all_dist[i] = list(sorted_start_point_dis.keys())[:K]
# take the indices of the two nearest points for each point
for a in range(X.shape[0]):
for b in all_dist[a]:
print("a, b", a, b)
plt.plot(*zip(X[a, :], X[b, :]), color='blue')
plt.show()
# print(all_dist)
def pandas_learner():
# a pandas Index is an immutable array-like / ordered set-like structure that may contain duplicates
indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([2, 3, 5, 7, 11])
index1 = indA & indB # 交集
index2 = indA | indB # 全集
index3 = indA ^ indB # 差集
print(index1, index2, index3)
data = pd.Series([0.25, 0.5, 0.75, 1.0],
index=['a', 'b', 'c', 'd'])
print(data['b'])
print('a' in data)
print(data.keys())
print(list(data.items()))
data['e'] = 1.25
print(data['a': 'c'])  # label-based slice; unlike positional slicing, the stop label 'c' is included
print(data[0:2])
print(data[(data > 0.3) & (data < 0.8)])
print(data[['a', 'e']])
# loc: explicit label-based indexing (access rows by their index labels)
print(data[1])
print(data[1:3])
print(data.loc['a'])
# iloc: implicit position-based indexing (access rows by integer position)
print(data.iloc[1])
print(data.iloc[1:3])
def pandas_null():
valsl = np.array([1, np.nan, 3, 4])
print(valsl.dtype)
print(1+np.nan)
print(0*np.nan)
print(np.sum(valsl), np.min(valsl), np.max(valsl))  # aggregations (sum, min, max) over data containing nan all return nan
print(np.nansum(valsl), np.nanmin(valsl), np.nanmax(valsl))  # the nan-aware variants ignore nan when computing sum, min and max
print(np.nan == None)
data =
|
pd.Series([1, np.nan, 'hello', None])
|
pandas.Series
|
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx =
|
tm.box_expected(idx, box)
|
pandas.util.testing.box_expected
|
#!/usr/bin/env python
"""
Function and classes for KMC running
Author: <NAME>
Email: <EMAIL>
"""
import pandas as pd
import random
import json
import numpy as np
from pymatgen.core import Structure,Lattice
from copy import copy
import sys
sys.path.append('./')
from model import Event
import pickle
from numba.typed import List
class KMC:
def __init__(self):
pass
def initialization(self,occ,prim_fname,fitting_results,fitting_results_site,event_fname,supercell_shape,v,T,
lce_fname='../inputs/lce.pkl',lce_site_fname='../inputs/lce_site.pkl'):
print('Initializing KMC calculations with prim.json at',prim_fname,'...')
with open(prim_fname,'r') as f:
prim = json.load(f)
prim_coords = [site['coordinate'] for site in prim['basis']]
prim_specie_cases = [site['occupant_dof'] for site in prim['basis']]
prim_lattice = Lattice(prim['lattice_vectors'])
prim_species = [s[0] for s in prim_specie_cases]
supercell_shape_matrix = np.diag(supercell_shape)
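# np.diag turns the requested supercell shape into a diagonal transformation matrix,
# i.e. the primitive cell is replicated independently along each of its lattice vectors.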
print('Supercell Shape:\n',supercell_shape_matrix)
self.structure = Structure(prim_lattice,prim_species,prim_coords)
print('Converting supercell ...')
self.structure.remove_species(['Zr','O'])
self.structure.make_supercell(supercell_shape_matrix)
print('Loading fitting results ...')
fitting_results = pd.read_pickle(fitting_results).sort_values(by=['time_stamp'],ascending=False).iloc[0]
self.keci = fitting_results.keci
self.empty_cluster = fitting_results.empty_cluster
print('Loading fitting results (site energy) ...')
fitting_results_site =
|
pd.read_pickle(fitting_results_site)
|
pandas.read_pickle
|
import re
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test__get_intervals(self):
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
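# Each interval is (start, end, mean, std): the width equals the category's relative
# frequency (foo 3/6, bar 2/6, tar 1/6) and the std is that width divided by 6.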
assert result[0] == expected_intervals
def test_fit(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer.fit(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.fuzzy = False
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.25
@patch('scipy.stats.norm.rvs')
def test__get_value_fuzzy(self, rvs_mock):
# setup
rvs_mock.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test_reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer.fit(data)
result = transformer.reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows
Output:
- the output of `_transform_by_category`
Side effects:
- `_transform_by_category` will be called once
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means =
|
pd.Series([0.125, 0.375, 0.625, 0.875])
|
pandas.Series
|
# -*- coding: utf-8 -*-
from lxml import html
import requests
import pandas as pd
import matplotlib.ticker
import matplotlib.pyplot as plt
import numpy
import sys
###############################################################################
# the program only works with python 2, hence the following check:
if sys.version_info[0] >= 3:
raise Exception("Executar com Python 2.")
###############################################################################
###############################################################################
# one dict per coin, holding all the collected data
LTC = {'marketcap': [], 'price': [], 'volume': []}
MONA = {'marketcap': [], 'price': [], 'volume': []}
DOGE = {'marketcap': [], 'price': [], 'volume': []}
NEO = {'marketcap': [], 'price': [], 'volume': []}
XEM = {'marketcap': [], 'price': [], 'volume': []}
STEEM = {'marketcap': [], 'price': [], 'volume': []}
XMR = {'marketcap': [], 'price': [], 'volume': []}
BCN = {'marketcap': [], 'price': [], 'volume': []}
BTC = {'marketcap': [], 'price': [], 'volume': []}
# dict mapping each of the dicts declared above to its respective id
# as used on the site https://coinmarketcap.com/
ALTCOINS = {'id-litecoin': LTC,
'id-monacoin': MONA,
'id-dogecoin': DOGE,
'id-neo': NEO,
'id-nem': XEM,
'id-steem': STEEM,
'id-monero': XMR,
'id-bytecoin-bcn': BCN,
'id-bitcoin': BTC}
###############################################################################
###############################################################################
# start of the parser that accesses the historical data at https://coinmarketcap.com/
# and grabs the desired values to populate the dicts declared above
# all the urls with historical data for the last year
urls = []
urls.append('https://coinmarketcap.com/historical/20171112/')
urls.append('https://coinmarketcap.com/historical/20171105/')
urls.append('https://coinmarketcap.com/historical/20171029/')
urls.append('https://coinmarketcap.com/historical/20171022/')
urls.append('https://coinmarketcap.com/historical/20171015/')
urls.append('https://coinmarketcap.com/historical/20171008/')
urls.append('https://coinmarketcap.com/historical/20171001/')
urls.append('https://coinmarketcap.com/historical/20170924/')
urls.append('https://coinmarketcap.com/historical/20170917/')
urls.append('https://coinmarketcap.com/historical/20170910/')
urls.append('https://coinmarketcap.com/historical/20170903/')
urls.append('https://coinmarketcap.com/historical/20170827/')
urls.append('https://coinmarketcap.com/historical/20170820/')
urls.append('https://coinmarketcap.com/historical/20170813/')
urls.append('https://coinmarketcap.com/historical/20170806/')
urls.append('https://coinmarketcap.com/historical/20170730/')
urls.append('https://coinmarketcap.com/historical/20170723/')
urls.append('https://coinmarketcap.com/historical/20170716/')
urls.append('https://coinmarketcap.com/historical/20170709/')
urls.append('https://coinmarketcap.com/historical/20170702/')
urls.append('https://coinmarketcap.com/historical/20170625/')
urls.append('https://coinmarketcap.com/historical/20170618/')
urls.append('https://coinmarketcap.com/historical/20170611/')
urls.append('https://coinmarketcap.com/historical/20170604/')
urls.append('https://coinmarketcap.com/historical/20170528/')
urls.append('https://coinmarketcap.com/historical/20170521/')
urls.append('https://coinmarketcap.com/historical/20170514/')
urls.append('https://coinmarketcap.com/historical/20170507/')
urls.append('https://coinmarketcap.com/historical/20170430/')
urls.append('https://coinmarketcap.com/historical/20170423/')
urls.append('https://coinmarketcap.com/historical/20170416/')
urls.append('https://coinmarketcap.com/historical/20170409/')
urls.append('https://coinmarketcap.com/historical/20170402/')
urls.append('https://coinmarketcap.com/historical/20170326/')
urls.append('https://coinmarketcap.com/historical/20170319/')
urls.append('https://coinmarketcap.com/historical/20170312/')
urls.append('https://coinmarketcap.com/historical/20170305/')
urls.append('https://coinmarketcap.com/historical/20170226/')
urls.append('https://coinmarketcap.com/historical/20170219/')
urls.append('https://coinmarketcap.com/historical/20170212/')
urls.append('https://coinmarketcap.com/historical/20170205/')
urls.append('https://coinmarketcap.com/historical/20170129/')
urls.append('https://coinmarketcap.com/historical/20170122/')
urls.append('https://coinmarketcap.com/historical/20170115/')
urls.append('https://coinmarketcap.com/historical/20170108/')
urls.append('https://coinmarketcap.com/historical/20170101/')
urls.append('https://coinmarketcap.com/historical/20161225/')
urls.append('https://coinmarketcap.com/historical/20161218/')
urls.append('https://coinmarketcap.com/historical/20161211/')
urls.append('https://coinmarketcap.com/historical/20161204/')
urls.append('https://coinmarketcap.com/historical/20161127/')
urls.append('https://coinmarketcap.com/historical/20161120/')
urls.append('https://coinmarketcap.com/historical/20161113/')
for url in urls:
page = requests.get(url)
tree = html.fromstring(page.content)
for idcoin, coin in ALTCOINS.iteritems():
# xpath is used to access the html content
tr = '//tr[@id="{}"]'.format(idcoin)
td_class = '//td[@class="no-wrap market-cap text-right"]/text()'
cap = tree.xpath(''.join((tr, td_class)))
clean_cap = float(cap[0].strip().replace('$', '').replace(',', ''))
a_price = '//a[@class="price"]/text()'
price = tree.xpath(''.join((tr, a_price)))
clean_price = float(price[0].replace('$', '').replace(',', ''))
a_volume = '//a[@class="volume"]/@data-usd'
volume = tree.xpath(''.join((tr, a_volume)))
clean_volume = round(float(volume[0].replace('$', '').replace(',', '')),
2)
# the values that feed the lists inside the dicts are already cleaned
# at extraction time
coin['marketcap'].append(clean_cap)
coin['price'].append(clean_price)
coin['volume'].append(clean_volume)
###############################################################################
###############################################################################
# helper dicts used in the statistical calculations
MARKETCAP = {'LTC': LTC['marketcap'],
'MONA': MONA['marketcap'],
'DOGE': DOGE['marketcap'],
'NEO': NEO['marketcap'],
'XEM': XEM['marketcap'],
'STEEM': STEEM['marketcap'],
'XMR': XMR['marketcap'],
'BCN': BCN['marketcap'],
'BTC': BTC['marketcap']}
PRICE = {'LTC': LTC['price'], 'MONA': MONA['price'], 'DOGE': DOGE['price'],
'NEO': NEO['price'], 'XEM': XEM['price'], 'STEEM': STEEM['price'],
'XMR': XMR['price'], 'BCN': BCN['price'], 'BTC': BTC['price']}
VOLUME = {'LTC': LTC['volume'], 'MONA': MONA['volume'], 'DOGE': DOGE['volume'],
'NEO': NEO['volume'], 'XEM': XEM['volume'], 'STEEM': STEEM['volume'],
'XMR': XMR['volume'], 'BCN': BCN['volume'], 'BTC': BTC['volume']}
###############################################################################
###############################################################################
# computation and plotting of the Pearson correlation coefficients for market
# cap, volume and price
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=MARKETCAP).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 9, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(MARKETCAP.keys())
ax.set_yticklabels(MARKETCAP.keys())
plt.title(u'Correlação por Pearson do market cap no último ano')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=PRICE).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 9, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(PRICE.keys())
ax.set_yticklabels(PRICE.keys())
plt.title(u'Correlação por Pearson do preço no último ano')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=VOLUME).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 9, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(VOLUME.keys())
ax.set_yticklabels(VOLUME.keys())
plt.title(u'Correlação por Pearson do volume de transação no último ano')
plt.show()
###############################################################################
###############################################################################
# bitcoin values were only used in a general comparison and are no
# longer needed
del MARKETCAP['BTC']
del PRICE['BTC']
del VOLUME['BTC']
###############################################################################
###############################################################################
# computation and plotting of the means of market cap, price and volume
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.plot(pd.DataFrame(data=MARKETCAP).mean())
y_formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(y_formatter)
ax.grid(True)
plt.title(u'Média em dólares do market cap no último ano')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.plot(pd.DataFrame(data=PRICE).mean())
y_formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(y_formatter)
ax.grid(True)
plt.title(u'Média em dólares do preço no último ano')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.plot(pd.DataFrame(data=VOLUME).mean())
y_formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(y_formatter)
ax.grid(True)
plt.title(u'Média em dólares do volume de transação no último ano')
plt.show()
###############################################################################
###############################################################################
# computation and plotting of the standard deviations of market cap, price and volume
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.plot(pd.DataFrame(data=MARKETCAP).std())
y_formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(y_formatter)
ax.grid(True)
plt.title(u'Desvio padrão do market cap no último ano')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.plot(pd.DataFrame(data=PRICE).std())
y_formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(y_formatter)
ax.grid(True)
plt.title(u'Desvio padrão do preço no último ano')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.plot(pd.DataFrame(data=VOLUME).std())
y_formatter = matplotlib.ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(y_formatter)
ax.grid(True)
plt.title(u'Desvio padrão do volume de transação no último ano')
plt.show()
###############################################################################
###############################################################################
# computation and plotting of the Pearson correlation coefficients between market
# cap, volume and price of each analyzed altcoin
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=LTC).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(LTC.keys())
ax.set_yticklabels(LTC.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do LTC')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=MONA).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(MONA.keys())
ax.set_yticklabels(MONA.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do MONA')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=DOGE).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(DOGE.keys())
ax.set_yticklabels(DOGE.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do DOGE')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=NEO).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(NEO.keys())
ax.set_yticklabels(NEO.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do NEO')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=XEM).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(XEM.keys())
ax.set_yticklabels(XEM.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do XEM')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=STEEM).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(STEEM.keys())
ax.set_yticklabels(STEEM.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do STEEM')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(pd.DataFrame(data=XMR).corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, 3, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(XMR.keys())
ax.set_yticklabels(XMR.keys())
plt.title(u'Correlação por Pearson do market cap, preço e volume do XMR')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(
|
pd.DataFrame(data=BCN)
|
pandas.DataFrame
|
""" Tests for dataframe module
"""
# pylint: disable=missing-function-docstring,missing-class-docstring
# pylint: disable=invalid-name,no-self-use
import unittest
import pandas as pd
import numpy as np
from data_science_tools import dataframe
from data_science_tools.dataframe import (
coalesce,
merge_on_index,
)
class TestCoalesce(unittest.TestCase):
def test_coalesce(self):
series = [
pd.Series([np.nan, 1, np.nan, np.nan, 1]),
pd.Series([np.nan, 2, np.nan, 2, 2]),
pd.Series([np.nan, np.nan, 3, 3, 3]),
]
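# coalesce is expected to behave like SQL COALESCE: element-wise, take the first
# non-null value across the given series (see the expected result below).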
expected =
|
pd.Series([np.nan, 1, 3, 2, 1])
|
pandas.Series
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import r2_score
import scipy.stats
df_mort_siteid = pd.read_csv("mortality siteid obs vs expected.csv")
df_mort_surgid = pd.read_csv("mortality surgid obs vs expected.csv")
df_complics_siteid = pd.read_csv("Complics siteid obs vs expected.csv")
df_complics_surgid = pd.read_csv("Complics surgid obs vs expected.csv")
df = pd.read_csv("total_avg_surgid.csv")
df1 = pd.read_csv("total_avg_site_id.csv")
def siteid_obs_vs_expected_mort():
mask = df_mort_siteid['count_Reop'] == 0
df_reop = df_mort_siteid[~mask]
mask = df_mort_siteid['count_First'] == 0
df_op = df_mort_siteid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="plum",edgecolor='orchid', s=30)
# ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="lightskyblue", edgecolor='tab:blue', s=30)
plt.title('Siteid observe vs expected Mortality First operation')
plt.xlabel("Yearly AVG of first operation")
plt.ylabel("mortality observe vs expected of first operation")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "purple")
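# np.polyfit(..., 1) is a least-squares straight-line fit; np.poly1d wraps the
# coefficients in a callable so the regression line can be drawn over the scatter.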
# a = df_reop['Year_avg_reop']
# b = df_reop['log_Reoperation']
# c = np.polyfit(a, b, 1)
# t = np.poly1d(c)
# plt.plot(a, t(a), "mediumblue")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
# text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
# r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
# text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
print(text)
# print(text2)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"), f("white")],
labels=[text, text3])
# fig = plt.figure()
# fig.subplots_adjust()
# plt.text(30, 50, text)
# plt.text(90, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def siteid_obs_vs_expected_mort_reop():
mask = df_mort_siteid['count_Reop'] == 0
df_reop = df_mort_siteid[~mask]
ax = plt.gca()
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="lightskyblue", edgecolor='tab:blue', s=30)
plt.title('Siteid observe vs expected Mortality Reoperation')
plt.xlabel("Yearly AVG of Reoperation")
plt.ylabel("mortality observe vs expected Reoperation")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
r, p = scipy.stats.spearmanr(a, b)
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
text3 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
print(text2)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[ f("lightskyblue"),f("white")],
labels=[ text2,text3])
plt.show()
def surgid_obs_vs_expected_mort():
mask = df_mort_surgid['count_Reop'] == 0
df_reop = df_mort_surgid[~mask]
mask = df_mort_surgid['count_First'] == 0
df_op = df_mort_surgid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="plum",edgecolor='orchid', s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="lightskyblue", edgecolor='tab:blue', s=30)
plt.title('Surgid observe vs expected Mortality')
plt.xlabel("Yearly AVG of operation")
plt.ylabel("mortality observe vs expected")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "purple")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" #\n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
print (text)
print(text2)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"),f("white"), f("steelblue"),f("white")],
labels=[text,text3, text2,text4])
# ax.text(right, top, 'right top',
# horizontalalignment='right',
# verticalalignment='top',
# transform=ax.transAxes)
# plt.text(100, 50, text)
# plt.text(100, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def siteid_obs_vs_expected_complics():
mask = df_complics_siteid['count_Reop'] == 0
df_reop = df_complics_siteid[~mask]
mask = df_complics_siteid['count_First'] == 0
df_op = df_complics_siteid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="palevioletred",edgecolor='indianred', s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="darkturquoise",edgecolor='lightseagreen',s=30)
plt.title('Siteid observe vs expected Complications')
plt.xlabel("Yearly AVG of operation")
plt.ylabel("complication observe vs expected")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "maroon")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "darkgreen")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"),f("white"), f("darkturquoise"),f("white")],
labels=[text,text3, text2,text4])
# fig = plt.figure()
# fig.subplots_adjust()
# plt.text(30, 50, text)
# plt.text(90, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def surgid_obs_vs_expected_complics():
mask = df_complics_surgid['count_Reop'] == 0
df_reop = df_complics_surgid[~mask]
mask = df_complics_surgid['count_First'] == 0
df_op = df_complics_surgid[~mask]
ax = plt.gca()
ax.scatter(df_op['Year_avg_Firstop'], df_op['log_First'], color="palevioletred",edgecolor='indianred',s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['log_Reoperation'], color="darkturquoise",edgecolor='lightseagreen',s=30)
plt.title('Surgid observe vs expected Complications')
plt.xlabel("Yearly AVG of operation")
plt.ylabel("complication observe vs expected")
x = df_op['Year_avg_Firstop']
y = df_op['log_First']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "maroon")
a = df_reop['Year_avg_reop']
b = df_reop['log_Reoperation']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "darkgreen")
text = f" First : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Reoperation : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
print(text)
print(text2)
r, p = scipy.stats.spearmanr(a, b)
r1, p1 = scipy.stats.spearmanr(x, y)
text3 = f" Spearman Corr : {r1:0.4f} P-value : {p1:0.4f}"
text4 = f" Spearman Corr : {r:0.4f} P-value : {p:0.4f}"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"), f("white"), f("darkturquoise"), f("white")],
labels=[text, text3, text2, text4])
# fig = plt.figure()
# fig.subplots_adjust()
# plt.text(30, 50, text)
# plt.text(90, 45, text2)
# plt.title("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# print("y=%.6fx^2+%.6fx+(%.6f)" % (z[0], z[1], z[2]))
# plt.savefig('Surgid yearly average for Reoperation.png')
# plt.gca().text(0.05, 0.95, text, transform=plt.gca().transAxes,
# fontsize=14, verticalalignment='top')
plt.show()
def mortality_reop_surgid_boxplot():
mask = df['Year_sum_reop'] == 0
df_reop = df[~mask]
# total_year_sum
new_df=pd.DataFrame(data=df_reop,columns=['mortalty_reop_rate','total_year_avg'])
new_df['bins'] = pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high'])
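# pd.qcut bins by sample quantiles, so the three groups (tertiles of yearly average
# operation volume) are roughly equal-sized, labelled low / mid / high.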
print(new_df)
new_df.to_csv("box_surgid_mort.csv")
mask = new_df['bins'] == 'low'
df_low = new_df[mask]
mask = new_df['bins'] == 'mid'
df_mid = new_df[mask]
mask = new_df['bins'] == 'high'
df_high = new_df[mask]
data = [df_low['mortalty_reop_rate'],df_mid['mortalty_reop_rate'],df_high['mortalty_reop_rate']]
print (df_low.describe())
print(df_mid.describe())
print(df_high.describe())
text = f" low\n ${df_low['total_year_avg'].min(): 0.2f} - ${df_low['total_year_avg'].max(): 0.2f}\n Mean = ${df_low['mortalty_reop_rate'].mean():0.6f} $"
text2 = f"mid\n ${df_mid['total_year_avg'].min(): 0.2f} - ${df_mid['total_year_avg'].max(): 0.2f}\n Mean = ${df_mid['mortalty_reop_rate'].mean():0.6f} $"
text3 =f"high\n${df_high['total_year_avg'].min(): 0.2f} - ${df_high['total_year_avg'].max(): 0.2f}\n Mean = ${df_high['mortalty_reop_rate'].mean():0.6f} $"
# ax = plt.gca()
# ax = sns.boxplot(x="day", y="total_bill", data=df_mid['mortalty_reop_rate'])
# show plot
labels = [text,text2,text3]
fig1, ax1 = plt.subplots()
ax1.set_title('Mortality surgid reop boxplot')
bp = ax1.boxplot(data, patch_artist=True, labels=labels)
colors = ['pink', 'lightblue', 'palegreen']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
plt.legend(handles=[f("pink"), f("lightblue"), f("palegreen")],
labels=['low', 'mid', 'high'])
plt.ylabel("Mortality Reop rate")
plt.show()
# ax = plt.gca()
#
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# new_df.boxplot(column='mortalty_reop_rate', by='bins')
# plt.legend(handles=[f("palevioletred"), f("mediumturquoise"), f("yellow")],
# labels=['low', 'mid','high'])
#
# plt.show()
def mortality_reop_siteid_boxplot():
mask = df1['Year_sum_reop'] == 0
df_reop = df1[~mask]
# total_year_sum
new_df=pd.DataFrame(data=df_reop,columns=['mortalty_reop_rate','total_year_avg'])
new_df['bins'] = pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high'])
print(new_df)
new_df.to_csv("box_siteid_mort.csv")
mask = new_df['bins'] == 'low'
df_low = new_df[mask]
mask = new_df['bins'] == 'mid'
df_mid = new_df[mask]
mask = new_df['bins'] == 'high'
df_high = new_df[mask]
data = [df_low['mortalty_reop_rate'],df_mid['mortalty_reop_rate'],df_high['mortalty_reop_rate']]
print (df_low.describe())
print(df_mid.describe())
print(df_high.describe())
text = f" low\n ${df_low['total_year_avg'].min(): 0.2f} - ${df_low['total_year_avg'].max(): 0.2f}\n Mean = ${df_low['mortalty_reop_rate'].mean():0.6f} $"
text2 = f"mid\n ${df_mid['total_year_avg'].min(): 0.2f} - ${df_mid['total_year_avg'].max(): 0.2f}\n Mean = ${df_mid['mortalty_reop_rate'].mean():0.6f} $"
text3 = f"high\n${df_high['total_year_avg'].min(): 0.2f} - ${df_high['total_year_avg'].max(): 0.2f}\n Mean = ${df_high['mortalty_reop_rate'].mean():0.6f} $"
labels = [text, text2, text3]
fig1, ax1 = plt.subplots()
ax1.set_title('Mortality siteid reop boxplot')
bp = ax1.boxplot(data, patch_artist=True, labels=labels)
colors = ['pink', 'lightblue', 'palegreen']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
plt.legend(handles=[f("pink"), f("lightblue"), f("palegreen")],
labels=['low', 'mid', 'high'])
plt.ylabel("Mortality Reop rate")
plt.show()
def complics_reop_surgid_boxplot():
mask = df['Year_sum_reop'] == 0
df_reop = df[~mask]
# total_year_sum
new_df=pd.DataFrame(data=df_reop,columns=['Complics_reop_rate','total_year_avg'])
new_df['bins'] = pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high'])
print(new_df)
new_df.to_csv("box_surgid_complics.csv")
mask = new_df['bins'] == 'low'
df_low = new_df[mask]
mask = new_df['bins'] == 'mid'
df_mid = new_df[mask]
mask = new_df['bins'] == 'high'
df_high = new_df[mask]
data = [df_low['Complics_reop_rate'],df_mid['Complics_reop_rate'],df_high['Complics_reop_rate']]
print (df_low.describe())
print(df_mid.describe())
print(df_high.describe())
text = f" low\n${df_low['total_year_avg'].min(): 0.2f} - ${df_low['total_year_avg'].max(): 0.2f}\n Mean = ${df_low['Complics_reop_rate'].mean():0.6f} $"
text2 = f"mid\n${df_mid['total_year_avg'].min(): 0.2f} - ${df_mid['total_year_avg'].max(): 0.2f}\n Mean = ${df_mid['Complics_reop_rate'].mean():0.6f} $"
text3 =f"high\n${df_high['total_year_avg'].min(): 0.2f} - ${df_high['total_year_avg'].max(): 0.2f}\n Mean = ${df_high['Complics_reop_rate'].mean():0.6f} $"
labels = [text, text2, text3]
fig1, ax1 = plt.subplots()
ax1.set_title('Complication surgid reop boxplot')
bp = ax1.boxplot(data, patch_artist=True, labels=labels)
colors = ['pink', 'lightblue', 'palegreen']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
plt.legend(handles=[f("pink"), f("lightblue"), f("palegreen")],
labels=['low', 'mid', 'high'])
plt.ylabel("Complication Reop rate")
plt.show()
def complics_reop_siteid_boxplot():
mask = df1['Year_sum_reop'] == 0
df_reop = df1[~mask]
# total_year_sum
new_df=
|
pd.DataFrame(data=df_reop,columns=['Complics_reop_rate','total_year_avg'])
|
pandas.DataFrame
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Loads the episode lengths from the csv files into a dictionary and return the dictionary
def load_data(algpath, name='episodes'):
Data = []
dirFiles = os.listdir(algpath)
# Files = np.array([i for i in dirFiles if 'episodes' in i])
Files = np.array([i for i in dirFiles if name in i])
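# keep only the files whose name contains the requested substring (by default 'episodes')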
for fileIndex in range(len(Files)):
if name == "episodes":
List =
|
pd.read_csv(algpath+'/'+Files[fileIndex])
|
pandas.read_csv
|
import argparse
import sys
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import metrics
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import curve_fit
from collections import Counter
import pickle
import pdb
parser = argparse.ArgumentParser(description = '''Visualize and analyze the DockQ scores.''')
#Bench4
parser.add_argument('--bench4_dockq_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 AF in csv.')
parser.add_argument('--bench4_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 from RF in csv.')
parser.add_argument('--plDDT_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#parser.add_argument('--pconsdock_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
#parser.add_argument('--pconsdock_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
parser.add_argument('--bench4_kingdom', nargs=1, type= str, default=sys.stdin, help = 'Path to kingdoms for bench4 in csv.')
parser.add_argument('--dssp_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp annotations for bench4 in csv.')
parser.add_argument('--afdefault_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
#Marks positive
parser.add_argument('--marks_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set RF in csv.')
parser.add_argument('--marks_dockq_AF_bb', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF back bone atoms in csv.')
parser.add_argument('--marks_dockq_AF_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF all atoms in csv.')
parser.add_argument('--marks_dockq_GRAMM', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set GRAMM in csv.')
parser.add_argument('--marks_dockq_TMfull', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set TMdock in csv.')
parser.add_argument('--marks_dockq_TMint', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set interface TMdock in csv.')
parser.add_argument('--marks_dockq_mdockpp', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set MdockPP in csv.')
parser.add_argument('--plDDT_marks_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--plDDT_marks_fused', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--dssp_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp metrics in csv.')
parser.add_argument('--ifstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to if metrics in csv.')
parser.add_argument('--aln_scores_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to aln scores in csv.')
parser.add_argument('--oxstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to statistics over organisms in csv.')
parser.add_argument('--afdefault_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
parser.add_argument('--af_chain_overlap_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to chain overlap for AF a3m in csv.')
#Marks negative
parser.add_argument('--plDDT_marks_negative_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#Negatome
parser.add_argument('--plDDT_negatome_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#New set
parser.add_argument('--newset_dockq_AF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for new set AF in csv.')
parser.add_argument('--plDDT_newset', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv for newset.')
#Output directory
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include /in end')
################FUNCTIONS#################
def dockq_box(bench4_dockq, outdir):
'''Plot a boxplot of the dockq score for the different modes
'''
#Plot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
modes = bench4_dockq.columns[1:]
all_modes = []
all_scores = []
all_msas = []
all_model_options = []
accuracies = {}
for mode in modes:
#Frac correct and avg score
fraq_correct = np.argwhere(bench4_dockq[mode].values>=0.23).shape[0]/len(bench4_dockq)
accuracies[mode]=fraq_correct
av = np.average(bench4_dockq[mode].values)
print(mode, np.round(fraq_correct,3),np.round(av,3))
#Save scores
all_scores.extend([*bench4_dockq[mode].values])
mode = '_'.join(mode.split('_')[4:])
mode = mode.split('_')
msa = mode[0]
model = '_'.join(mode[1:-1])
option = mode[-1]
#save
all_modes.extend([msa+'\n'+model+'\n'+option]*len(bench4_dockq))
all_msas.extend([msa]*len(bench4_dockq))
all_model_options.extend([model+' '+option]*len(bench4_dockq))
def correlate_scores(bench4_dockq, outdir):
'''Correlate the scores for all different modeling strategies
'''
modes = ['DockQ_dockqstats_bench4_af2_hhblits_model_1_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_rec10']
corr_matrix = np.zeros((len(modes),len(modes)))
for i in range(len(modes)):
scores_i = bench4_dockq[modes[i]].values
for j in range(i+1,len(modes)):
scores_j = bench4_dockq[modes[j]].values
#Correlate
R,p = pearsonr(scores_i,scores_j)
corr_matrix[i,j]=np.round(R,2)
corr_matrix[j,i]=np.round(R,2)
print(modes)
print(corr_matrix)
#Create df
corr_df = pd.DataFrame()
modes = ['_'.join(x.split('_')[4:]) for x in modes]
corr_df['Comparison'] = modes
for i in range(len(modes)):
corr_df[modes[i]]=corr_matrix[i,:]
corr_df.to_csv(outdir+'model_correlations.csv')
def fetch_missing_dockq(marks_dockq_AF_bb,marks_dockq_AF_aa):
'''Fetch missing DockQ scores
'''
ids = ['_'.join(x.split('-')) for x in marks_dockq_AF_aa.complex_id.values]
#Get missing scores
missing = marks_dockq_AF_bb[~marks_dockq_AF_bb.complex_id.isin(ids)]
ids = [x[:6]+'-'+x[7:] for x in missing.complex_id.values]
missing['complex_id']=ids
marks_dockq_AF_aa = pd.concat([marks_dockq_AF_aa,missing[marks_dockq_AF_aa.columns]])
return marks_dockq_AF_aa
def pdockq(if_plddt_contacts, dockq_scores, outdir):
#pdockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
#Create RA
x_ra = []
y_ra = []
y_std = []
y_av_err = []
step = 20
for t in np.arange(0,max(if_plddt_contacts)-step,step):
inds = np.argwhere((if_plddt_contacts>=t)&(if_plddt_contacts<t+step))[:,0]
x_ra.append(t+step/2)
y_ra.append(np.average(dockq_scores[inds]))
y_std.append(np.std(dockq_scores[inds]))
y_av_err.append(np.average(np.absolute(dockq_scores[inds]-y_ra[-1])))
#Do a simple sigmoid fit
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
xdata = if_plddt_contacts[np.argsort(if_plddt_contacts)]
ydata = dockq_scores[np.argsort(if_plddt_contacts)]
p0 = [max(ydata), np.median(xdata),1,min(ydata)] # initial guess for the sigmoid parameters (helps curve_fit converge)
popt, pcov = curve_fit(sigmoid, xdata, ydata,p0, method='dogbox')
y = sigmoid(xdata, *popt)
plt.plot(xdata,y,color='r',label='Sigmoidal fit')
#Calc error
print('Sigmoid params:',*popt)
plt.scatter(if_plddt_contacts,dockq_scores,s=1)
#plt.plot(x_ra,y_ra,label='Running average', color='tab:blue')
#plt.fill_between(x_ra,np.array(y_ra)-np.array(y_av_err),np.array(y_ra)+np.array(y_av_err),color='tab:blue',alpha=0.25, label='Average error')
plt.title('pDockQ')
plt.xlabel('IF plDDT⋅log(IF contacts)')
plt.ylabel('DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pDockQ.svg',format='svg',dpi=300)
plt.close()
print('Average error for sigmoidal fit:',np.average(np.absolute(y-ydata)))
print('L=',np.round(popt[0],3),'x0=',np.round(popt[1],3) ,'k=',np.round(popt[2],3), 'b=',np.round(popt[3],3))
return popt
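#Illustrative helper (added sketch, not part of the original analysis): apply the
#sigmoid parameters popt=[L, x0, k, b] returned by pdockq() to score a single
#predicted model from its average interface plDDT and number of interface contacts.
#The function and argument names are assumptions made for this example.
def apply_pdockq_fit(if_plddt_av, num_if_contacts, popt):
    '''Return the pDockQ score for one model given the fitted sigmoid parameters.'''
    x = if_plddt_av*np.log10(num_if_contacts+1)
    L, x0, k, b = popt
    return L / (1 + np.exp(-k*(x-x0)))+b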
def ROC_pred_marks(marks_dockq_AF, plDDT_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
predicted structures
'''
#Merge dfs
plDDT_marks['complex_id']=plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on=['complex_id'],how='inner')
#Get min of chains
single_chain_plddt = np.min(merged[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
merged['min_chain_plddt_av_1'] = single_chain_plddt
#Analyze ROC as a function of
plDDT_metrics = ['if_plddt_av', 'min_chain_plddt_av',
'plddt_av', 'num_atoms_in_interface', 'num_res_in_interface']
plDDT_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
run='1'
dockq_scores = merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run'+run].values
correct = np.zeros(len(dockq_scores))
correct[np.argwhere(dockq_scores>=0.23)]=1
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
colors = {0:'darkblue',1:'magenta',2:'orange',3:'darkgreen',4:'tab:blue',5:'tab:olive',6:'k'}
for i in range(len(plDDT_metrics)):
plDDT_metric_vals = merged[plDDT_metrics[i]+'_'+run].values
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, plDDT_metric_vals, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
label = plDDT_metrics[i]
plt.plot(fpr, tpr, label = plDDT_nice_names[label]+': AUC = %0.2f' % roc_auc,color=colors[i])
#Add log(if contacts)*if_plddt_av
if_plddt_contacts = merged['if_plddt_av_1'].values*np.log10(merged['num_atoms_in_interface_1'].values+1)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, if_plddt_contacts, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Get pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
sigmoid_params = pdockq(if_plddt_contacts, dockq_scores, outdir)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, sigmoid(if_plddt_contacts,*sigmoid_params), pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('ROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_marks.svg',format='svg',dpi=300)
plt.close()
#pDockQ vs DockQ
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(sigmoid(if_plddt_contacts,*sigmoid_params),dockq_scores,s=1)
plt.title('pDockQ vs DockQ')
plt.xlabel('pDockQ')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pdockq_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#plot if plddt vs log contacts and color by dockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['num_atoms_in_interface_1'].values+1, merged['if_plddt_av_1'].values,c=dockq_scores,s=2)
cbar = plt.colorbar()
cbar.set_label('DockQ')
plt.xscale('log')
plt.ylim([40,100])
plt.title('Interface contacts, plDDT and DockQ')
plt.xlabel('Interface contacts')
plt.ylabel('Average interface plDDT')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'if_conctacts_vs_plddt.svg',format='svg',dpi=300)
plt.close()
return sigmoid_params
def score_marks_5runs_paired_af(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run3','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('Test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using av plDDT*log(if_contacts) in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('AUC using the same ranking', roc_auc)
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=3,color='tab:blue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=3,color='mediumseagreen',label='Min')
plt.title('Model ranking on the test set')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_marks_5runs.svg',format='svg',dpi=300)
plt.close()
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_af2']=first_ranked_scores
marks_dockq_AF['top_ranked_pDockQ']=first_ranked_separators
marks_dockq_AF['top_ranked_model_run_af2']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_af2_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_af2_marks_reduced.csv')
return marks_dockq_AF
def score_marks_5runs_paired_fused(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run3','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('FUSED test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using if_plddt_av and num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('FUSED AUC using the same ranking', roc_auc)
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_fused']=first_ranked_scores
marks_dockq_AF['top_ranked_model_run_fused']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_fused_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_fused_marks_reduced.csv')
return marks_dockq_AF
def marks_box(marks_dockq_AF, marks_dockq_GRAMM, marks_dockq_mdockpp, marks_dockq_TMfull, marks_dockq_TMint, marks_dockq_RF,outdir):
'''Boxplots of DockQ scores for the Marks test set
'''
marks_dockq_TMint = marks_dockq_TMint.dropna()
marks_dockq_TMfull = marks_dockq_TMfull.dropna()
#Get data
rf_scores = marks_dockq_RF.DockQ_dockqstats_marks_RF.values
gramm_scores = marks_dockq_GRAMM[1].values
mdockpp_scores = marks_dockq_mdockpp.DockQ.values
TMfull_scores = marks_dockq_TMfull.dockq.values
TMint_scores = marks_dockq_TMint.dockq.values
paired_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values
af2_std_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values
run1_both_scores= marks_dockq_AF.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values
run1_fused_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1.values
top_paired_af_scores = marks_dockq_AF.top_ranked_model_DockQ_af2.values
top_paired_fused_scores = marks_dockq_AF.top_ranked_model_DockQ_fused.values
data1 = [rf_scores, gramm_scores, mdockpp_scores, TMint_scores, af2_std_scores, paired_scores, top_paired_af_scores, top_paired_fused_scores]
data2 = [run1_both_scores, run1_fused_scores, top_paired_af_scores,top_paired_fused_scores]
all_data = [data1,data2]
xlabels1 = ['RF','GRAMM', 'MDockPP', 'TMdock\nInterfaces', 'AF2', 'Paired', 'AF2+Paired\ntop ranked','Block+Paired\ntop ranked']
xlabels2 = ['AF2+Paired', 'Block+Paired', 'AF2+Paired\ntop ranked', 'Block+Paired\ntop ranked']
all_xlabels = [xlabels1, xlabels2]
#Color
colors = sns.husl_palette(len(xlabels1)+2)
all_colors = [colors[:len(xlabels1)],colors[-len(xlabels2):]]
for i in range(len(all_data)):
#Boxplot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
data = all_data[i] #Get data and xlabel variation
xlabels = all_xlabels[i]
colors = all_colors[i]
#Success rates
srs = []
for j in range(len(data)):
sr = np.argwhere(data[j]>=0.23).shape[0]/len(data[j])
median = np.median(data[j])
print(xlabels[j],'sr:',np.round(sr,3),len(data[j]),median)
#xlabels[j]+='\nSR: '+str(np.round(100*sr,1))+'%'
#xlabels[j]+='\nM: '+str(np.round(median,3))
# Creating plot
#ax.violinplot(data)
bp = ax.boxplot(data, patch_artist = True, notch=True, showfliers=False)
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.75)
# changing color and linewidth of
# medians
for median in bp['medians']:
median.set(color ='k',linewidth = 3)
# #Add swarm
# for i in range(len(data)):
# # Add some random "jitter" to the x-axis
# x = np.random.normal(i, 0.04, size=len(data[i]))
# plt.plot(x+1, data[i], 'r.', alpha=0.2)
# changing color and linewidth of
# whiskers
for whisker in bp['whiskers']:
whisker.set(color ='grey',
linewidth = 1)
# changing color and linewidth of
# caps
for cap in bp['caps']:
cap.set(color ='grey',
linewidth = 1)
plt.title('DockQ scores for the test set',fontsize=20)
plt.xticks(np.arange(1,len(xlabels)+1),xlabels,fontsize=12)
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'DockQ_box_test'+str(i)+'.svg',format='svg',dpi=300)
plt.close()
def AF_vs_RF_marks(marks_dockq_RF,marks_dockq_AF, outdir):
'''Compare the scores for RF vs AF
'''
merged = pd.merge(marks_dockq_RF,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks RF and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ_dockqstats_marks_RF'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('RF vs AF2 performance on the test set')
plt.xlabel('RF DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'RF_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_RF = np.argwhere(merged['DockQ_dockqstats_marks_RF'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate RF:',num_correct_RF,'out of',num_total,'|',np.round(100*num_correct_RF/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where RF outperforms AF
scores = merged[['DockQ_dockqstats_marks_RF','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
rf_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(rf_pos,axis=1)
print('RF outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(rf_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(rf_pos))
def AF_vs_GRAMM_marks(marks_dockq_GRAMM, marks_dockq_AF, outdir):
'''Compare the scores for GRAMM vs AF
'''
marks_dockq_GRAMM = marks_dockq_GRAMM.rename(columns={1: 'DockQ GRAMM'})
marks_dockq_GRAMM['complex_id'] = ['_'.join(x.split('-')) for x in marks_dockq_GRAMM[0]]
merged = pd.merge(marks_dockq_GRAMM,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks GRAMM and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ GRAMM'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('GRAMM vs AF2 performance on the test set')
plt.xlabel('GRAMM DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'GRAMM_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_GRAMM = np.argwhere(merged['DockQ GRAMM'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate GRAMM:',num_correct_GRAMM,'out of',num_total,'|',np.round(100*num_correct_GRAMM/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where GRAMM outperforms AF
scores = merged[['DockQ GRAMM','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
GRAMM_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(GRAMM_pos,axis=1)
print('GRAMM outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(GRAMM_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(GRAMM_pos))
def AF_vs_TMint_marks(marks_dockq_TMint, marks_dockq_AF, outdir):
'''Compare the scores for TMint (TMdock interfaces) vs AF
'''
marks_dockq_TMint = marks_dockq_TMint.rename(columns={'dockq': 'DockQ TMint'})
merged = pd.merge(marks_dockq_TMint,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks TMint and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ TMint'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('TMint vs AF2 performance on the test set')
plt.xlabel('TMint DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'TMint_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_TMint = np.argwhere(merged['DockQ TMint'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate TMint:',num_correct_TMint,'out of',num_total,'|',np.round(100*num_correct_TMint/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where TMint outperforms AF
scores = merged[['DockQ TMint','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
TMint_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(TMint_pos,axis=1)
print('TMint outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(TMint_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(TMint_pos))
def real_features_marks(marks_dockq_AF, dssp_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
real structures
'''
#Change DSSP df
dssp_marks['Helix']=dssp_marks.G+dssp_marks.H+dssp_marks.I
dssp_marks['Sheet']=dssp_marks.E+dssp_marks.B
dssp_marks['Loop']=dssp_marks[' '].values
ss = dssp_marks[['Helix','Sheet','Loop']].values #0,1,2
dssp_marks['ss_class']=np.argmax(dssp_marks[['Helix','Sheet','Loop']].values,axis=1)
dssp_marks = dssp_marks[['id1','id2','ss_class']]
#Merge dfs
dssp_marks['complex_id']=dssp_marks.id1+'-'+dssp_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged_dssp = pd.merge(marks_dockq_AF,dssp_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(marks_dockq_AF,ifstats_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,aln_scores_marks,on=['complex_id'],how='inner')
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
merged_if = pd.merge(merged_if,AFneffs_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,topneffs_marks,on=['complex_id'],how='inner')
'''
G = 3-turn helix (310 helix). Min length 3 residues.
H = 4-turn helix (α helix). Minimum length 4 residues.
I = 5-turn helix (π helix). Minimum length 5 residues.
T = hydrogen bonded turn (3, 4 or 5 turn)
E = extended strand in parallel and/or anti-parallel β-sheet conformation. Min length 2 residues.
B = residue in isolated β-bridge (single pair β-sheet hydrogen bond formation)
S = bend (the only non-hydrogen-bond based assignment).
C = coil (residues which are not in any of the above conformations).
'''
print('Num complexes in DSSP feature analysis',len(merged_dssp))
#Plot success rate per ss class
ss_classes = {0:'Helix',1:'Sheet',2:'Loop'}
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for i in range(3):
sel = merged_dssp[merged_dssp.ss_class==i]
success=np.argwhere(sel.top_ranked_model_DockQ_af2.values>=0.23).shape[0]/len(sel)
print(ss_classes[i],'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.distplot(sel.top_ranked_model_DockQ_af2,label=ss_classes[i]+' : '+str(np.round(100*success,1))+' % successful',hist=False)
plt.title('DockQ and SS for the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_SS_marks.svg',format='svg',dpi=300)
plt.close()
#Plot feature vs DockQ
#Get min chain len
merged_if['smallest chain length'] = np.min(merged_if[['l1','l2']].values,axis=1)
#Get max chain len
merged_if['biggest chain length'] = np.max(merged_if[['l1','l2']].values,axis=1)
vars = ['num_if_contacts_total','smallest chain length', 'biggest chain length', 'aln_score', 'AFdefault_Neff', 'tophit_Neff']
nicer_names = {'num_if_contacts_total':'number of interface contacts','smallest chain length':'smallest chain length', 'biggest chain length':'biggest chain length',
'aln_score':'alignment score', 'AFdefault_Neff':'AF Neff', 'tophit_Neff':'Paired Neff'}
print('Num complexes in real feature analysis',len(merged_if))
#Plot each third and the distribution vs vars
for var in vars:
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
print (np.quantile(merged_if[var],0.5,axis=0))
l=[np.min(merged_if[var])]
l+=[np.quantile(merged_if[var],0.33,axis=0)]
l+=[np.quantile(merged_if[var],0.67,axis=0)]
l+=[np.max(merged_if[var])]
print (l)
j=0
for i in l[0:3]:
j+=1
#print ("test: ",i,j,l[j])
sel = merged_if.loc[ (merged_if[var] > i) & (merged_if[var] < l[j]) ]
success=np.argwhere(sel.top_ranked_model_DockQ_af2.values>=0.23).shape[0]/len(sel)
print(j,str(i)+" - "+ str(l[j])+":",'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.kdeplot(sel.top_ranked_model_DockQ_af2,label=str(round(i,0))+"-"+str(round(l[j],0))+' : '+str(np.round(100*success,1))+' % successful')
plt.title('DockQ and ' + nicer_names[var] + '\nfor the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_'+var+'_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_per_org(marks_dockq_AF, oxstats_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Analyze the dockq per organism
'''
#Merge
oxstats_marks['complex_id'] = oxstats_marks.id1+'-'+oxstats_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged = pd.merge(marks_dockq_AF,oxstats_marks,on='complex_id',how='left')
merged = pd.merge(merged,ifstats_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,aln_scores_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,AFneffs_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,topneffs_marks,on=['complex_id'],how='inner')
#Get min chain len
merged['smallest chain length'] = np.min(merged[['l1','l2']].values,axis=1)
#Get max chain len
merged['biggest chain length'] = np.max(merged[['l1','l2']].values,axis=1)
organisms = ['Homo sapiens','Saccharomyces cerevisiae', 'Escherichia coli']
vars = ['num_if_contacts_total','smallest chain length', 'biggest chain length', 'aln_score','AFdefault_Neff', 'tophit_Neff']
#Save
orgs = []
dockq_scores = []
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for org in organisms:
sel = merged[merged.Org1==org]
sel = sel[sel.Org2==org]
print('Number of complexes for',org,len(sel))
#Successs rate
sel_scores = sel.top_ranked_model_DockQ_af2.values
sr = np.argwhere(sel_scores>=0.23).shape[0]/len(sel_scores)
print('Success rate',sr)
#correlation
for var in vars:
R,p = spearmanr(sel[var].values,sel['top_ranked_model_DockQ_af2'].values)
print(var, np.round(R,2))
if org =='Saccharomyces cerevisiae':
org = 'S.cerevisiae'
if org =='Escherichia coli':
org = 'E.coli'
sns.distplot(sel_scores,label=org+' : '+str(np.round(sr*100,1))+' % successful',hist=False)
plt.title('DockQ per organism for the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_org_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_per_kingdom(marks_dockq_AF, oxstats_marks, AFneffs_marks, topneffs_marks, outdir):
'''Analyze the dockq per kingdom
'''
#Merge
oxstats_marks['complex_id'] = oxstats_marks.id1+'-'+oxstats_marks.id2
#AFneffs_marks['complex_id']=['_'.join(x.split('-')) for x in AFneffs_marks.complex_id]
#topneffs_marks['complex_id']=['_'.join(x.split('-')) for x in topneffs_marks.complex_id]
merged = pd.merge(marks_dockq_AF,oxstats_marks,on='complex_id',how='left')
merged = pd.merge(merged,AFneffs_marks,on=['complex_id'],how='inner')
merged =
|
pd.merge(merged,topneffs_marks,on=['complex_id'],how='inner')
|
pandas.merge
|
#####################################################.
# This file stores all the functions #
# used for generating all parameters from DBSTEP #
#####################################################.
import sys
import os
import pandas as pd
import subprocess
from pathlib import Path
from aqme.utils import move_file
def calculate_db_parameters(qm_files,args,log,w_dir_initial,name_mol,lot,bs):
try:
from dbstep.Dbstep import dbstep
except (ModuleNotFoundError,AttributeError):
log.write('\nx DBSTEP is not installed correctly - DBSTEP is not available')
sys.exit()
total_data = pd.DataFrame()
#find the center and ligand atoms defined for each molecule in the file
filelines = open(w_dir_initial+'/'+args.dbstep_cen_lig_file,'r').readlines()
for counter,log in enumerate(qm_files):
for line in (filelines):
split_line = line.strip().split(',')
if split_line[0] == name_mol:
C = split_line[1]
L = split_line[2]
break
sterics = dbstep(log, atom1=str(C),atom2=str(L), volume=True, sterimol=True, commandline=True)
total_data.at[counter,'Name'] = name_mol
total_data.at[counter,'log'] = log.split('.log')[0]
total_data.at[counter,'bv'] = sterics.bur_vol
total_data.at[counter,'bmax'] = sterics.Bmax
total_data.at[counter,'bmin'] = sterics.Bmin
total_data.at[counter,'L'] = sterics.L
#creating folder for all molecules to write geom parameter
if str(bs).find('/') > -1:
folder = w_dir_initial + '/QPRED/dbstep_parameters/all_confs_sterics/'+str(lot)+'-'+str(bs).split('/')[0]
else:
folder = w_dir_initial + '/QPRED/dbstep_parameters/all_confs_sterics/'+str(lot)+'-'+str(bs)
try:
os.makedirs(folder)
except OSError:
if os.path.isdir(folder):
pass
total_data.to_csv(folder+'/'+name_mol+'-all-steric-data.csv',index=False)
def calculate_boltz_and_dbstep(val,args,log,name,w_dir,w_dir_initial,lot,bs):
# GoodVibes must be installed as a module (through pip or conda)
cmd_boltz = ['python','-m', 'goodvibes', '--boltz', '--output', name ]
for file in val:
cmd_boltz.append(file)
subprocess.call(cmd_boltz)
#writing to the correct places
if str(bs).find('/') > -1:
destination = Path(w_dir_initial+'/QPRED/dbstep_parameters/boltz/'+str(lot)+'-'+str(bs).split('/')[0])
else:
destination = Path(w_dir_initial+'/QPRED/dbstep_parameters/boltz/'+str(lot)+'-'+str(bs))
move_file(destination, os.getcwd(),'Goodvibes_'+name+'.dat')
if str(bs).find('/') > -1:
dbstep_parm_file = w_dir_initial + '/QPRED/dbstep_parameters/all_confs_sterics/'+str(lot)+'-'+str(bs).split('/')[0]+'/'+name+'-all-steric-data.csv'
else:
dbstep_parm_file = w_dir_initial + '/QPRED/dbstep_parameters/all_confs_sterics/'+str(lot)+'-'+str(bs)+'/'+name+'-all-steric-data.csv'
df_dbstep = pd.read_csv(dbstep_parm_file)
if str(bs).find('/') > -1:
file = w_dir_initial+'/QPRED/dbstep_parameters/boltz/'+str(lot)+'-'+str(bs).split('/')[0]+'/Goodvibes_'+name+'.dat'
else:
file = w_dir_initial+'/QPRED/dbstep_parameters/boltz/'+str(lot)+'-'+str(bs)+'/Goodvibes_'+name+'.dat'
outlines = open(file,"r").readlines()
#reading the data from the boltz file
for i in range(len(outlines)):
# I remove the NMR from the file names using [0:-4]
if outlines[i].find(' ***************************************************************************************************************************************\n') > -1 and outlines[i-1].find(' Structure') > -1:
start_line = i+1
elif outlines[i].find(' ***************************************************************************************************************************************\n') > -1:
end_line = i
boltz_values =
|
pd.DataFrame()
|
pandas.DataFrame
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import operator
import math
import numpy as np
import pandas as pd
import scipy.integrate
import scipy.signal
def series_refit_index(series, start=None, end=None, method='pre'):
"""
Slice a series using :func:`series_window` and ensure we have a value at
exactly the specified boundaries.
:param series: Series to act on
:type series: pandas.Series
:param start: First index value to find in the returned series.
:type start: object
:param end: Last index value to find in the returned series.
:type end: object
:param method: Windowing method used to select the first and last values of
the series using :func:`series_window`. Defaults to ``pre``, which is
suitable for signals where all the value changes have a corresponding
row without any fixed sample-rate constraints. If they have been
downsampled, ``nearest`` might be a better choice.
"""
return _data_refit_index(series, start, end, method=method)
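# A minimal usage sketch (illustrative, not part of the module): the refit data
# starts and ends exactly at the requested boundaries, keeping the value that was
# in effect at each boundary with the default method='pre'.
#
#   s = pd.Series([0, 1, 2], index=[10.0, 11.5, 13.0])
#   s2 = series_refit_index(s, start=10.5, end=12.5)
#   assert s2.index[0] == 10.5 and s2.index[-1] == 12.5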
def df_refit_index(df, start=None, end=None, method='pre'):
"""
Same as :func:`series_refit_index` but acting on :class:`pandas.DataFrame`
"""
return _data_refit_index(df, start, end, method=method)
def _data_refit_index(data, start, end, method):
if data.empty:
return data
data = _data_window(data, (start, end), method=method, clip_window=True)
index = data.index.to_series()
if start is not None:
index.iloc[0] = start
if end is not None:
index.iloc[-1] = end
# Shallow copy is enough, we only want to replace the index and not the
# actual data
data = data.copy(deep=False)
data.index = index
return data
def df_squash(df, start, end, column='delta'):
"""
Slice a dataframe of deltas in [start:end] and ensure we have
an event at exactly those boundaries.
The input dataframe is expected to have a "column" which reports
the time delta between consecutive rows, as for example dataframes
generated by add_events_deltas().
The returned dataframe is guaranteed to have an initial and final
event at the specified "start" ("end") index values, whose values
are the same as the last event before (first event after) the
specified "start" ("end") time.
Slice a dataframe to [start:end], and adjust the time data so that it
makes sense within the interval.
Examples to make it clearer:
df is:
Time len state
15 1 1
16 1 0
17 1 1
18 1 0
-------------
df_squash(df, 16.5, 17.5) =>
Time len state
16.5 .5 0
17 .5 1
df_squash(df, 16.2, 16.8) =>
Time len state
16.2 .6 0
:returns: a new df that fits the above description
"""
if df.empty:
return df
end = min(end, df.index[-1] + df[column].values[-1])
res_df =
|
pd.DataFrame(data=[], columns=df.columns)
|
pandas.DataFrame
|
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script
from pandas_profiling.config import Settings
from pandas_profiling.model.summary_helpers_image import (
extract_exif,
hash_image,
is_image_truncated,
open_image,
)
def mad(arr: np.ndarray) -> np.ndarray:
"""Median Absolute Deviation: a "Robust" version of standard deviation.
Indicates the variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
return np.median(np.abs(arr - np.median(arr)))
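# Worked example (illustrative): for arr = np.array([1, 2, 3, 100]) the median is
# 2.5, the absolute deviations are [1.5, 0.5, 0.5, 97.5], and mad(arr) returns 1.0,
# which is far less sensitive to the outlier than the standard deviation (~42).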
def named_aggregate_summary(series: pd.Series, key: str) -> dict:
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def length_summary(series: pd.Series, summary: dict = None) -> dict:
if summary is None:
summary = {}
length = series.str.len()
summary.update({"length": length})
summary.update(named_aggregate_summary(length, "length"))
return summary
def file_summary(series: pd.Series) -> dict:
"""
Args:
series: series of file paths to summarize
Returns:
dict with the file size and the created/accessed/modified timestamps per file
"""
# Transform
stats = series.map(lambda x: os.stat(x))
def convert_datetime(x: float) -> str:
return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S")
# Transform some more
summary = {
"file_size": stats.map(lambda x: x.st_size),
"file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime),
"file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime),
"file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime),
}
return summary
def path_summary(series: pd.Series) -> dict:
"""
Args:
series: series of paths to summarize
Returns:
dict with the common prefix and value counts of stems, suffixes, names, parents and anchors
"""
# TODO: optimize using value counts
summary = {
"common_prefix": os.path.commonprefix(series.values.tolist())
or "No common prefix",
"stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(),
"suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(),
"name_counts": series.map(lambda x: os.path.basename(x)).value_counts(),
"parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(),
"anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(),
}
summary["n_stem_unique"] = len(summary["stem_counts"])
summary["n_suffix_unique"] = len(summary["suffix_counts"])
summary["n_name_unique"] = len(summary["name_counts"])
summary["n_parent_unique"] = len(summary["parent_counts"])
summary["n_anchor_unique"] = len(summary["anchor_counts"])
return summary
def url_summary(series: pd.Series) -> dict:
"""
Args:
series: series of parsed URLs to summarize
Returns:
dict with value counts of the URL schemes, netlocs, paths, queries and fragments
"""
summary = {
"scheme_counts": series.map(lambda x: x.scheme).value_counts(),
"netloc_counts": series.map(lambda x: x.netloc).value_counts(),
"path_counts": series.map(lambda x: x.path).value_counts(),
"query_counts": series.map(lambda x: x.query).value_counts(),
"fragment_counts": series.map(lambda x: x.fragment).value_counts(),
}
return summary
def count_duplicate_hashes(image_descriptions: dict) -> int:
"""
Args:
image_descriptions: list of image description dicts, each possibly containing a "hash" key
Returns:
int: the number of images whose hash duplicates one seen earlier
"""
counts = pd.Series(
[x["hash"] for x in image_descriptions if "hash" in x]
).value_counts()
return counts.sum() - len(counts)
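# Worked example (illustrative): for hashes ['a', 'a', 'b', 'c', 'c'] the value
# counts are {a: 2, c: 2, b: 1}, so counts.sum() - len(counts) = 5 - 3 = 2, i.e.
# two images share a hash with an image seen earlier.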
def extract_exif_series(image_exifs: list) -> dict:
"""
Args:
image_exifs: list of EXIF dicts, one per image
Returns:
dict with the observed EXIF keys and the values collected per key
"""
exif_keys = []
exif_values: dict = {}
for image_exif in image_exifs:
# Extract key
exif_keys.extend(list(image_exif.keys()))
# Extract values per key
for exif_key, exif_val in image_exif.items():
if exif_key not in exif_values:
exif_values[exif_key] = []
exif_values[exif_key].append(exif_val)
series = {"exif_keys":
|
pd.Series(exif_keys, dtype=object)
|
pandas.Series
|
import os
from datetime import datetime
import time
import sys
import ccxt
import pandas as pd
import pickle as pkl
# AITB Basic base class
from .basic import Basic
# Trends
from pytrends.request import TrendReq
# Feed reader
import feedparser
# Textblob
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
# Vader
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
class Runner(Basic):
def test(self):
# Create lock file
tname = self.logPath + 'test.log'
with open(tname, 'a') as file:
file.write(str(datetime.now()) + " -- Testing timing and threads\n")
print('TesterRunner', file=sys.stderr)
def dataDownload(self, aggro):
# Create file and path
if(aggro):
dpre = 'dataDownloadAggro'
else:
dpre = 'dataDownload'
dname = self.runPath + dpre + '.run'
dlog = self.logPath + dpre + '.log'
# Test if already running
if os.path.exists(dname):
return
# Write lock file
with open(dname, 'w') as file:
file.write(str(datetime.now()))
# Create SQL pre insert
if 'sqlite' in str(self.db.engine.url):
sqlpre = 'INSERT OR IGNORE INTO '
else:
sqlpre = 'INSERT IGNORE INTO '
# Get list of data files
dataCfgs = self.listCfgFiles('data')
for file in dataCfgs:
tmpDataConf = self.readCfgFile('data', file)
if tmpDataConf['enabled'] and tmpDataConf['aggro'] == aggro:
# Create exchange instance
ex_class = getattr(ccxt, tmpDataConf['con'])
tmpex = ex_class({'timeout': 10000, 'enableRateLimit': True})
data = ""
try:
# Check if data empty else start from last entry
if tmpDataConf['count'] == 0:
if tmpex.has['fetchOHLCV']:
data = tmpex.fetch_ohlcv(tmpDataConf['symb'], '1m', tmpDataConf['start'])
else:
if tmpex.has['fetchOHLCV']:
# Check for recent additions
result = self.db.session.execute('SELECT * from ' + tmpDataConf['id'] + ' ORDER BY Date DESC LIMIT 1').fetchall()
self.db.session.commit()
# Drop results to dataFrame
datadf = pd.DataFrame(result, columns=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
ldate = datetime.utcfromtimestamp(datadf['Date'].iloc[-1] / 1000).strftime('%Y-%m-%d %H:%M')
with open(dlog, 'a') as file:
file.write(str(datetime.now()) + " -- Downloading " + tmpDataConf['symb'] + " starting from " + ldate + "\n")
data = tmpex.fetch_ohlcv(tmpDataConf['symb'], '1m', int(datadf['Date'].iloc[-1]))
# Write results to database
for datarow in data:
self.db.session.execute(sqlpre + tmpDataConf['id'] + ' VALUES (' + str(datarow[0]) + ',' + str(datarow[1]) + ',' + str(datarow[2]) + ',' + str(datarow[3]) + ',' + str(datarow[4]) + ',' + str(datarow[5]) + ')')
# Commit database entries
self.db.session.commit()
except (ccxt.ExchangeError, ccxt.NetworkError) as error:
# Catch most common errors
with open(dlog, 'a') as file:
file.write(str(datetime.now()) + " --" + type(error).__name__ + "--" + str(error) + "\n")
break
# Check for recent additions
result = self.db.session.execute('SELECT * from ' + tmpDataConf['id']).fetchall()
# Drop results to dataFrame
datadf = pd.DataFrame(result, columns=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
# Create and save head tail and count
tmpDataConf['head'] = datadf.values.tolist()[0]
tmpDataConf['tail'] = datadf.values.tolist()[-1]
tmpDataConf['end'] = datadf.values.tolist()[-1][0]
tmpDataConf['count'] = str(datadf.shape[0])
id = tmpDataConf['id']
# Cfg File
self.writeCfgFile('data', id, tmpDataConf)
# Remove File Lock
os.remove(dname)
# Close DB Connection
def dataUpload(self):
# Create file and path
uname = self.runPath + 'dataUpload.run'
ulog = self.logPath + 'dataUpload.log'
# Test if already running
if os.path.exists(uname):
return
# Write lock file
with open(uname, 'w') as file:
file.write(str(datetime.now()))
# Get list of data files
upFiles = self.listUpFiles()
for fname in upFiles:
# Split file into extension and filename
nom, ext = os.path.splitext(fname)
ffname = os.path.join(self.upPath, fname)
updf = pd.DataFrame()
if ext == '.csv':
updf = pd.read_csv(ffname)
if ext == '.feather':
updf = pd.read_feather(ffname)
if ext == '.parquet':
updf =
|
pd.read_parquet(ffname, engine='fastparquet')
|
pandas.read_parquet
|
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
import testing as tm
import unittest
import pytest
try:
import pandas as pd
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_pandas())
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestPandas(unittest.TestCase):
def test_pandas(self):
df = pd.DataFrame([[1, 2., True], [2, 3., False]],
columns=['a', 'b', 'c'])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['a', 'b', 'c']
assert dm.feature_types == ['int', 'float', 'i']
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))
# overwrite feature_names and feature_types
dm = xgb.DMatrix(df, label=pd.Series([1, 2]),
feature_names=['x', 'y', 'z'],
feature_types=['q', 'q', 'q'])
assert dm.feature_names == ['x', 'y', 'z']
assert dm.feature_types == ['q', 'q', 'q']
assert dm.num_row() == 2
assert dm.num_col() == 3
# incorrect dtypes
df = pd.DataFrame([[1, 2., 'x'], [2, 3., 'y']],
columns=['a', 'b', 'c'])
self.assertRaises(ValueError, xgb.DMatrix, df)
# numeric columns
df = pd.DataFrame([[1, 2., True], [2, 3., False]])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['0', '1', '2']
assert dm.feature_types == ['int', 'float', 'i']
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))
df = pd.DataFrame([[1, 2., 1], [2, 3., 1]], columns=[4, 5, 6])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['4', '5', '6']
assert dm.feature_types == ['int', 'float', 'int']
assert dm.num_row() == 2
assert dm.num_col() == 3
df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
dummies = pd.get_dummies(df)
# B A_X A_Y A_Z
# 0 1 1 0 0
# 1 2 0 1 0
# 2 3 0 0 1
result, _, _ = xgb.core._maybe_pandas_data(dummies, None, None)
exp = np.array([[1., 1., 0., 0.],
[2., 0., 1., 0.],
[3., 0., 0., 1.]])
np.testing.assert_array_equal(result, exp)
dm = xgb.DMatrix(dummies)
assert dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z']
assert dm.feature_types == ['int', 'int', 'int', 'int']
assert dm.num_row() == 3
assert dm.num_col() == 4
df =
|
pd.DataFrame({'A=1': [1, 2, 3], 'A=2': [4, 5, 6]})
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
"""
基本面数据接口
Created on 2015/01/18
@author: <NAME>
@group : waditu
@contact: <EMAIL>
"""
import pandas as pd
from hsstock.tushare.stock import cons as ct
import lxml.html
from lxml import etree
import re
import time
from pandas.compat import StringIO
from hsstock.tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_stock_basics(date=None):
"""
Get basic information of companies listed in Shanghai and Shenzhen
Parameters
date: date in YYYY-MM-DD format; defaults to the previous trading day. Historical data is currently only available from 2016-08-09 onwards.
Return
--------
DataFrame
code, stock code
name, company name
industry, sub-industry
area, region
pe, price-to-earnings ratio
outstanding, outstanding (tradable) shares
totals, total shares (10k)
totalAssets, total assets (10k)
liquidAssets, current assets
fixedAssets, fixed assets
reserved, capital reserve
reservedPerShare, capital reserve per share
eps, earnings per share
bvps, book value per share
pb, price-to-book ratio
timeToMarket, listing date
"""
wdate = du.last_tddate() if date is None else date
wdate = wdate.replace('-', '')
if wdate < '20160809':
return None
datepre = '' if date is None else wdate[0:4] + wdate[4:6] + '/'
request = Request(ct.ALL_STOCK_BASICS_FILE%(datepre, '' if date is None else wdate))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
df = pd.read_csv(StringIO(text), dtype={'code':'object'})
df = df.set_index('code')
return df
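# Illustrative usage (not part of the library): the returned frame is indexed by
# stock code, so a single listing can be looked up directly, e.g.
#   basics = get_stock_basics()   # previous trading day by default
#   basics.loc['600848']          # '600848' is just an example code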
def get_report_data(year, quarter):
"""
Get earnings report data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter: 1, 2, 3 or 4 (only these four values are accepted)
Note: since the data is scraped from the website page by page, the speed depends on your current network speed
Return
--------
DataFrame
code, stock code
name, company name
eps, earnings per share
eps_yoy, earnings per share year-on-year change (%)
bvps, book value per share
roe, return on equity (%)
epcf, cash flow per share (CNY)
net_profits, net profit (10k CNY)
profits_yoy, net profit year-on-year change (%)
distrib, dividend distribution plan
report_date, report release date
"""
if ct._check_input(year,quarter) is True:
ct._write_head()
df = _get_report_data(year, quarter, 1,
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/python
# The following methods are thanks to <NAME>
# (http://cs.berry.edu/~nhamid/p2p/filer-python.html): __router,
# __handle_insertpeer, __handle_listpeers, __handle_peername,
# __handle_query, __handle_quit, buildpeers
from btpeer import *
import os
import sys
import tempfile
import numpy as np
import pandas as pd
import texttoimage
import math
from ast import literal_eval as make_tuple
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from io import BytesIO
sys.path.insert(1, '../../data') or sys.path.insert(1, '../data')
from visualize import Visualize
from filter import Filter
from quickQuery import QuickQuery
PEERNAME = "NAME" # request a peer's canonical id
LISTPEERS = "LIST"
INSERTPEER = "JOIN"
QUERY = "QUER"
FILEGET = "FGET"
PEERQUIT = "QUIT"
REPLY = "REPL"
ERROR = "ERRO"
# RC CHANGE
MODEL = "MODL" # request id's of modelling peers only
FILESEND = "FSEN"
QUERYSEND = "QSEN"
QUERYREQ = "QUER"
VISREQ = "VISR"
# EVA CHANGE
DATAQUERY = "DATQ" # used to query data on a peer without downloading it
BUILDMODEL = "BMOD"
# DATA_DIR = 'Spring21_SE_project/data'
# Assumption in this program:
# peer id's in this application are just "host:port" strings
#==============================================================================
class FilerPeer(BTPeer):
#==============================================================================
""" Implements a file-sharing peer-to-peer entity based on the generic
BerryTella P2P framework.
"""
#--------------------------------------------------------------------------
def __init__(self, maxpeers, serverport, model = False, directory = None):
#--------------------------------------------------------------------------
""" Initializes the peer to support connections up to maxpeers number
of peers, with its server listening on the specified port. Also sets
the dictionary of local files to empty and adds handlers to the
BTPeer framework.
"""
BTPeer.__init__(self, maxpeers, serverport)
self.addrouter(self.__router)
# The below variable is set to true if and only if this node is a modeling node.
self.model = model
self.directory = directory
self.received = dict()
handlers = {
LISTPEERS : self.__handle_listpeers,
INSERTPEER : self.__handle_insertpeer,
PEERNAME: self.__handle_peername,
QUERY: self.__handle_query,
PEERQUIT: self.__handle_quit,
MODEL: self.__handle_model,
FILEGET: self.__handle_fileget,
# DATAREQ: self.__handle_datareq,
QUERYREQ: self.__handle_queryreq,
DATAQUERY: self.__handle_dataquery,
VISREQ: self.__handle_visrequest,
BUILDMODEL: self.__handle_buildmodel,
}
for mt in handlers:
self.addhandler(mt, handlers[mt])
# end FilerPeer constructor
#--------------------------------------------------------------------------
def __debug(self, msg):
#--------------------------------------------------------------------------
if self.debug:
btdebug(msg)
#--------------------------------------------------------------------------
def __router(self, peerid):
#--------------------------------------------------------------------------
if peerid not in self.getpeerids():
return (None, None, None)
else:
rt = [peerid]
rt.extend(self.peers[peerid])
return rt
#--------------------------------------------------------------------------
def __handle_insertpeer(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the INSERTPEER (join) message type. The message data
should be a string of the form, "peerid host port", where peer-id
is the canonical name of the peer that desires to be added to this
peer's list of peers, host and port are the necessary data to connect
to the peer.
"""
self.peerlock.acquire()
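# Illustrative data value (the address is made up; the format follows the
# docstring above): data = "127.0.0.1:5001 127.0.0.1 5001", since peer ids in
# this application are "host:port" strings.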
try:
try:
peerid,host,port = data.split()
if self.maxpeersreached():
self.__debug('maxpeers %d reached: connection terminating'
% self.maxpeers)
peerconn.senddata(ERROR, 'Join: too many peers')
return
if peerid not in self.getpeerids() and peerid != self.myid:
self.addpeer(peerid, host, port)
self.__debug('added peer: %s' % peerid)
peerconn.senddata(REPLY, 'Join: peer added: %s' % peerid)
else:
peerconn.senddata(ERROR, 'Join: peer already inserted %s'
% peerid)
except:
self.__debug('invalid insert %s: %s' % (str(peerconn), data))
peerconn.senddata(ERROR, 'Join: incorrect arguments')
finally:
self.peerlock.release()
# end handle_insertpeer method
#--------------------------------------------------------------------------
def __handle_listpeers(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the LISTPEERS message type. Message data is not used. """
self.peerlock.acquire()
try:
self.__debug('Listing peers %d' % self.numberofpeers())
peerconn.senddata(REPLY, '%d' % self.numberofpeers())
for pid in self.getpeerids():
host,port = self.getpeer(pid)
peerconn.senddata(REPLY, '%s %s %d' % (pid, host, port))
finally:
self.peerlock.release()
#--------------------------------------------------------------------------
def __handle_peername(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the NAME message type by sending the value of the myid variable
through the BTPeerConnection peerconn. Message data is not used. """
peerconn.senddata(REPLY, self.myid)
#--------------------------------------------------------------------------
def __handle_query(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the QUERY message type. The message data should be in the
format of a string, "return-peer-id key ttl", where return-peer-id
is the name of the peer that initiated the query, key is the (portion
of the) file name being searched for, and ttl is how many further
levels of peers this query should be propagated on.
"""
# self.peerlock.acquire()
try:
peerid, key, ttl = data.split()
peerconn.senddata(REPLY, 'Query ACK: %s' % key)
except:
self.__debug('invalid query %s: %s' % (str(peerconn), data))
peerconn.senddata(ERROR, 'Query: incorrect arguments')
# self.peerlock.release()
t = threading.Thread(target=self.__processquery,
args=[peerid, key, int(ttl)])
t.start()
#--------------------------------------------------------------------------
def __handle_quit(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the QUIT message type. The message data should be in the
format of a string, "peer-id", where peer-id is the canonical
name of the peer that wishes to be unregistered from this
peer's directory.
"""
self.peerlock.acquire()
try:
peerid = data.lstrip().rstrip()
if peerid in self.getpeerids():
msg = 'Quit: peer removed: %s' % peerid
self.__debug(msg)
peerconn.senddata(REPLY, msg)
self.removepeer(peerid)
else:
msg = 'Quit: peer not found: %s' % peerid
self.__debug(msg)
peerconn.senddata(ERROR, msg)
finally:
self.peerlock.release()
# precondition: may be a good idea to hold the lock before going
# into this function
#--------------------------------------------------------------------------
def model_datareq(self):
#--------------------------------------------------------------------------
""" Used to initiate data request to secondary modelling nodes.
"""
try:
# Get list of all model peers reachable from the current node.
model_peers = self.modelpeerlist(sys.maxsize) # TODO: confirm data type of model_peers
results = []
# Fetch data from secondary model nodes
for peer in model_peers:
print("Requesting game data from supernode at {}:{}...".format(peer[0],peer[1]))
result = self.persistent_connectandsend(peer[0], peer[1], FILEGET, '')
result = ''.join(result).strip()
result = bytes.fromhex(result)
result = pd.read_pickle(BytesIO(result), compression="xz")
# result = peer.senddata(FILEGET)
results.append(result)
print("Data from supernode at {}:{} arrived successfully".format(peer[0],peer[1]))
# Fetch local data on primary model node
results.append(self.extract_model_files())
print("Data from primary supernode {} received successfully".format(self.myid))
# return combined data to calling function (build_model).
return results
except:
traceback.print_exc()
# finally:
        # # Remove temp file - prevent a huge amount of data from being kept on a node
# # after the request has been completed.
# os.remove(path)
#--------------------------------------------------------------------------
def __handle_queryreq(self, peerconn, argument):
#--------------------------------------------------------------------------
""" Handles the QUERYREQ message type. This function is called by the node
that receives a query request.
Parameters:
argument (str):
Specifies the parameters to use in querying the collected data and is
expected to be a parameter (mean, median, mode, variance, stdev),
            start date, and end date separated by commas.
peerconn (BTPeerConnection):
Connection to the peer requesting the query result.
"""
print("Received following query:", argument)
try:
# Get list of all model peers reachable from the current node.
model_peers = self.modelpeerlist(sys.maxsize)
print("List of peers found in network", model_peers)
            fd, path = tempfile.mkstemp()
            os.close(fd)  # the low-level descriptor is not needed; the file is reopened by path below
counts = []
values = []
node_num = 1
parameter = argument.split(',')[0]
error_line = 'Model peer {}: no data found'
result_text = 'Individual model node results -\n'
# Get query result for each model peer node and consolidate.
for peer in model_peers:
result = self.query(argument, peer)
if result == 'None':
text = error_line.format(node_num)
else:
text = self.queryreq_helper(counts, values, result, parameter, node_num)
result_text += text + '\n'
node_num += 1
# Include query result for own data.
self_query = self.dataquery(argument)
if self_query == 'None':
text = error_line.format(node_num)
else:
text = self.queryreq_helper(counts, values, self_query, parameter, node_num)
result_text += text
if parameter == 'mean' or parameter == 'variance' or parameter == 'stdev':
# Calculate weighted averages of results.
result = self.compute_summary_stat(parameter, counts, values)
result_text = result + '\n' + result_text
print("Sending query result:\n", result_text)
with open(path, 'w') as tmp:
tmp.write(result_text)
# Send combined query results back to the requester.
self.send_requested_data(peerconn, path)
except:
traceback.print_exc()
finally:
            # Remove temp file - prevent a huge amount of data from being kept on a node
# after the request has been completed.
os.remove(path)
#--------------------------------------------------------------------------
def compute_summary_stat(self, parameter, counts, values):
#--------------------------------------------------------------------------
""" Computes a summary statistic given the name of a statistic,
an array of calculated values for that statistic, and an array containing
the number of data samples used in calculating each of the individual
values for that statistic.
Parameters:
parameter (str):
Specifies the type of parameter (ex. 'stdev', 'variance', 'mean')
contained in counts.
counts (array of int):
Array of calculated values of the specified parameter type.
values (array of float):
Values is an array such that the value at the i-th index is equal to the
number of data samples used to calculate the i-th value in counts.
Returns:
A str specifying the calculated summary statistic.
"""
value_arr = np.array(values)
count_arr = np.array(counts)
if parameter == 'stdev':
value_arr = np.square(value_arr)
w_sum = np.dot(value_arr, count_arr)
count_sum = np.sum(count_arr)
if count_sum == 0:
w_avg = 0
else:
w_avg = w_sum/count_sum
if parameter == 'stdev':
w_avg = math.sqrt(w_avg)
if parameter == 'stdev' or parameter == 'variance':
return f'Weighted average of {parameter} results - {w_avg:.3f}\n'
else:
return f'Mean of results - {w_avg:.3f}\n'
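    # Hedged worked example (added for illustration, not part of the original class):
    # with hypothetical per-node results
    #     counts = [100, 50]      # rows used by two model nodes
    #     values = [10.0, 12.0]   # each node's mean
    # compute_summary_stat('mean', counts, values) reports
    #     (10.0*100 + 12.0*50) / (100 + 50) = 10.667
    # For 'stdev' the values are squared before weighting and the square root of the
    # weighted average is taken at the end.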
#--------------------------------------------------------------------------
def queryreq_helper(self, counts, values, result, parameter, node_num):
#--------------------------------------------------------------------------
"""Process a query result received from a different model node.
Parameters:
          counts (array of int):
            Array where the value at the i-th index is the number of data samples
            used by a previous model node peer to calculate the i-th value in values.
          values (array of float):
            Array of calculated values of the specified parameter type sent from
            previous model node peers in the network.
          result (str):
            A tuple-convertible string specifying the number of rows used in
            calculating a parameter value and the calculated value itself, which
            are appended to counts and values respectively.
          parameter (str):
            Specifies the type of parameter (ex. 'stdev', 'variance', 'mean')
            contained in values.
node_num(int):
An id number for the peer that sent the calculations contained in
result.
Returns:
A str specifying the information contained in the input argument result.
"""
result = make_tuple(result)
if parameter == 'mean' or parameter == 'variance' or parameter == 'stdev':
counts.append(int(result[0]))
values.append(float(result[1]))
value = float(result[2])
return f'Model peer {node_num}: {value:.3f}'
#--------------------------------------------------------------------------
def query(self, argument, peer):
#--------------------------------------------------------------------------
'''
Sends a message with message type DATAQUERY containing the query argument
through a BTPeerConnection to the peer specified by the input argument peer.
Parameters:
argument (str):
Specifies the parameters to use in querying the collected data and is
expected to contain a parameter (mean, median, mode, variance, stdev),
            start date, and end date separated by commas.
peer (tuple of str and int):
A tuple with the first element specifying the IP address of the node
to send the message to and the second element specifying the port of
the node.
Returns:
A str specifying the query response from the model node specified by peer.
'''
print("Sending query request to peer", peer, ":", argument)
resp = self.connectandsend( peer[0], peer[1], DATAQUERY, argument)
resp = resp[0][1]
print("Received result from peer", peer, ":", resp)
resp = ''.join(resp).strip()
return resp
#--------------------------------------------------------------------------
def dataquery(self, data):
#--------------------------------------------------------------------------
""" Runs a query on the data stored on the specific node.
Parameters:
data (str):
Specifies the parameters to use in querying the collected data and is
expected to contain a parameter (mean, median, mode, variance, stdev),
            start date, and end date separated by commas.
Returns:
A str specifying the result of executing the query on locally-collected
data.
"""
filter = Filter()
if 'game_data' not in os.listdir(self.directory):
return str(None)
parameters = data.split(',')
path = os.path.join(self.directory, 'game_data')
filter.filter(gdir=path, start_date=parameters[1], end_date=parameters[2])
game_data = os.path.join(self.directory, 'filtered_game_data')
health_data = os.path.join(self.directory, 'filtered_health_data')
# No data for specified date range.
if len(os.listdir(game_data)) == 0:
return str(None)
qq = QuickQuery()
query_result = qq.descriptive_stats(parameters[0], 'heart_rate', game_data, health_data)
return str(query_result)
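    # Hedged usage sketch (hypothetical dates, not from the original source):
    #     node.dataquery('mean,2020-01-01,2020-01-31')
    # filters the locally stored game data to that date range and returns the mean
    # heart_rate as a string, or 'None' when no rows fall inside the range.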
#--------------------------------------------------------------------------
def __handle_dataquery(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the DATAQUERY message type. This function is called by a node
that is to run a query on the data it has stored. The result of the query
is sent back to the requester.
Parameters:
data (str):
Specifies the parameters to use in querying the collected data and is
expected to contain a parameter (mean, median, mode, variance, stdev),
            start date, and end date separated by commas.
peerconn (BTPeerConnection):
Connection to the peer requesting the query result.
"""
print("Received query request:", data)
response = self.dataquery(data)
print("Sending query response:", response)
peerconn.senddata(QUERYSEND, response)
#--------------------------------------------------------------------------
def submitquery(self, peer, argument):
#--------------------------------------------------------------------------
""" Requests that a query is run on data stored on all modeling nodes
reachable by the node defined by the input argument peer.
Parameters:
peer (tuple of str and int):
A tuple where the first element is a string equal to the IP address of
the node to which the request is being made and the second element
is an int equal to the port of the node to which the request is being made.
argument (str):
Specifies the parameters to use in querying the collected data and is
expected to contain a parameter (mean, median, mode, variance, stdev),
            start date, and end date separated by commas.
Returns:
A str reporting the received query results.
"""
        # Default to an empty reply so a failed connection does not break the print/return below.
        result = []
        try:
            result = self.persistent_connectandsend(peer[0], peer[1], QUERYREQ, argument)
except:
if self.debug:
traceback.print_exc()
print("Query result received:\n", ''.join(result).strip())
return result
#--------------------------------------------------------------------------
def submitvisrequest(self, peer, argument):
#--------------------------------------------------------------------------
""" Requests that a visualization specified by argument is created for the
data stored on a modeling node specified by peer.
Parameters:
peer (tuple of str and int):
A tuple where the first element is a string equal to the IP address of the
node to which the request is being made and the second element is an int
equal to the port of the node to which the request is being made.
argument (str):
Specifies the visual to create and contains the name of a game and a feature
of recorded health data separated by a comma.
Result:
The bytes for the .png image of the created visual.
"""
        # Default to an empty reply so a failed connection does not break the lines below.
        result = []
        try:
            result = self.persistent_connectandsend(peer[0], peer[1], VISREQ, argument)
except:
if self.debug:
traceback.print_exc()
result = ''.join(result).strip()
print("Received a visualization result", len(bytes.fromhex(result)), "bytes")
return result
#--------------------------------------------------------------------------
def __handle_visrequest(self, peerconn, argument):
#--------------------------------------------------------------------------
""" Requests that a visual is created using the data stored on the node
and the parameters specified by the input argument.
Parameters:
peerconn (BTPeerConnection):
Connection to the peer requesting the visualization.
argument (str):
Specifies the visual to create and contains the name of a game and a feature
of recorded health data separated by a comma.
"""
args = argument.split(',')
game = args[0]
feature = args[1]
print("Received visualization request:", argument)
if 'game_data' in os.listdir(self.directory):
path = os.path.join(self.directory, 'game_data')
filter = Filter()
filter.filter(gdir=path, game=game)
print("hello_again")
vis = Visualize()
path = vis.visualize(os.path.join(self.directory, 'filtered_game_data'), feature)
else:
text = 'No data found to visualize: Model node\'s data \ndirectory may be incorrectly configured'
path = 'error.png'
texttoimage.convert(text, image_file=path, font_size=50, color='black')
print("Sending back a visualization response with", len(open(path, 'rb').read()), "bytes")
self.send_requested_data(peerconn, path)
os.remove(path)
#--------------------------------------------------------------------------
def read_as_bytes(self, filepath):
#--------------------------------------------------------------------------
""" Determines whether the file with a path specified by filepath should
be read as bytes when its data is sent across the network.
Parameters:
filepath (str):
The filepath of the file to be sent across the network.
Return:
A bool indicating whether the file with a path of filepath should be
read as bytes.
"""
filepath_elements = filepath.split('.')
filetype = filepath_elements[-1]
if len(filepath_elements) == 1 or (filetype != 'png' and filetype != 'xz'):
return False
return True
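    # Hedged examples (added for illustration): only compressed pickles and images are
    # streamed as bytes, everything else is sent as plain text.
    #     read_as_bytes('all_game_df.xz')  -> True   (hex-encoded before sending)
    #     read_as_bytes('error.png')       -> True
    #     read_as_bytes('results.txt')     -> False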
#--------------------------------------------------------------------------
def send_requested_data(self, peerconn, filepath):
#--------------------------------------------------------------------------
""" Sends the data contained in the file with a path equal to filepath through
the BTPeerConnection opened with persistent_connectandsend to the node on the
other end of the connection.
Parameters:
peerconn (BTPeerConnection):
Connection to the peer to send the file data to.
filepath (str):
The filepath of the file whose data is to be sent across the network.
"""
read_size = 2048
fd = None
# Stores whether the file is to be read as bytes.
byte_reading = False
chunk_number = 10
try:
if self.read_as_bytes(filepath):
fd = open(filepath, 'rb')
byte_reading = True
chunk_number = 5
else:
fd = open(filepath, 'r')
filedata = ''
i = 0
while True:
data = fd.read(read_size)
if not len(data):
# EOF reached.
while (len(filedata) % 2048) != 0:
filedata += " "
# Note: FILESEND does not have a function handler. The msgtype
# does not seem to matter here, but has previously been used
# in debugging to make sure that the receiver was receiving
# the proper message type.
peerconn.senddata(FILESEND, filedata)
break
if byte_reading:
filedata += data.hex()
else:
filedata += data
i += 1
if (i % chunk_number) == 0:
while(len(filedata) % 2048) != 0:
filedata += " "
peerconn.senddata(FILESEND, filedata)
filedata = ''
except:
traceback.print_exc()
            self.__debug('Error reading file %s' % filepath)
finally:
# Send a message so the connection can be closed in persistent_connectandsend.
peerconn.senddata(REPLY, "DONE")
if fd != None:
fd.close()
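    # Editor's reading of the padding above (hedged, not original commentary): the
    # receiving side appears to consume messages in fixed 2048-character reads, so each
    # FILESEND chunk is right-padded with spaces to a multiple of 2048 and the receiver
    # strips the padding with .strip() after joining the chunks.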
#--------------------------------------------------------------------------
def makerequest(self, peer, intercept):
#--------------------------------------------------------------------------
""" Requests data from all modeling nodes reachable by the node defined by
the input argument peer. The peer is expected to be a tuple where the first
element is a string equal to the IP address of the node to which the request
is being made and the second element is an int equal to the port of the node
        to which the request is being made. The intercept argument should be a string
        with specific information about the request.
"""
        # Default to an empty reply so a failed connection does not raise NameError below.
        result = []
        try:
            result = self.persistent_connectandsend(peer[0], peer[1], BUILDMODEL, intercept)
except:
if self.debug:
traceback.print_exc()
return result
#--------------------------------------------------------------------------
def modelpeerlist(self, hops=1):
#--------------------------------------------------------------------------
"""
Constructs a list of model peers reachable from the current node within a
number of hops equal to the input argument hops, which should be an integer.
Parameters:
hops (int):
Specifies the depth of the depth-first search to be performed through
the network for model peers
Returns:
An array of tuples specifying model peers found in the network, where the
elements at index 0 are peer IP addresses, and the elements at index 1
are peer port numbers.
"""
seen = set()
peers = []
try:
print("Generating a list of supernodes in the network")
for pid in self.getpeerids():
host,port = self.getpeer(pid)
self.modelpeerlisthelper(host, port, seen, peers, hops)
except:
if self.debug:
traceback.print_exc()
return peers
#--------------------------------------------------------------------------
def modelpeerlisthelper(self, host, port, seen, peers, hops=1):
#--------------------------------------------------------------------------
"""
Helper for the modelpeerlist method. If hops is greater than 0 and the node
specified by the input arguments host and port is not included in the input
argument seen, adds the node to seen, adds the node to the input array peers
if it is a model node and then calls the method recursively on each of the
node's peers with hops decremented by 1.
Parameters:
host (str):
The IP address of a node in the network
port (int):
An int equal to the port of the node with IP address specified by host
seen (set of str):
            A set containing the peerids of all nodes seen so far in the search for
model nodes
peers (array of tuples of str and int):
An array containing tuples specifying the model nodes found so far in the
search for model nodes. For each tuple, the elements at index 0 are peer
IP addresses, and the elements at index 1 are peer port numbers
hops (int):
Specifies the depth of the depth-first search to be performed through
the network for model peers
"""
if not hops:
return
self.__debug("Searching for model peer from (%s,%s)" % (host,port))
try:
_, peerid = self.connectandsend(host, port, PEERNAME, '')[0]
self.__debug("contacted " + peerid)
if peerid in seen:
# Peer has already been explored.
return
else:
seen.add(peerid)
# Check if the peer represented by host and port is a model node.
print("Asking if peer at", "{}:{}".format(host, str(port)), "is a supernode")
resp = self.connectandsend(host, port, MODEL, '')[0]
self.__debug(str(resp))
print("Supernode response for peer at", "{}:{}".format(host, str(port)), "is:", resp[1])
if (resp[0] == REPLY) and (resp[1] == 'True'):
# Peer is a model node.
peers.append((host, int(port)))
# Do recursive depth first search to find more peers.
resp = self.connectandsend(host, port, LISTPEERS, '',
pid=peerid)
if len(resp) > 1:
resp.reverse()
resp.pop() # get rid of header count reply
while len(resp):
nextpid,host,port = resp.pop()[1].split()
if nextpid != self.myid:
self.modelpeerlisthelper(host, port, seen, peers, hops - 1)
except:
if self.debug:
traceback.print_exc()
#--------------------------------------------------------------------------
def modelpeersearch(self, hops=1):
#--------------------------------------------------------------------------
"""
Searches the network for a single model node reachable from the current node
within a number of hops equal to the input argument hops, which should be an
integer.
Parameters:
hops (int):
Specifies the depth of the depth-first search to be performed through
the network for model peers
Returns:
A tuple where the first element is a string equal to the IP address of a model
node and the second element is an int equal to the port of the model node.
"""
seen = set()
try:
print("Searching for a supernode in the network to become a primary supernode")
for pid in self.getpeerids():
host,port = self.getpeer(pid)
result = self.modelpeersearchhelper(host, port, seen, hops)
print("Primary supernode is:", result)
if result is not None:
return result
except:
if self.debug:
traceback.print_exc()
#--------------------------------------------------------------------------
def modelpeersearchhelper(self, host, port, seen, hops=1):
#--------------------------------------------------------------------------
"""
        Helper for the modelpeersearch method. If hops is greater than 0 and the node
        specified by the input arguments host and port is not represented in the input
        set seen, adds the node to seen and checks whether it is a model node. If so,
        returns a tuple where the first element is host and the second is port cast to
        an int. Otherwise, calls the method recursively on each of the node's peers
        with hops decremented by 1.
Parameters:
host (str):
The IP address of a node in the network
port (int):
An int equal to the port of the node with IP address specified by host
seen (set of str):
            A set containing the peerids of all nodes seen so far in the search for
model nodes
hops (int):
Specifies the depth of the depth-first search to be performed through
the network for model peers
"""
if not hops:
return
self.__debug("Searching for model peer from (%s,%s)" % (host,port))
try:
_, peerid = self.connectandsend(host, port, PEERNAME, '')[0]
self.__debug("contacted " + peerid)
if peerid in seen:
# Peer has already been explored.
return None
else:
seen.add(peerid)
# Check if the peer represented by host and port is a model node.
print("Asking if peer at", "{}:{}".format(host, str(port)), "is a supernode")
resp = self.connectandsend(host, port, MODEL, '')[0]
self.__debug(str(resp))
print("Supernode response for peer at", "{}:{}".format(host, str(port)), "is:", resp[1])
if (resp[0] == REPLY) and (resp[1] == 'True'):
# Peer represented by host and port is a model node.
return (host, int(port))
# Do recursive depth first search to find more peers.
resp = self.connectandsend(host, port, LISTPEERS, '',
pid=peerid)
if len(resp) > 1:
resp.reverse()
resp.pop() # get rid of header count reply
while len(resp):
nextpid,host,port = resp.pop()[1].split()
if nextpid != self.myid:
result = self.modelpeersearchhelper(host, port, seen, hops - 1)
if result is not None:
return result
except:
print("ERROR ", traceback.print_exc())
if self.debug:
traceback.print_exc()
#--------------------------------------------------------------------------
def __handle_model(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the MODEL message type by sending the value of the model
variable through the BTPeerConnection peerconn. Message data is not used.
Parameters:
peerconn (BTPeerConnection):
Connection to the peer requesting the value of the model instance
variable.
"""
print("Replying that this node's supernode status is:", self.model)
peerconn.senddata(REPLY, str(self.model))
#--------------------------------------------------------------------------
def persistent_connectandsend( self, host, port, msgtype, msgdata,
pid=None, waitreply=True ):
#--------------------------------------------------------------------------
"""
persistent_connectandsend( host, port, message type, message data, peer id,
wait for a reply ) -> [ first reply data , second reply data ... ]
Connects and sends a message to the specified host:port. The reply/replies
from the node specified by host:port are added to the array msgreply until
a reply with a message type of REPLY is received.
Parameters:
host (str):
The IP address of a node to connect to
port (int):
An int equal to the port of the node to connect to
msgtype (str):
The type of the message to send to the peer specified by host:port
msgdata (str):
            The content of the message to send to the peer specified by host:port
Returns:
An array of str specifying the response data received from the contacted peer.
"""
msgreply = []
try:
peerconn = BTPeerConnection( pid, host, port, debug=self.debug )
peerconn.senddata( msgtype, msgdata )
self.__debug( 'Sent %s: %s' % (pid, msgtype) )
while True:
onereply = peerconn.recvdata()
if (onereply[0] == REPLY):
break
msgreply.append( onereply[1] )
peerconn.close()
except KeyboardInterrupt:
raise
except:
            # Print the traceback once; the original printed it a second time when debug was set.
            traceback.print_exc()
return msgreply
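    # Hedged usage sketch (hypothetical host/port, not from the original source):
    #     chunks = self.persistent_connectandsend('192.168.1.10', 7000, FILEGET, '')
    #     payload = ''.join(chunks).strip()
    # The loop above keeps collecting FILESEND chunks until the sender's closing REPLY
    # ("DONE" in send_requested_data) arrives, which ends the connection.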
#--------------------------------------------------------------------------
def __handle_fileget(self, peerconn, data):
#--------------------------------------------------------------------------
""" Handles the FILEGET message type by sending the value of the model
variable through the BTPeerConnection peerconn.
Method is used to extract, compress and send all game data to a requesting
primary model node
"""
print("FILEGET request received ")
self.peerlock.acquire()
game_dir = self.directory + "/game_data"
compressed_file_path = game_dir + '/all_game_df.xz'
try:
all_game_df = self.extract_model_files()
# if no data is found
if 'NO DATA' in all_game_df:
print("No game data found on {}. Sending a 'NO DATA' message to {}:{}...".format(self.myid, peerconn.host, peerconn.port ))
peerconn.senddata(REPLY, all_game_df)
# if data exists
else:
print("Preparing to send compressed game data on {} to supernode at {}:{}...".format(self.myid, peerconn.host, peerconn.port))
# Save and compress game data
all_game_df.to_pickle(compressed_file_path)
# Send compressed data from each reachable model node back to the requester.
self.send_requested_data(peerconn, compressed_file_path)
print("Game data on {} sent to supernode at {}:{}...".format(self.myid, peerconn.host, peerconn.port))
except:
traceback.print_exc()
finally:
            # Remove temp file - prevent a huge amount of data from being kept on a node
# after the request has been completed.
os.remove(compressed_file_path)
print("Deleted compressed game data on {}".format(self.myid))
self.peerlock.release()
#--------------------------------------------------------------------------
def extract_model_files(self):
#--------------------------------------------------------------------------
""" Used to extract all game data used for model building into a single dataframe.
"""
print("Checking for game data on supernode at {}".format(self.myid))
game_dir = self.directory + "/game_data"
# Fetch all file names in the game directory
game_files = [ i for i in os.listdir(game_dir) if 'csv' in i]
# Check that there is game data available
if len(game_files) > 0:
print("Found game data on supernode at {}".format(self.myid))
dataframes = []
for i in game_files:
i = os.path.join(game_dir, i)
game_df = pd.read_csv(i, parse_dates=["timestamp"])
#game_df["game"] = i.split(".")[0].rsplit("_")[-1] # adds name of the game that has the data
game_df["game"] = i.split(".")[-2].rsplit("_")[-1]
dataframes.append(game_df)
# Merge all game data
print("Extracted game data on supernode at {}.".format(self.myid))
return pd.concat(dataframes)
else:
return 'NO DATA FROM {}'.format(self.myid)
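    # Hedged example of the filename convention assumed above (illustrative only):
    #     "game_data/2020-01-05_tetris.csv".split(".")[-2].rsplit("_")[-1]  ->  "tetris"
    # i.e. the text after the last underscore of the file stem is used as the game name.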
#--------------------------------------------------------------------------
def __handle_buildmodel(self, peerconn, intercept):
#--------------------------------------------------------------------------
""" Used to build model from game data across all nodes in the network
"""
print("Received build model message...")
self.peerlock.acquire()
try:
print("Fetching data from other super nodes...")
all_df_compressed = self.model_datareq()
print("Data from all supernodes returned")
# Filter out empty data
# all_df_compressed = [i for i in all_df_compressed if 'NO DATA' not in i]
all_dfs = []
for ind, compressed_df in enumerate(all_df_compressed):
if ind != len(all_df_compressed) - 1:
if 'NO DATA' not in compressed_df:
print("Casting recieved data to the correct types...")
compressed_df['timestamp'] = pd.to_datetime(compressed_df['timestamp'])
compressed_df['heart_rate'] = compressed_df['heart_rate'].astype(int)
compressed_df['steps'] = compressed_df['steps'].astype(int)
compressed_df['distance(miles)'] = compressed_df['distance(miles)'].astype(float)
compressed_df['active_calories_burned'] = compressed_df['active_calories_burned'].astype(float)
compressed_copy = compressed_df[['timestamp', 'heart_rate', 'steps', 'distance(miles)', 'active_calories_burned']]
df = compressed_copy.diff()[:-1] # generates changes in values with each time period of 1 min
df['game'] = compressed_df['game'].to_list()[1:]
df = df[1:]
all_dfs.append(df)
else:
if 'NO DATA' not in compressed_df:
# Last entry is local data and wasn't compressed so merge immediately
compressed_copy = compressed_df[['timestamp', 'heart_rate', 'steps', 'distance(miles)', 'active_calories_burned']]
df = compressed_copy.diff()[:-1] # generates changes in values with each time period of 1 min
df['game'] = compressed_df['game'].to_list()[1:]
df = df[1:]
all_dfs.append(df)
# Merging all dataframes
print("Merging all data from all supernodes...")
data_df =
|
pd.concat(all_dfs)
|
pandas.concat
|
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.util.testing as pandas_tm
import pytest
from dask.dataframe.utils import tm
from pytest import param
import ibis
from .. import Backend
from ..client import DaskTable
pytestmark = pytest.mark.dask
def make_dask_data_frame(npartitions):
return dd.from_pandas(pandas_tm.makeDataFrame(), npartitions=npartitions)
@pytest.fixture
def client(npartitions):
return Backend().connect(
{
'df': dd.from_pandas(
pd.DataFrame({'a': [1, 2, 3], 'b': list('abc')}),
npartitions=npartitions,
),
'df_unknown': dd.from_pandas(
|
pd.DataFrame({'array_of_strings': [['a', 'b'], [], ['c']]})
|
pandas.DataFrame
|
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt =
|
Timestamp('2000-01-01 00:00:00', tz=tz)
|
pandas.Timestamp
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/01/02 17:37
Desc: Fetch Jiaoyifamen (JYFM) tools data: https://www.jiaoyifamen.com/tools/
Jiaoyifamen home page: https://www.jiaoyifamen.com/
# JYFM-tools-arbitrage analysis
JYFM-tools-arbitrage analysis-calendar spread (free spread)
JYFM-tools-arbitrage analysis-free price ratio
JYFM-tools-arbitrage analysis-multi-leg combination
JYFM-tools-arbitrage analysis-FullCarry
JYFM-tools-arbitrage analysis-arbitrage spread matrix
# JYFM-tools-information summary
JYFM-tools-information summary-research report query
JYFM-tools-information summary-trading calendar
# JYFM-tools-position analysis
JYFM-tools-position analysis-futures positions
JYFM-tools-position analysis-seat (broker) positions
JYFM-tools-position analysis-position seasonality
# JYFM-tools-fund analysis
JYFM-tools-fund analysis-fund flow
JYFM-tools-fund analysis-deposited funds
JYFM-tools-fund analysis-fund seasonality
JYFM-tools-fund analysis-turnover ranking
# JYFM-tools-seat analysis
JYFM-tools-seat analysis-position structure
JYFM-tools-seat analysis-position cost
JYFM-tools-seat analysis-position building process
# JYFM-tools-warehouse receipt analysis
JYFM-tools-warehouse receipt analysis-warehouse receipt daily report
JYFM-tools-warehouse receipt analysis-warehouse receipt query
JYFM-tools-warehouse receipt analysis-virtual/physical ratio daily report
JYFM-tools-warehouse receipt analysis-virtual/physical ratio query
# JYFM-tools-term structure analysis
JYFM-tools-term structure analysis-basis daily report
JYFM-tools-term structure analysis-basis analysis
JYFM-tools-term structure analysis-term structure
JYFM-tools-term structure analysis-price seasonality
# JYFM-tools-market analysis
JYFM-tools-market analysis-market data
# JYFM-tools-trading rules
JYFM-tools-trading rules-position limit rules
JYFM-tools-trading rules-warehouse receipt validity period
JYFM-tools-trading rules-product manuals
"""
import time
import matplotlib.pyplot as plt
import pandas as pd
import requests
from mssdk.futures_derivative.cons import (
csa_payload,
csa_url_spread,
csa_url_ratio,
csa_url_customize,
)
from mssdk.futures_derivative.jyfm_login_func import jyfm_login
# pd.set_option('display.max_columns', None)
# JYFM-tools-arbitrage analysis
def jyfm_tools_futures_spread(
type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
"""
    JYFM-tools-arbitrage analysis-calendar spread (free spread)
:param type_1: str
:param type_2: str
:param code_1: str
:param code_2: str
:param plot: Bool
:return: pandas.Series or pic
"""
csa_payload_his = csa_payload.copy()
csa_payload_his.update({"type1": type_1})
csa_payload_his.update({"type2": type_2})
csa_payload_his.update({"code1": code_1})
csa_payload_his.update({"code2": code_2})
res = requests.get(csa_url_spread, params=csa_payload_his, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
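# Hedged usage sketch (placeholder credentials; assumes jyfm_login(account, password)
# returns the authenticated request headers, as suggested by the import above):
#     headers = jyfm_login("your_account", "your_password")
#     spread_series = jyfm_tools_futures_spread(type_1="RB", type_2="HC",
#                                               code_1="01", code_2="05",
#                                               headers=headers, plot=False)
# With plot=False the function returns the raw pandas.Series of daily spread values.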
def jyfm_tools_futures_ratio(
type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
"""
    JYFM-tools-arbitrage analysis-free price ratio
:param type_1: str
:param type_2: str
:param code_1: str
:param code_2: str
:param plot: Bool
:return: pandas.Series or pic
2013-01-04 -121
2013-01-07 -124
2013-01-08 -150
2013-01-09 -143
2013-01-10 -195
...
2019-10-21 116
2019-10-22 126
2019-10-23 123
2019-10-24 126
2019-10-25 134
"""
csa_payload_his = csa_payload.copy()
csa_payload_his.update({"type1": type_1})
csa_payload_his.update({"type2": type_2})
csa_payload_his.update({"code1": code_1})
csa_payload_his.update({"code2": code_2})
res = requests.get(csa_url_ratio, params=csa_payload_his, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_customize(
formula="RB01-1.6*I01-0.5*J01-1200", headers="", plot=True
):
"""
    JYFM-tools-arbitrage analysis-multi-leg combination
:param formula: str
:param plot: Bool
:return: pandas.Series or pic
"""
params = {"formula": formula}
res = requests.get(csa_url_customize, params=params, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_full_carry(
begin_code="05", end_code="09", ratio="4", headers=""
):
"""
    JYFM-tools-arbitrage analysis-FullCarry
    https://www.jiaoyifamen.com/tools/future/full/carry?beginCode=05&endCode=09&ratio=4
    Note: the forward carry cost consists mainly of storage fees and financing costs; commissions are a very small share and are ignored. VAT is uncertain and is also excluded from the calculation. When using this table, be aware of warehouse receipt validity, premium/discount issues, and the extra (higher) costs of fresh-produce varieties. The actual Full Carry level is slightly higher than the estimate given here.
    :param begin_code: start month
    :type begin_code: str
    :param end_code: end month
    :type end_code: str
    :param ratio: percentage; enter the absolute value here
    :type ratio: str
    :param headers: request headers
    :type headers: dict
    :return: estimated forward-market carry (roll) cost
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/future/full/carry"
params = {
"beginCode": begin_code,
"endCode": end_code,
"ratio": ratio,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["table_data"])
def jyfm_tools_futures_arbitrage_matrix(
category="1", type1="RB", type2="RB", headers=""
):
"""
    JYFM-tools-arbitrage analysis-calendar spread matrix
    https://www.jiaoyifamen.com/tools/future/arbitrage/matrix
    :param category: 1: calendar spread; 2: free spread; 3: free price ratio
    :type category: str
    :param type1: variety one
    :type type1: str
    :param type2: variety two
    :type type2: str
    :param headers: request headers
    :type headers: dict
    :return: the corresponding matrix
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/future/arbitrage/matrix"
params = {
"category": category,
"type1": type1,
"type2": type2,
"_": "1583846468579",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_exchange_symbol_dict():
jyfm_exchange_symbol_dict_inner = {
"中国金融期货交易所": {
"TF": "五债",
"T": "十债",
"IC": "中证500",
"IF": "沪深300",
"IH": "上证50",
"TS": "二债",
},
"郑州商品交易所": {
"FG": "玻璃",
"RS": "菜籽",
"CF": "棉花",
"LR": "晚稻",
"CJ": "红枣",
"JR": "粳稻",
"ZC": "动力煤",
"TA": "PTA",
"SA": "纯碱",
"AP": "苹果",
"WH": "强麦",
"SF": "硅铁",
"MA": "甲醇",
"CY": "棉纱",
"RI": "早稻",
"OI": "菜油",
"SM": "硅锰",
"RM": "菜粕",
"UR": "尿素",
"PM": "普麦",
"SR": "白糖",
},
"大连商品交易所": {
"PP": "PP",
"RR": "粳米",
"BB": "纤板",
"A": "豆一",
"EG": "乙二醇",
"B": "豆二",
"C": "玉米",
"JM": "焦煤",
"I": "铁矿",
"J": "焦炭",
"L": "塑料",
"M": "豆粕",
"P": "棕榈",
"CS": "淀粉",
"V": "PVC",
"Y": "豆油",
"JD": "鸡蛋",
"FB": "胶板",
"EB": "苯乙烯",
},
"上海期货交易所": {
"SS": "不锈钢",
"RU": "橡胶",
"AG": "沪银",
"AL": "沪铝",
"FU": "燃油",
"RB": "螺纹",
"CU": "沪铜",
"PB": "沪铅",
"BU": "沥青",
"AU": "沪金",
"ZN": "沪锌",
"SN": "沪锡",
"HC": "热卷",
"NI": "沪镍",
"WR": "线材",
"SP": "纸浆",
},
"上海国际能源交易中心": {"SC": "原油", "NR": "20号胶"},
}
return jyfm_exchange_symbol_dict_inner
# JYFM-tools-information summary
def jyfm_tools_research_query(limit="100", headers=""):
"""
    JYFM-tools-information summary-research report query
    https://www.jiaoyifamen.com/tools/research/qryPageList
    :param limit: number of records to return
    :type limit: str
    :return: research report information data
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/research/qryPageList"
params = {
"page": "1",
"limit": limit,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_tools_trade_calendar(trade_date="2020-01-03", headers=""):
"""
    JYFM-tools-information summary-trading calendar
    This function can also return trading-calendar data for future dates
    https://www.jiaoyifamen.com/tools/trade-calendar/events
    :param trade_date: specified trading day
    :type trade_date: str
    :return: trading-calendar data for the specified trading day
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/trade-calendar/events"
params = {
"page": "1",
"limit": "1000",
"day": trade_date,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
# JYFM-tools-position analysis
def jyfm_tools_position_detail(
symbol="JM", code="jm2005", trade_date="2020-01-03", headers=""
):
"""
    JYFM-tools-position analysis-futures positions
    :param symbol: specified variety
    :type symbol: str
    :param code: specified contract
    :type code: str
    :param trade_date: specified trading day
    :type trade_date: str
    :param headers: headers with cookies
    :type headers: dict
    :return: futures position data for the specified variety, contract and trading day
:rtype: pandas.DataFrame
"""
url = f"https://www.jiaoyifamen.com/tools/position/details/{symbol}?code={code}&day={trade_date}&_=1578040551329"
res = requests.get(url, headers=headers)
return pd.DataFrame(res.json()["short_rank_table"])
def jyfm_tools_position_seat(seat="永安期货", trade_date="2020-01-03", headers=""):
"""
    JYFM-tools-position analysis-seat (broker) positions
    :param seat: specified futures company (broker)
    :type seat: str
    :param trade_date: specific trading day
    :type trade_date: str
    :param headers: headers with cookies
    :type headers: dict
    :return: seat position data for the specified broker on the specified trading day
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/position/seat"
params = {
"seat": seat,
"day": trade_date,
"type": "",
"_": "1578040989932",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_tools_position_season(symbol="RB", code="05", headers=""):
"""
    JYFM-tools-position analysis-position seasonality
    https://www.jiaoyifamen.com/tools/position/season
    :param symbol: specific variety
    :type symbol: str
    :param code: specific contract month
    :type code: str
    :param headers: headers with cookies
    :type headers: dict
    :return: seasonal pattern of positions for the contract
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/position/season"
params = {
"type": symbol,
"code": code,
}
res = requests.get(url, params=params, headers=headers)
data_json = res.json()
temp_df = pd.DataFrame(
[
data_json["year2013"],
data_json["year2014"],
data_json["year2015"],
data_json["year2016"],
data_json["year2017"],
data_json["year2018"],
data_json["year2019"],
data_json["year2020"],
],
columns=data_json["dataCategory"],
).T
temp_df.columns = ["2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"]
return temp_df
# JYFM-tools-fund analysis
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
    JYFM-tools-fund analysis-fund flow
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: specified trading day
    :type trade_date: str
    :param indicator: "期货品种资金流向排名" (fund flow ranking by variety) or "期货主力合约资金流向排名" (fund flow ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: fund flow data for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种资金流向排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["flowCategory"]),
data_json["flowCategory"],
data_json["flowValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantFlowCategory"]),
data_json["dominantFlowCategory"],
data_json["dominantFlowValue"],
],
index=["date", "symbol", "fund"],
).T
def jyfm_tools_position_fund_down(
trade_date="2020-02-24", indicator="期货品种沉淀资金排名", headers=""
):
"""
    JYFM-tools-fund analysis-deposited funds
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: specified trading day
    :type trade_date: str
    :param indicator: "期货品种沉淀资金排名" (deposited-funds ranking by variety) or "期货主力合约沉淀资金排名" (deposited-funds ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: deposited funds for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种沉淀资金排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["precipitationCategory"]),
data_json["precipitationCategory"],
data_json["precipitationValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]]
* len(data_json["dominantPrecipitationCategory"]),
data_json["dominantPrecipitationCategory"],
data_json["dominantPrecipitationValue"],
],
index=["date", "symbol", "fund"],
).T
def jyfm_tools_position_fund_season(symbol="RB", code="05", headers=""):
"""
    JYFM-tools-fund analysis-fund seasonality
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param symbol: specified variety
    :type symbol: str
    :param code: contract expiry month
    :type code: str
    :param headers: headers with cookies
    :type headers: dict
    :return: fund seasonality for the specified contract
:rtype: pandas.DataFrame
"""
params = {
"type": symbol,
"code": code,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/season"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
data_df = pd.DataFrame(
[
data_json["dataCategory"],
data_json["year2013"],
data_json["year2014"],
data_json["year2015"],
data_json["year2016"],
data_json["year2017"],
data_json["year2018"],
data_json["year2019"],
data_json["year2020"],
],
index=["date", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"],
).T
return data_df
def jyfm_tools_position_fund_deal(
trade_date="2020-02-24", indicator="期货品种成交量排名", headers=""
):
"""
    JYFM-tools-fund analysis-turnover ranking
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: specified trading day
    :type trade_date: str
    :param indicator: "期货品种成交量排名" (volume ranking by variety) or "期货主力合约成交量排名" (volume ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: turnover ranking for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种成交量排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["turnOverCategory"]),
data_json["turnOverCategory"],
data_json["turnOverValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantTurnOverCategory"]),
data_json["dominantTurnOverCategory"],
data_json["dominantTurnOverValue"],
],
index=["date", "symbol", "fund"],
).T
# JYFM-tools-seat analysis-position structure
def jyfm_tools_position_structure(
trade_date="2020-03-02", seat="永安期货", indicator="持仓变化", headers=""
):
"""
    JYFM-tools-seat analysis-position structure
    https://www.jiaoyifamen.com/tools/position/seat
    :param trade_date: specified trading day
    :type trade_date: str
    :param seat: broker name, e.g., seat="永安期货"
    :type seat: str
    :param indicator: one of "持仓变化" (position change), "净持仓分布" (net position distribution), "总持仓分布" (total position distribution); or the aggregated variants "持仓变化总", "净持仓分布总", "总持仓分布总"
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: position structure of the specified broker on the specified trading day
:rtype: pandas.DataFrame
"""
indicator_dict = {"持仓变化": 1, "净持仓分布": 2, "总持仓分布": 3}
params = {
"seat": seat,
"day": trade_date,
"type": indicator_dict[indicator],
"_": int(time.time() * 1000),
}
url = "https://www.jiaoyifamen.com/tools/position/struct"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "持仓变化":
return pd.DataFrame(data_json["varieties"])
if indicator == "净持仓分布":
return pd.DataFrame(data_json["varieties"])
if indicator == "总持仓分布":
return pd.DataFrame(data_json["varieties"])
if indicator == "持仓变化总":
return
|
pd.DataFrame(data_json["ratio"])
|
pandas.DataFrame
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas
import numpy as np
from astroquery.mast import Observations
from . import io
KNOWN_INSTRUMENTS = ["galex", "sdss", "panstarrs"]
def query_metadata(instrument, ra, dec):
""" query metadata for the given intrument and coordinates
the metadata are information about filename and url
"""
return eval("query_%s_metadata(ra, dec)"%_test_instrument_(instrument))
def load_metadata(instrument, load_empty=False):
""" Load existing metadata for the given instrument
Returns
-------
Pandas.DataFrame
    (columns are (at least) "name","ra","dec","filters", "project", "basename", "baseurl")
"""
empty = pandas.DataFrame(columns=["name","ra","dec","filters", "project",
"basename", "baseurl"])
if load_empty:
return empty
metadata_file = _get_metadata_file_(_test_instrument_(instrument))
return pandas.read_csv(metadata_file) if os.path.exists(metadata_file) else empty
def get_directory(instrument):
""" The default instrument directory on your computer """
return io.DATAPATH+"%s/"%instrument.upper()
def metadata_to_url(instrument, baseurl, basename, bands, **kwargs):
""" build url (or local fullpath) from metadata information """
return eval("_%s_info_to_urlpath_(baseurl=baseurl, basename=basename, bands=bands,**kwargs)"%_test_instrument_(instrument) )
def _get_metadata_file_(instrument):
""" returns the metadata associated to the given instruement """
return eval("_%s_METADATA_FILE"%instrument.upper())
def _test_instrument_(instrument):
""" """
if instrument.lower() not in KNOWN_INSTRUMENTS:
raise NotImplementedError("unknown instrument: %s"%instrument +"\n"+"known instruments: "+",".join(KNOWN_INSTRUMENTS))
return instrument.lower()
##########################
# #
# GENERAL TOOLS #
# #
##########################
class _Query_( object ):
""" Simply Class to manage data IO """
INSTRUMENT = "to_be_defined"
def __init__(self, empty=False):
""" initialize query. It loads the known `instrument` meta data. """
self.metadata = load_metadata(self.INSTRUMENT, load_empty=empty)
# --------- #
# GETTER #
# --------- #
def get_target_data(self, targetname, must_exists=True, fromdir="default",
filters="*", **kwargs):
""" returns the full path of data on your computer.
"""
if fromdir is None or fromdir in ["default"]:
fromdir = self._default_dldir
urls, localpaths = self._build_target_data_url_and_path_(targetname, fromdir,
filters=filters, **kwargs)
return [l_ for l_ in localpaths if os.path.exists(l_) or not must_exists]
def get_target_coords(self, targetname):
""" """
if not self.is_target_known(targetname):
raise AttributeError("unknown target. Please run download_target_metadata()")
# This assumes all entry at the same name have the same coordinate as it should.
return np.asarray(self.metadata[self.metadata["name"]==targetname
].iloc[0].get(["ra","dec"]).values, dtype="float")
def get_target(self, targetname):
""" Loads an astrobject target (name, ra, dec) and returns it """
from astrobject import get_target
ra,dec = self.get_target_coords(targetname)
return get_target(name=targetname, ra=ra, dec=dec)
def get_target_instruments(self, targetname, cachedl=False, filters="*"):
""" Return a list of Astrobject's Instrument for each entry coresponding to the given target """
if not self.is_target_known(targetname):
raise AttributeError("unknown target. Please run download_target_metadata(), and download the associated files")
from astrobject import instruments
target_data = self.get_target(targetname)
# Cache Download
load_prop = dict(target=target_data, instrument=self.INSTRUMENT.lower())
if cachedl:
sourcefile = self.download_target_data(targetname, "default", filters=filters, dl=False)[0]
load_prop["cache"]=False
else:
sourcefile = self.get_target_data(targetname,filters=filters)
# load files
return [instruments.get_instrument(f_, **load_prop)
for f_ in sourcefile]
# ------------- #
# Downloader #
# ------------- #
def query_metadata(self, ra, dec):
""" """
df_ = query_metadata(self.INSTRUMENT, ra, dec)
df_["ra"] = ra
df_["dec"] = dec
return df_
def download_target_metadata(self, targetname, ra, dec,
update=True, store=True,
overwrite=False, dl=True):
""" Look for metadata online archive, save them and download the corresponding files.
(options enables to store or not and download or not)
Parameters:
-----------
targetname: [str]
instrument metadata will be associated to this name.
ra,dec: [float, float] // in degree
Coordinates, in degree, of the target.
// options //
update: [bool] -optional-
Shall the downloaded metadata be inserted in the object's self.metadata ?
=> If False, the downloaded metadata is returned
store: [bool] -optional-
If the downloaded metadata has been inserted to the object's self.metadata
shall this file stored in your computer be updated too ? [you should !]
overwrite: [bool] -optional-
If the object's metadata already contains the target, shall we overwrite it ?
If not, this entire function is skipped
(see self.download_target_data(targetname) do download data of already known targets)
dl: [bool] -optional-
Shall the data associated to the target's metadata be downloaded ?
Returns
-------
None (or DataFrame if update=False)
"""
if self.is_target_known(targetname) and not overwrite:
print("no need")
return
df_ = self.query_metadata(ra,dec)
df_["name"] = targetname
if not update:
return df_
# merge with current
if self.is_target_known(targetname):
self.metadata.drop(index=self.metadata.index[self.metadata["name"]==targetname], inplace=True)
self.metadata = pandas.concat([self.metadata, df_], sort=False)
if store:
self.store()
# Downloading
if dl:
self.download_target_data(targetname)
return 0 # 0 means no problem
def store(self):
""" """
fileout = _get_metadata_file_(self.INSTRUMENT)
if not os.path.isdir(os.path.dirname(fileout)):
if io.DATAPATH == "_notdefined_":
raise AttributeError("You must define the global variable DATAPATH to bve able to download/store data")
os.mkdir(os.path.dirname(fileout))
self.metadata.to_csv(fileout, index=False)
def download_target_data(self, targetname, dirout="default",
overwrite=False,
dl=True, **kwargs):
""" Download the target photometric data.
Parameters
----------
targetname:
Name of a target known by the class
dirout: [string] -optional-
Where shall the data be downloaded
- "default": the default local structure | use this if unsure
- "PATH": provide any path, the data will be downloaded here
- "StringIO": download the data inside StringIO files [they will be returned]
overwrite: [bool] -optional-
If the file already exists where you want to download them, should this overwrite them?
dl: [bool] -optional-
Should the download be actually launched ?
If False: the returns the urls to be downloaded and where they will be.
**kwargs goes to _build_target_data_url_and_path_
Returns
-------
None (or list of StringIO if dirout='StringIO')
"""
if dirout is None or dirout in ["default"]:
dirout = self._default_dldir
if io.DATAPATH == "_notdefined_":
raise AttributeError("You must define the global variable DATAPATH to bve able to download/store data")
if dirout in ["StringIO", "stringio", "iostring", "io", "BytesIO","BytesIO","bytes"]:
urls = self._build_target_data_url_and_path_(targetname, "default", **kwargs)[0]
# Bytes IO are more suitable for internet requests
localpaths = ["BytesIO" for i in range(len(urls))]
is_stringio=True
overwrite=True
else:
urls, localpaths = self._build_target_data_url_and_path_(targetname, dirout, **kwargs)
is_stringio=False
if not dl:
return urls, localpaths
return [io.download_single_url(url_, localpath_, overwrite=overwrite)
for url_, localpath_ in zip(urls, localpaths)]
def _build_target_data_url_and_path_(self, targetname, dirout, filters=None, **kwargs):
""" Returns the URL and download location of data """
if not self.is_target_known(targetname):
raise AttributeError("unknown target. Please run download_target_metadata()")
url_ = np.asarray([metadata_to_url(self.INSTRUMENT, row["baseurl"], row["basename"], bands=row["filters"], **kwargs)
for index_, row in self.metadata[self.metadata["name"]==targetname].iterrows()
if (filters is None or filters in ["all","*"]) or row["filters"] in filters]).flatten()
localpath_ = np.asarray([metadata_to_url(self.INSTRUMENT, dirout, row["basename"], bands=row["filters"], **kwargs)
for index_, row in self.metadata[self.metadata["name"]==targetname].iterrows()
if (filters is None or filters in ["all","*"]) or row["filters"] in filters]).flatten()
return url_, localpath_
def is_target_known(self, targetname):
""" Test if the given target has known metadata. """
return targetname in self.known_targets
##################
# Properties #
##################
@property
def known_targets(self):
""" list of targets inside metadata """
return self.metadata["name"].values
@property
def _default_dldir(self):
""" """
return get_directory(self.INSTRUMENT)
# ====================== #
# #
# GALEX #
# #
# ====================== #
GALEX_DIR = io.DATAPATH+"GALEX/"
_GALEX_METADATA_FILE = GALEX_DIR+"target_source.csv"
def query_mast(ra, dec, instrument=None, radius="10 arcsec"):
""" returns a table containing the existing data information """
from astroquery.mast import Observations
t = Observations.query_region("%.5f %+.5f"%(ra,dec), radius=radius)
if instrument is not None:
return t[t["obs_collection"]==instrument]
return t
def _galex_info_to_urlpath_(baseurl, basename, which=["int", "skybg"], bands=["NUV","FUV"]):
""" Build URL or fullpath for the given data """
return [[baseurl+"/"+basename+"-%sd-%s.fits.gz"%(band[0].lower(),todl_) for band in np.atleast_1d(bands)]
for todl_ in np.atleast_1d(which)]
def query_galex_metadata(ra, dec):
""" look for GALEX meta data inside MAST archive """
t = query_mast(ra, dec, instrument="GALEX")
df = pandas.DataFrame(dict(t[["filters", "project","target_name"]]))
df["basename"] = [t_.replace("_1_","_sg") if t_.startswith("AIS") else t_ for t_ in df.pop("target_name")]
df["basename"] = [b_.replace("_sg","_sg0") if "_sg" in b_ and len(b_.split("_sg")[-1])==1 else b_ for b_ in df["basename"]]
df = df.assign(baseurl= ["/".join(url_.split("/")[:-1]) for url_ in t["dataURL"].data])
return df
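# Editor's note (hedged reading of the rewrites above, not original commentary): the two
# "basename" adjustments normalise MAST target names for the GALEX All-sky Imaging Survey,
# e.g. a hypothetical "AIS_105_1_25" becomes "AIS_105_sg25", and a single-digit sub-grid
# such as "AIS_105_sg5" becomes "AIS_105_sg05".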
#
# CLASS
#
class GALEXQuery( _Query_ ):
""" Simply Class to manage the GALEX data IO """
INSTRUMENT = "GALEX"
def get_target_instruments(self, targetname, contains=None, buffer_safe_width=0.05):
""" """
if not self.is_target_known(targetname):
raise AttributeError("unknown target. Please run download_target_metadata(), and download the associated files")
from astrobject import instruments
# Which data to use
target_data = self.get_target_data(targetname)
all_data_int = [f for f in target_data if "int" in f and (contains is None or contains in f)]
# Which data to use
target = self.get_target(targetname)
instru = []
for fullpath in [f for f in all_data_int if f.replace("int","skybg") in target_data]:
inst_ = instruments.get_instrument(fullpath)
if not inst_.is_target_in(target,buffer_safe_width=buffer_safe_width):
print("Given target not inside GALEX FoV for %s - skipped"%fullpath)
continue
inst_.set_sky(fullpath.replace("int","skybg"))
inst_.set_target(target)
instru.append(inst_)
return instru
# ====================== #
# #
# SDSS #
# #
# ====================== #
SDSS_DIR = io.DATAPATH+"SDSS/"
_SDSS_METADATA_FILE = SDSS_DIR+"target_source.csv"
SDSS_BASEURL = "https://dr12.sdss.org"
def query_sdss_metadata(ra, dec):
""" look for SDSS meta data inside MAST archive """
import requests
url_ = SDSS_BASEURL+'/fields/raDec?ra=%.5f&dec=%+.5f'%(ra,dec)
r = requests.post(url_)
r.raise_for_status() # raise a status if issue, like wrong auth
fitsband = [l for l in r.text.splitlines() if "FITS" in l]
if len(fitsband)==0:
raise AttributeError("Was not able to find sdss data while searching for \n %s"%url_)
filters = []
basename = []
baseurl = []
for lband in fitsband:
url = lband.split('"')[1]
filters.append(lband.split("-band")[0][-1])
baseurl.append("/".join([SDSS_BASEURL]+url.split("/")[1:-1]))
basename.append(url.split("/")[-1].split(".")[0])
df = pandas.DataFrame(np.asarray([filters,baseurl,basename]).T, columns=["filters","baseurl","basename"])
df["project"] = "dr12"
return df
def _sdss_info_to_urlpath_(baseurl, basename, bands=None):
""" band name inside the sdss url. *bands is not used.*
simply does:
"return baseurl+'/'+basename+'.fits.bz2'"
"""
return baseurl+"/"+basename+".fits.bz2"
#
# CLASS
#
class SDSSQuery( _Query_ ):
""" """
INSTRUMENT = "SDSS"
# ====================== #
# #
# PanStarrs #
# #
# ====================== #
PANSTARRS_DIR = io.DATAPATH+"PanSTARRS/"
_PANSTARRS_METADATA_FILE = PANSTARRS_DIR+"target_source.csv"
def query_panstarrs_metadata(ra, dec, size=240, filters="grizy"):
""" Query ps1filenames.py service to get a list of images
Parameters
----------
ra, dec: [floats]
position in degrees
size: [float]
image size in pixels (0.25 arcsec/pixel)
filters: [strings]
string with filters to include
Returns
--------
Table (a table with the results)
"""
service = "https://ps1images.stsci.edu/cgi-bin/ps1filenames.py"
url = ("{service}?ra={ra}&dec={dec}&size={size}&format=fits"
"&filters={filters}").format(**locals())
d_ =
|
pandas.read_csv(url, sep=" ")
|
pandas.read_csv
|