prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import pandas as pd
import os
import datetime
path1=os.getcwd()+'\\current_pid1.csv'
path2=os.getcwd()+'\\current_pid2.csv'
path3=os.getcwd()+'\\current_pid3.csv'
path4=os.getcwd()+'\\控制器设备信息反馈-2018-09-19.csv'
path5=os.getcwd()+'\\1.csv'
path6=os.getcwd()+'\\2.csv'
path7=os.getcwd()+'\\3.csv'
file1=open(path1,encoding='utf-8')
file2=open(path2,encoding='utf-8')
file3=open(path3,encoding='utf-8')
file4=open(path4,encoding='utf-8')
df1=pd.read_csv(file1, header=None)
df2=pd.read_csv(file2, header=None)
df3= | pd.read_csv(file3, header=None) | pandas.read_csv |
# Pandas
# 효과적인 데이터 분석 기능을 제공하는 패키지
# R에서 자주 사용하는 DataFrame을 파이썬에서도 사용할 수 있게 함
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
df = | pd.read_excel('D:/빅데이터/data/pdf/빅데이터/데이터분석통계/data/sungjuk.xlsx') | pandas.read_excel |
import etl.transforms.primitives.df.pandas_utils as pandas_utils
from etl.core.exceptions import TransformError
from etl.mappings.med_regex import med_regex
import pandas as pd
import re
import logging
import etl.transforms.primitives.df.filter_rows as filter_rows
import functools
def translate_epic_id_to_fid(df, col, new_col, config_map, drop_original=False,
add_string='', add_string_fid=None, remove_if_not_found=False, name_col=None, name_config_map=None):
def convert_id(row):
for fid, epic_id_list in config_map:
if row[col] in epic_id_list:
return fid
if name_col is not None and name_config_map is not None:
for epic_regex_dict in name_config_map:
if re.search(epic_regex_dict['pos'], row[name_col], flags=re.I):
if 'neg' in epic_regex_dict and len(epic_regex_dict['neg']) > 0 and \
re.search(epic_regex_dict['neg'], row[name_col], flags=re.I):
return 'INVALID FID'
return epic_regex_dict['fid']
if remove_if_not_found:
return 'INVALID FID'
raise TransformError(
'translate.translate_epic_id_to_fid',
'Could not find an fid for this ID.',
col + " = " + row['epic_id']
)
pandas_utils.check_column_name(df, col)
df[new_col] = df.apply(convert_id, axis=1)
if drop_original:
df.drop(col, axis=1, inplace=True)
if remove_if_not_found:
df = df[df[new_col] != 'INVALID FID']
if add_string != '' and add_string_fid is not None:
for fid in add_string_fid:
fid_rows = (df[new_col] == fid)
df_temp = df[new_col][fid_rows] + add_string
df[new_col][fid_rows] = df_temp
return df
def translate_med_name_to_fid(med_data):
def find_fid_with_regex(med_name, med):
if re.search(med['pos'], med_name, flags=re.I):
if 'neg' in med and len(med['neg']) > 0 and re.search(med['neg'], med_name, flags=re.I):
return 'Invalid Medication'
return med['fid']
else:
return 'Unknown Medication'
res = None
for med in med_regex:
this_med_data = med_data.copy()
this_med_data['fid'] = this_med_data['full_name'].apply(functools.partial(find_fid_with_regex, med=med))
this_med_data = filter_rows.filter_medications(this_med_data)
if not this_med_data.empty:
if res is None:
res = this_med_data
else:
res = pd.concat([res, this_med_data], ignore_index=True)
return res
def override_empty_doses_with_rates(med_data, fid_col, fids):
med_idx = med_data[fid_col].isin(fids) & \
( | pd.isnull(med_data['dose_unit']) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
# import collections
from collections import Counter
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress
import pandas as pd
import numpy as np
import datetime as dt
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
folder.mkdir(exist_ok=True, parents=True)
return folder
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(Path("Preparation-Thesis/CB_projects"))
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep=True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(
tuples_index=[tuple(i) for i in output_df[names].values]
)
if isinstance(columns, list):
output_df = output_df.assign(
tuples_columns=[tuple(i) for i in output_df[columns].values]
) # hashable
output_df = output_df.pivot(
index="tuples_index", columns="tuples_columns", values=values
)
output_df.columns = pd.MultiIndex.from_tuples(
output_df.columns, names=columns
) # reduced
else:
output_df = output_df.pivot(
index="tuples_index", columns=columns, values=values
)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
return output_df
def get_float_cols(df):
return [key for key, val in df.dtypes.to_dict().items() if "float64" in str(val)]
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = func(*args, **kwargs)
# Do something after
return value
return wrapper_decorator
def PorphSiO2_template():
# 'SerieIDs' : ('Porph_SiO2')*5,
Series_Porph_SiO2 = {
"SampleID": (
"DW16",
"DW17",
"DW18",
"DW19",
"DW20",
"DW21",
"DW24",
"DW25",
"DW26",
"DW27",
"DW28",
"DW29",
"JOS12",
"JOS13",
"JOS14",
"JOS15",
)
}
# 'Metal' : ('Fe','Co','MnTPP','FeTPP','H2'),
# 'color' : (2, 4, 6, 15, 3)}
Porphyrins = {
"TMPP": {"Formula": "C48H38N4O4", "MW": 734.8382},
"TMPP-Fe(III)Cl": {"Formula": "C48H36ClFeN4O4", "MW": 824.1204},
"TMPP-Co(II)": {"Formula": "C48H36CoN4O4", "MW": 791.7556},
"TTP-Mn(III)Cl": {"Formula": "C44H28ClMnN4", "MW": 703.1098},
"TPP-Fe(III)Cl": {"Formula": "C44H28ClFeN4", "MW": 704.0168},
"TPP": {"Formula": "C44H30N4", "MW": 614.7346},
}
Porph_template = pd.DataFrame(Series_Porph_SiO2)
return Porph_template
def EC_types_grp():
# KL ['ORR_E_AppV_RHE', 'ORR_KL_E_AppV_RHE','Electrode']
_basic_EC_cond = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
_extra_EC_cond = {
"N2CV": [],
"ORR": ["RPM_DAC_uni"],
"KL": ["Electrode", "ORR_E_AppV_RHE"],
"EIS": ["E_RHE"],
"HER": [],
"OER": [],
}
_out = {key: _basic_EC_cond + val for key, val in _extra_EC_cond.items()}
return _out
def save_EC_index_PorphSiO2(EC_index, EC_folder):
_porph_index = EC_index.loc[EC_index.SampleID.isin(PorphSiO2_template().SampleID)]
_porph_index.to_excel(EC_folder.joinpath("EC_index_PorphSiO2.xlsx"))
# save_EC_index_PorphSiO2(EC_index, EC_folder)
class EC_PorphSiO2:
folder = FindExpFolder("PorphSiO2").compare
Porph_template = PorphSiO2_template()
EIS_models = [
"Model(EEC_Randles_RWpCPE)",
"Model(EEC_2CPE)",
"Model(EEC_2CPEpW)",
"Model(EEC_RQ_RQ_RW)",
"Model(EEC_RQ_RQ_RQ)",
"Model(Randles_RQRQ)",
]
ORR_pars_all = Load_from_Indexes.ORR_pars_OVV(reload=False, use_daily=True)
mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file"]] + [
"Sweep_Type"
]
EC_idx_PorphSiO2 = pd.read_excel(list(EC_folder.rglob("*EC_index*"))[0])
EC_idx_PorphSiO2 = EC_idx_PorphSiO2.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(np.datetime_as_string(np.datetime64(i, "D")))
for i in EC_idx_PorphSiO2.PAR_date.to_numpy()
]
}
)
# ['Model(Singh2015_RQRQ)', 'Model(Singh2015_RQRQR)', 'Model(Bandarenka_2011_RQRQR)',
# 'Model(Singh2015_RQRWR)', 'Model(Randles_RQRQ)', 'Model(Singh2015_R3RQ)']
# model_select = EC_PorphSiO2.EIS_models[1]
# self = EC_PorphSiO2()
def __init__(self):
self.index, self.AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# = EC_PorphSiO2.get_AST_days()
# self.pars = EC_PorphSiO2.mergedEC()
# self.par_export = EC_OHC.to_excel(self.folder.joinpath('EC_ORR_HPRR.xlsx'))
def get_AST_days():
gr_idx = EC_PorphSiO2.EC_idx_PorphSiO2.groupby("PAR_date_day_dt")
AST_days = []
for n, gr in gr_idx:
# n,gr
exps = gr.PAR_exp.unique()
# gr.PAR_date_day.unique()[0]
if any(["AST" in i for i in exps]):
# print(n,exps)
# AST_days.append(n)
if n + dt.timedelta(1) in gr_idx.groups.keys():
_post = gr_idx.get_group(n + dt.timedelta(1))
# print(n + dt.timedelta(1), gr_idx.get_group(n + dt.timedelta(1)))
AST_days.append((n, n + dt.timedelta(1)))
else:
AST_days.append((n, n))
print(n + dt.timedelta(1), "grp missing")
print(AST_days)
return AST_days
def select_ECexps(EC_folder):
LC_idx_fp = list(EC_folder.rglob("*EC_index*"))[0].parent.joinpath(
"LC_index.xlsx"
)
AST_days = EC_PorphSiO2.get_AST_days()
if LC_idx_fp.exists():
LC_fls = EC_PorphSiO2.EC_idx_PorphSiO2.loc[
EC_PorphSiO2.EC_idx_PorphSiO2.PAR_date_day_dt.isin(
[i for a in AST_days for i in a]
)
]
LC_fls.to_excel(
list(EC_folder.rglob("*EC_index*"))[0].parent.joinpath("LC_index.xlsx")
)
else:
try:
LC_fls = pd.read_excel(LC_idx_fp, index_col=[0])
except Exception as e:
print(f"Excel load fail: {e}\n,file: {LC_idx_fp}")
LC_fls = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Data Cleaning Codes
"""
'''
The data for the social network of the eight masters of the Tang and Song
are extracted from China Biographical Database Project (CBDB).
This code is intended to prepare two excel tables for visualization in Gephi.
input: query_results.xlsx
output: the_eight_gephi.xlsx
'''
import pandas as pd
import os
path = '<Your directory path>'
raw = | pd.read_excel(path + os.sep + 'query_results.xlsx') | pandas.read_excel |
#!/usr/bin/env python
# Edit this script to add your team's training code.
# Some functions are *required*, but you can edit most parts of required functions, remove non-required functions, and add your own function.
from helper_code import *
import numpy as np, os, sys, joblib
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
import neurokit2 as nk
from skmultilearn.ensemble import LabelSpacePartitioningClassifier
from skmultilearn.cluster import FixedLabelSpaceClusterer
from sklearn.ensemble import RandomForestClassifier
from skmultilearn.problem_transform import ClassifierChain
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
twelve_lead_model_filename = '12_lead_model.sav'
six_lead_model_filename = '6_lead_model.sav'
three_lead_model_filename = '3_lead_model.sav'
two_lead_model_filename = '2_lead_model.sav'
################################################################################
#
# Training function
#
################################################################################
def find_R_peaks(ecg_data,samplefreq):
try:
_, rpeaks = nk.ecg_peaks(ecg_data, sampling_rate=samplefreq)
r_peaks=rpeaks['ECG_R_Peaks']
r_peaks = np.delete(r_peaks,np.where(np.isnan(r_peaks))[0]).astype(int)
except:
print("cleaning data")
cleaned_ecg = nk.ecg_clean(ecg_data, sampling_rate=samplefreq, method="neurokit")
try:
_, rpeaks = nk.ecg_peaks(cleaned_ecg, sampling_rate=samplefreq)
r_peaks=rpeaks['ECG_R_Peaks']
r_peaks = np.delete(r_peaks,np.where(np.isnan(r_peaks))[0]).astype(int)
except:
print("could not analyse cleaned ECG")
#Midlertidig løsning:
r_peaks = np.array([0,1,2,3])
return r_peaks
# Train your model. This function is *required*. Do *not* change the arguments of this function.
def training_code(data_directory, model_directory):
# Find header and recording files.
print('Finding header and recording files...')
header_files, recording_files = find_challenge_files(data_directory)
num_recordings = len(recording_files)
if not num_recordings:
raise Exception('No data was provided.')
# Create a folder for the model if it does not already exist.
if not os.path.isdir(model_directory):
os.mkdir(model_directory)
# Extract classes from dataset.
print('Extracting classes...')
all_labels = []
#classes = set()
for header_file in header_files:
header = load_header(header_file)
#classes |= set(get_labels(header))
all_labels.append(get_labels(header))
df_labels = pd.DataFrame(all_labels)
SNOMED_scored=pd.read_csv("./dx_mapping_scored.csv", sep=",")
SNOMED_unscored= | pd.read_csv("./dx_mapping_unscored.csv", sep=",") | pandas.read_csv |
# Libraries
import json
import numpy as np
import pandas as pd
def load_features_map(filepath):
"""This method loads the features map json file.
Parameters
----------
Return
------
"""
# Reading the json as a dict
with open(filepath) as json_data:
data = json.load(json_data)
# Read file
config = pd.DataFrame(data)
# Basic formatting
config.name = config.name.str.title()
config.code = config.code.str.upper()
# Return
return config
def load_columns_operations(filepath):
"""This method..."""
pass
def merge_date_time(data, date_column, time_column=None):
"""This method merges columns date and time
.. note: If time is missing default is 00:00.
.. note: Also convert date using dt.apply(str).
Parameters
----------
Returns
-------
"""
if not date_column in data:
print("Column <%s> not found!" % date_column)
if not time_column in data:
print("Column <%s> not found!" % time_column)
# Fill empty times with default value
data[time_column] = data[time_column].fillna('00:00')
# Format
date = data[date_column].dt.strftime('%Y-%m-%d')
time = data[time_column]
# Return
return | pd.to_datetime(date + ' ' + time) | pandas.to_datetime |
"""
Modified from pvlib python pvlib/tests/test_bsrn.py.
See LICENSES/PVLIB-PYTHON_LICENSE
"""
import inspect
import gzip
import os
from pathlib import Path
import re
import pandas as pd
import pytest
from solarforecastarbiter.io.fetch import bsrn
from pandas.testing import assert_index_equal, assert_frame_equal
DATA_DIR = Path(os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))) / 'data'
@pytest.mark.parametrize('testfile,open_func,mode,expected_index', [
('bsrn-pay0616.dat.gz', gzip.open, 'rt',
| pd.date_range(start='20160601', periods=43200, freq='1min', tz='UTC') | pandas.date_range |
#!/usr/bin/env python
# coding: utf-8
# import libraries
import numpy as np
import pandas as pd
import streamlit as st
import plotly as pt
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
#import pandas_profiling as pf
import plotly.express as px
import plotly.graph_objects as go
sns.set_style("darkgrid")
# ignore warnings
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
# my app’s title
#st.title('Ongo-Total Run Experience Subscription Enhancement')
#st.markdown("""# **OngoBoost-Total Run Experience Subscription Enhancement**""")
st.markdown("""
<style>
body{
#color:;
background-color: #E4F2FE;
}
</style>
""",unsafe_allow_html=True)
#st.markdown("""# ** **""")#ff8c69
st.markdown("<h1 style='text-align: center; color: ;'><b>OngoBoost: Subscribe Today Run Tomorrow!</b></h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: left; color: ;'><b></b></h3>", unsafe_allow_html=True)
#st.markdown("<h3 style='text-align: left; color: ;'><b></b></h3>", unsafe_allow_html=True)
#st.title("OngoBoost-Subscribe Today Run Tomorrow")
#st.markdown("<h1 style='text-align: center; color: red;'></h1>", unsafe_allow_html=True)
#st.markdown(<style>h1{color: red}='text-align: center; color: red;'>, unsafe_allow_html=True)
#st.header("Upload New Users")
st.markdown("<h4 style='text-align: left; color: ;'><b>Upload New Users</b></h4>", unsafe_allow_html=True)
upload_flag = st.radio("", ("Yes, upload new user data", "No, use preloaded data"), index=1)
if upload_flag=="Yes, upload new user data":
csv_file = st.file_uploader(label="", type=["csv"], encoding="utf-8")#Upload a CSV file
if csv_file is not None:
data = pd.read_csv(csv_file)
#if st.checkbox("Show data"):
st.dataframe(data)
else:
def get_data():
#url = r"test_streamlit.csv"
#path = '/Users/sayantan/Desktop/test_streamlit.csv'
path = 'test_streamlit.csv'
return | pd.read_csv(path) | pandas.read_csv |
# -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class TestIndexCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def test_setitem_index_numeric_coercion_int(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.index.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[5] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 5]))
self.assertEqual(temp.index.dtype, np.int64)
# int + float -> float
temp = s.copy()
temp[1.1] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 1.1]))
self.assertEqual(temp.index.dtype, np.float64)
def test_setitem_index_numeric_coercion_float(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(s.index.dtype, np.float64)
# float + int -> int
temp = s.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
temp = s.copy()
temp[5.1] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.1, 4.1, 5.1])
tm.assert_series_equal(temp, exp)
self.assertEqual(temp.index.dtype, np.float64)
def test_insert_numeric_coercion_int(self):
idx = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.int64)
# int + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1, 1, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1, 1.1, 2, 3, 4]))
self.assertEqual(res.dtype, np.float64)
# int + bool -> int
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1, 0, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
def test_insert_numeric_coercion_float(self):
idx = pd.Float64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.float64)
# float + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1., 1., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1., 1.1, 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + bool -> float
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1., 0., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
class TestSeriesCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.rep = {}
self.rep['object'] = ['a', 'b']
self.rep['int64'] = [4, 5]
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
def test_setitem_numeric_coercion_int(self):
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
temp = s.copy()
temp[1] = 1.1
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 3, 4]))
self.assertEqual(temp.dtype, np.complex128)
# int + bool -> int
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
def test_setitem_numeric_coercion_float(self):
s = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(s.dtype, np.float64)
# float + int -> float
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
# float + float -> float
temp = s.copy()
temp[1] = 1.1
tm.assert_series_equal(temp, pd.Series([1.1, 1.1, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
# float + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1.1, 1 + 1j, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.complex128)
# float + bool -> float
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
def test_setitem_numeric_coercion_complex(self):
s = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(s.dtype, np.complex128)
# complex + int -> complex
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + float -> complex
temp = s.copy()
temp[1] = 1.1
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp,
pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + bool -> complex
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
def test_setitem_numeric_coercion_bool(self):
s = pd.Series([True, False, True, False])
self.assertEqual(s.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
temp = s.copy()
temp[1] = 1
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# TODO_GH12747 The result must be int
temp = s.copy()
temp[1] = 3 # greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
temp = s.copy()
temp[1] = 1.1
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
temp = s.copy()
temp[1] = 1 + 1j
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# bool + bool -> int
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
def test_where_numeric_coercion_int(self):
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.dtype, np.int64)
cond = pd.Series([True, False, True, False])
# int + int -> int
res = s.where(cond, 1)
tm.assert_series_equal(res, pd.Series([1, 1, 3, 1]))
self.assertEqual(res.dtype, np.int64)
res = s.where(cond, pd.Series([5, 6, 7, 8]))
tm.assert_series_equal(res, pd.Series([1, 6, 3, 8]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = s.where(cond, 1.1)
tm.assert_series_equal(res, pd.Series([1, 1.1, 3, 1.1]))
self.assertEqual(res.dtype, np.float64)
res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
tm.assert_series_equal(res, pd.Series([1, 6.6, 3, 8.8]))
self.assertEqual(res.dtype, np.float64)
# int + complex -> complex
res = s.where(cond, 1 + 1j)
tm.assert_series_equal(res, pd.Series([1, 1 + 1j, 3, 1 + 1j]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
tm.assert_series_equal(res, pd.Series([1, 6 + 6j, 3, 8 + 8j]))
self.assertEqual(res.dtype, np.complex128)
# int + bool -> int
res = s.where(cond, True)
tm.assert_series_equal(res, pd.Series([1, 1, 3, 1]))
self.assertEqual(res.dtype, np.int64)
res = s.where(cond, pd.Series([True, False, True, True]))
tm.assert_series_equal(res, pd.Series([1, 0, 3, 1]))
self.assertEqual(res.dtype, np.int64)
def test_where_numeric_coercion_float(self):
s = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(s.dtype, np.float64)
cond = pd.Series([True, False, True, False])
# float + int -> float
res = s.where(cond, 1)
tm.assert_series_equal(res, pd.Series([1.1, 1.0, 3.3, 1.0]))
self.assertEqual(res.dtype, np.float64)
res = s.where(cond, pd.Series([5, 6, 7, 8]))
tm.assert_series_equal(res, pd.Series([1.1, 6.0, 3.3, 8.0]))
self.assertEqual(res.dtype, np.float64)
# float + float -> float
res = s.where(cond, 1.1)
tm.assert_series_equal(res, pd.Series([1.1, 1.1, 3.3, 1.1]))
self.assertEqual(res.dtype, np.float64)
res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
tm.assert_series_equal(res, pd.Series([1.1, 6.6, 3.3, 8.8]))
self.assertEqual(res.dtype, np.float64)
# float + complex -> complex
res = s.where(cond, 1 + 1j)
tm.assert_series_equal(res, pd.Series([1.1, 1 + 1j, 3.3, 1 + 1j]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
tm.assert_series_equal(res, pd.Series([1.1, 6 + 6j, 3.3, 8 + 8j]))
self.assertEqual(res.dtype, np.complex128)
# float + bool -> float
res = s.where(cond, True)
tm.assert_series_equal(res, pd.Series([1.1, 1.0, 3.3, 1.0]))
self.assertEqual(res.dtype, np.float64)
res = s.where(cond, pd.Series([True, False, True, True]))
tm.assert_series_equal(res, pd.Series([1.1, 0.0, 3.3, 1.0]))
self.assertEqual(res.dtype, np.float64)
def test_where_numeric_coercion_complex(self):
s = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(s.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> float
res = s.where(cond, 1)
tm.assert_series_equal(res, pd.Series([1 + 1j, 1, 3 + 3j, 1]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, pd.Series([5, 6, 7, 8]))
tm.assert_series_equal(res, pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0]))
self.assertEqual(res.dtype, np.complex128)
# complex + float -> float
res = s.where(cond, 1.1)
tm.assert_series_equal(res, pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
tm.assert_series_equal(res, pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8]))
self.assertEqual(res.dtype, np.complex128)
# complex + complex -> complex
res = s.where(cond, 1 + 1j)
tm.assert_series_equal(res,
pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
tm.assert_series_equal(res,
pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j]))
self.assertEqual(res.dtype, np.complex128)
# complex + bool -> complex
res = s.where(cond, True)
tm.assert_series_equal(res, pd.Series([1 + 1j, 1, 3 + 3j, 1]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, | pd.Series([True, False, True, True]) | pandas.Series |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numba
import numpy as np
import pandas as pd
import platform
import pyarrow.parquet as pq
import random
import string
import unittest
from pandas.api.types import CategoricalDtype
import sdc
from sdc.str_arr_ext import StringArray
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_OneDs,
count_array_REPs,
count_parfor_OneDs,
count_parfor_REPs,
dist_IR_contains,
get_start_end,
skip_numba_jit)
class TestJoin(TestCase):
@skip_numba_jit
def test_join1(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n) + 3, 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': 2 * np.arange(n) + 1, 'B': n + np.arange(n) + 1.0})
df3 = pd.merge(df1, df2, left_on='key1', right_on='key2')
return df3.B.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_join1_seq(self):
def test_impl(df1, df2):
df3 = df1.merge(df2, left_on='key1', right_on='key2')
return df3
hpat_func = self.jit(test_impl)
n = 11
df1 = pd.DataFrame({'key1': np.arange(n) + 3, 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': 2 * np.arange(n) + 1, 'B': n + np.arange(n) + 1.0})
pd.testing.assert_frame_equal(hpat_func(df1, df2), test_impl(df1, df2))
n = 11111
df1 = pd.DataFrame({'key1': np.arange(n) + 3, 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': 2 * np.arange(n) + 1, 'B': n + np.arange(n) + 1.0})
pd.testing.assert_frame_equal(hpat_func(df1, df2), test_impl(df1, df2))
@skip_numba_jit
def test_join1_seq_str(self):
def test_impl():
df1 = pd.DataFrame({'key1': ['foo', 'bar', 'baz']})
df2 = pd.DataFrame({'key2': ['baz', 'bar', 'baz'], 'B': ['b', 'zzz', 'ss']})
df3 = pd.merge(df1, df2, left_on='key1', right_on='key2')
return df3.B
hpat_func = self.jit(test_impl)
self.assertEqual(set(hpat_func()), set(test_impl()))
@skip_numba_jit
def test_join1_seq_str_na(self):
# test setting NA in string data column
def test_impl():
df1 = pd.DataFrame({'key1': ['foo', 'bar', 'baz']})
df2 = pd.DataFrame({'key2': ['baz', 'bar', 'baz'], 'B': ['b', 'zzz', 'ss']})
df3 = df1.merge(df2, left_on='key1', right_on='key2', how='left')
return df3.B
hpat_func = self.jit(test_impl)
self.assertEqual(set(hpat_func()), set(test_impl()))
@skip_numba_jit
def test_join_mutil_seq1(self):
def test_impl(df1, df2):
return df1.merge(df2, on=['A', 'B'])
hpat_func = self.jit(test_impl)
df1 = pd.DataFrame({'A': [3, 1, 1, 3, 4],
'B': [1, 2, 3, 2, 3],
'C': [7, 8, 9, 4, 5]})
df2 = pd.DataFrame({'A': [2, 1, 4, 4, 3],
'B': [1, 3, 2, 3, 2],
'D': [1, 2, 3, 4, 8]})
pd.testing.assert_frame_equal(hpat_func(df1, df2), test_impl(df1, df2))
@skip_numba_jit
def test_join_mutil_parallel1(self):
def test_impl(A1, B1, C1, A2, B2, D2):
df1 = pd.DataFrame({'A': A1, 'B': B1, 'C': C1})
df2 = pd.DataFrame({'A': A2, 'B': B2, 'D': D2})
df3 = df1.merge(df2, on=['A', 'B'])
return df3.C.sum() + df3.D.sum()
hpat_func = self.jit(locals={
'A1:input': 'distributed',
'B1:input': 'distributed',
'C1:input': 'distributed',
'A2:input': 'distributed',
'B2:input': 'distributed',
'D2:input': 'distributed', })(test_impl)
df1 = pd.DataFrame({'A': [3, 1, 1, 3, 4],
'B': [1, 2, 3, 2, 3],
'C': [7, 8, 9, 4, 5]})
df2 = pd.DataFrame({'A': [2, 1, 4, 4, 3],
'B': [1, 3, 2, 3, 2],
'D': [1, 2, 3, 4, 8]})
start, end = get_start_end(len(df1))
h_A1 = df1.A.values[start:end]
h_B1 = df1.B.values[start:end]
h_C1 = df1.C.values[start:end]
h_A2 = df2.A.values[start:end]
h_B2 = df2.B.values[start:end]
h_D2 = df2.D.values[start:end]
p_A1 = df1.A.values
p_B1 = df1.B.values
p_C1 = df1.C.values
p_A2 = df2.A.values
p_B2 = df2.B.values
p_D2 = df2.D.values
h_res = hpat_func(h_A1, h_B1, h_C1, h_A2, h_B2, h_D2)
p_res = test_impl(p_A1, p_B1, p_C1, p_A2, p_B2, p_D2)
self.assertEqual(h_res, p_res)
@skip_numba_jit
def test_join_left_parallel1(self):
"""
"""
def test_impl(A1, B1, C1, A2, B2, D2):
df1 = pd.DataFrame({'A': A1, 'B': B1, 'C': C1})
df2 = pd.DataFrame({'A': A2, 'B': B2, 'D': D2})
df3 = df1.merge(df2, on=('A', 'B'))
return df3.C.sum() + df3.D.sum()
hpat_func = self.jit(locals={
'A1:input': 'distributed',
'B1:input': 'distributed',
'C1:input': 'distributed', })(test_impl)
df1 = pd.DataFrame({'A': [3, 1, 1, 3, 4],
'B': [1, 2, 3, 2, 3],
'C': [7, 8, 9, 4, 5]})
df2 = pd.DataFrame({'A': [2, 1, 4, 4, 3],
'B': [1, 3, 2, 3, 2],
'D': [1, 2, 3, 4, 8]})
start, end = get_start_end(len(df1))
h_A1 = df1.A.values[start:end]
h_B1 = df1.B.values[start:end]
h_C1 = df1.C.values[start:end]
h_A2 = df2.A.values
h_B2 = df2.B.values
h_D2 = df2.D.values
p_A1 = df1.A.values
p_B1 = df1.B.values
p_C1 = df1.C.values
p_A2 = df2.A.values
p_B2 = df2.B.values
p_D2 = df2.D.values
h_res = hpat_func(h_A1, h_B1, h_C1, h_A2, h_B2, h_D2)
p_res = test_impl(p_A1, p_B1, p_C1, p_A2, p_B2, p_D2)
self.assertEqual(h_res, p_res)
self.assertEqual(count_array_OneDs(), 3)
@skip_numba_jit
def test_join_datetime_seq1(self):
def test_impl(df1, df2):
return | pd.merge(df1, df2, on='time') | pandas.merge |
from moex import MOEX
import datetime
from datetime import date
from urllib import request
from pandas.core.frame import DataFrame
from yahoofinancials import YahooFinancials
import pandas as pd
import os
import investpy
from pathlib import Path
def get_or_update_data(source: str, country: str, ticket: str, date_from: date, date_to: date, filename: str, currency: str = 'UNK'):
try:
if source is 'moex':
data = get_data_from_moex(
ticket=ticket, date_from=date_from, date_to=date_to)
elif source is 'stooq':
data = get_data_from_stooq(
ticket=ticket, date_from=date_from, date_to=date_to, currency=currency)
elif source is 'msci':
data = get_data_from_msci(
ticket=ticket, date_from=date_from, date_to=date_to)
elif source is 'yahoo':
data = get_data_from_yahoo(
ticket=ticket, date_from=date_from, date_to=date_to, currency=currency)
elif source is 'investing':
data = get_data_from_investing(ticket=ticket, country=country,
date_from=date_from, date_to=date_to)
else:
raise ValueError('Unknown data source')
except RuntimeError:
data = DataFrame(columns={'date', 'secid', 'close', 'open',
'low', 'high', 'volume', 'capitalisation', 'currency'})
except ValueError:
raise ValueError('Unknown data source or another')
except:
data = pd.DataFrame(columns={'date', 'secid', 'close', 'open',
'low', 'high', 'volume', 'capitalisation', 'currency'})
data = set_good_look(data)
save_to_csv(data, filename=filename)
return filename + '.csv'
def get_data_from_investing(ticket: str, country: str, date_from: date, date_to: date):
data = investpy.get_index_historical_data(index=ticket,
country=country,
from_date=date_from.strftime(
'%d/%m/%Y'),
to_date=date_to.strftime('%d/%m/%Y'))
data.reset_index(inplace=True)
data.rename(columns={'Date': 'date', 'Open': 'open',
'High': 'high', 'Low': 'low',
'Close': 'close', 'Volume': 'volume',
'Currency': 'currency', }, inplace=True)
data = data.assign(secid='investing:' + ticket)
return data
def get_data_from_moex(ticket: str, date_from: date, date_to: date):
moex = MOEX()
data = moex.history_engine_market_security(
date_start=date_from.strftime('%Y-%m-%d'), date_end=date_to.strftime('%Y-%m-%d'), security=ticket)
data = data[["TRADEDATE", "SECID", "OPEN", "CLOSE",
"LOW", "HIGH", "VALUE", "CURRENCYID", 'CAPITALIZATION']]
data.rename(columns={'TRADEDATE': 'date', 'OPEN': 'open',
'HIGH': 'high', 'LOW': 'low', 'CLOSE': 'close',
'SECID': 'secid', 'VALUE': 'volume',
'CURRENCYID': 'currency', 'CAPITALIZATION': 'capitalization', }, inplace=True)
return data
def get_data_from_stooq(ticket: str, date_from: date, date_to: date, currency='UNK'):
url = 'https://stooq.com/q/d/l/?s={0}&d1={1}&d2={2}&i=d'.format(
ticket, date_from.strftime('%Y%m%d'), date_to.strftime('%Y%m%d'))
data = | pd.read_csv(url) | pandas.read_csv |
## import libraries
import pandas as pd
import numpy as np
import datetime
def undersampling_balanced(unbalanced_data, lookback, lookforward, random_state, datetime_col, date_col, source_col, target, original_cols):
unbalanced_data = unbalanced_data.sort_values(by = [source_col, datetime_col])
data_zeros = unbalanced_data.loc[unbalanced_data[target] == 0]
data_ones = unbalanced_data.loc[unbalanced_data[target] == 1]
weights = data_ones.groupby([source_col, date_col]).url.agg(['count']).reset_index(drop = False)
data_zeros = weights.merge(data_zeros, how = 'left', on = [source_col,date_col], suffixes=('_1', '_0'))
data_zeros.drop_duplicates(inplace = True)
sampled_zeros = pd.DataFrame(columns=data_zeros.columns)
for i in weights.iterrows():
source_name = i[1][source_col]
creation_date = i[1][date_col]
counts = i[1]['count']
# print('ones count: ', counts)
extracted_data = data_zeros.loc[(data_zeros.source_name == source_name) & (data_zeros[date_col]== creation_date)]
if extracted_data.shape[0] >= counts:
# print('sampled data: ', extracted_data.sample(n = counts, random_state = random_state).shape[0])
sampled_zeros = pd.concat([sampled_zeros,extracted_data.sample(n = counts, random_state = random_state)])
else:
extracted_data = data_zeros.loc[(data_zeros.source_name == source_name) &
(data_zeros[date_col]>= creation_date - datetime.timedelta(days = lookback)) &
(data_zeros[date_col]<= creation_date + datetime.timedelta(days = lookforward))]
sampled_zeros = pd.concat([sampled_zeros,extracted_data.sample(n = counts, random_state = random_state)])
return | pd.concat([data_ones[original_cols], sampled_zeros[original_cols]], ignore_index= True) | pandas.concat |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns,
[('z', 'numeric'), ('a', 'categorical'),
('ch', 'categorical')])
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_header, header)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 2)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids,
('c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'))
self.assertEqualColumns(md.columns, [('col1', 'categorical')])
def test_non_standard_characters(self):
index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 5)
self.assertEqual(md.column_count, 5)
self.assertEqual(md.id_header, 'id')
self.assertEqual(
md.ids, ('©id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqualColumns(md.columns, [('↩c@l1™', 'categorical'),
('col(#2)', 'categorical'),
("#col'3", 'categorical'),
('"<col_4>"', 'categorical'),
('col\t \r\n5', 'numeric')])
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 4)
self.assertEqual(md.column_count, 4)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
self.assertEqualColumns(md.columns, [('col1', 'numeric'),
('NA', 'numeric'),
('col3', 'categorical'),
('col4', 'categorical')])
def test_does_not_cast_ids_or_column_names(self):
index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object,
name='id')
columns = ['42.0', '1000', '-4.2']
data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
self.assertEqualColumns(md.columns, [('42.0', 'numeric'),
('1000', 'categorical'),
('-4.2', 'numeric')])
def test_mixed_column_types(self):
md = Metadata(
pd.DataFrame({'col0': [1.0, 2.0, 3.0],
'col1': ['a', 'b', 'c'],
'col2': ['foo', 'bar', '42'],
'col3': ['1.0', '2.5', '-4.002'],
'col4': [1, 2, 3],
'col5': [1, 2, 3.5],
'col6': [1e-4, -0.0002, np.nan],
'col7': ['cat', np.nan, 'dog'],
'col8': ['a', 'a', 'a'],
'col9': [0, 0, 0]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 10)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns, [('col0', 'numeric'),
('col1', 'categorical'),
('col2', 'categorical'),
('col3', 'categorical'),
('col4', 'numeric'),
('col5', 'numeric'),
('col6', 'numeric'),
('col7', 'categorical'),
('col8', 'categorical'),
('col9', 'numeric')])
def test_case_insensitive_duplicate_ids(self):
index = pd.Index(['a', 'b', 'A'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
metadata = Metadata(df)
self.assertEqual(metadata.ids, ('a', 'b', 'A'))
def test_case_insensitive_duplicate_column_names(self):
index = pd.Index(['a', 'b', 'c'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3'],
'Column': ['4', '5', '6']}, index=index)
metadata = Metadata(df)
self.assertEqual(set(metadata.columns), {'column', 'Column'})
def test_categorical_column_leading_trailing_whitespace_value(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', ' bar ', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_id(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', ' b ', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_column_name(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], ' col2 ': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
class TestSourceArtifacts(unittest.TestCase):
def setUp(self):
self.md = Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_source_artifacts(self):
self.assertEqual(self.md.artifacts, ())
def test_add_zero_artifacts(self):
self.md._add_artifacts([])
self.assertEqual(self.md.artifacts, ())
def test_add_artifacts(self):
# First two artifacts have the same data but different UUIDs.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
self.md._add_artifacts([artifact1])
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact3 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact2, artifact3])
self.assertEqual(self.md.artifacts, (artifact1, artifact2, artifact3))
def test_add_non_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
with self.assertRaisesRegex(TypeError, "Artifact object.*42"):
self.md._add_artifacts([artifact, 42])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, ())
def test_add_duplicate_artifact(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact2 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact1, artifact2])
with self.assertRaisesRegex(
ValueError, "Duplicate source artifacts.*artifact: Mapping"):
self.md._add_artifacts([artifact1])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, (artifact1, artifact2))
class TestRepr(unittest.TestCase):
def test_singular(self):
md = Metadata(pd.DataFrame({'col1': [42]},
index=pd.Index(['a'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 1 column', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
def test_plural(self):
md = Metadata(pd.DataFrame({'col1': [42, 42], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('2 IDs x 2 columns', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
self.assertIn("col2: ColumnProperties(type='categorical')", obs)
def test_column_name_padding(self):
data = [[0, 42, 'foo']]
index = pd.Index(['my-id'], name='id')
columns = ['col1', 'longer-column-name', 'c']
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 3 columns', obs)
self.assertIn(
"col1: ColumnProperties(type='numeric')", obs)
self.assertIn(
"longer-column-name: ColumnProperties(type='numeric')", obs)
self.assertIn(
"c: ColumnProperties(type='categorical')", obs)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index,
columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index,
columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2)
class TestToDataframe(unittest.TestCase):
def test_minimal(self):
df = pd.DataFrame({}, index=pd.Index(['id1'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='#SampleID'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.index.name, '#SampleID')
def test_dataframe_copy(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertIsNot(obs, df)
def test_retains_column_order(self):
index = pd.Index(['id1', 'id2'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.columns.tolist(), ['z', 'a', 'ch'])
def test_missing_data(self):
# Different missing data representations should be normalized to np.nan
index = pd.Index(['None', 'nan', 'NA', 'id1'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, float('nan'), 3]),
('NA', [np.nan, 'foo', float('nan'), None]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, np.nan, 3.0]),
('NA', [np.nan, 'foo', np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'NA': object, 'col3': object,
'col4': object})
self.assertTrue(np.isnan(obs['col1']['NA']))
self.assertTrue(np.isnan(obs['NA']['NA']))
self.assertTrue(np.isnan(obs['NA']['id1']))
def test_dtype_int_normalized_to_dtype_float(self):
index = pd.Index(['id1', 'id2', 'id3'], name='id')
df = pd.DataFrame({'col1': [42, -43, 0],
'col2': [42.0, -43.0, 0.0],
'col3': [42, np.nan, 0]},
index=index)
self.assertEqual(df.dtypes.to_dict(),
{'col1': np.int64, 'col2': np.float64,
'col3': np.float64})
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame({'col1': [42.0, -43.0, 0.0],
'col2': [42.0, -43.0, 0.0],
'col3': [42.0, np.nan, 0.0]},
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'col2': np.float64,
'col3': np.float64})
class TestGetColumn(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_column_name_not_found(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
with self.assertRaisesRegex(ValueError,
"'col3'.*not a column.*'col1', 'col2'"):
md.get_column('col3')
def test_artifacts_are_propagated(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = A.view(Metadata)
obs = md.get_column('b')
exp = CategoricalMetadataColumn(
pd.Series(['3'], name='b', index=pd.Index(['0'], name='id')))
exp._add_artifacts([A])
self.assertEqual(obs, exp)
self.assertEqual(obs.artifacts, (A,))
def test_categorical_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col2')
exp = CategoricalMetadataColumn(
pd.Series(['foo', 'bar'], name='col2',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_numeric_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='#OTU ID'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['a', 'b'], name='#OTU ID')))
self.assertEqual(obs, exp)
self.assertEqual(obs.id_header, '#OTU ID')
class TestGetIDs(unittest.TestCase):
def test_default(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids()
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
def test_incomplete_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "Subject='subject-1' AND SampleType="
with self.assertRaises(ValueError):
metadata.get_ids(where)
where = "Subject="
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_invalid_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "not-a-column-name='subject-1'"
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_empty_result(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
def test_simple_expression(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
where = "Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1', 'S3'}
self.assertEqual(actual, expected)
where = "SampleType='tongue'"
actual = metadata.get_ids(where)
expected = {'S2'}
self.assertEqual(actual, expected)
def test_more_complex_expressions(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1' OR Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND Subject='subject-2'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1'}
self.assertEqual(actual, expected)
def test_query_by_id(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids(where="id='S2' OR id='S1'")
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
def test_query_by_alternate_id_header(self):
metadata = Metadata(pd.DataFrame(
{}, index=pd.Index(['id1', 'id2', 'id3'], name='#OTU ID')))
obs = metadata.get_ids(where="\"#OTU ID\" IN ('id2', 'id3')")
exp = {'id2', 'id3'}
self.assertEqual(obs, exp)
def test_no_columns(self):
metadata = Metadata(
pd.DataFrame({}, index=pd.Index(['a', 'b', 'my-id'], name='id')))
obs = metadata.get_ids()
exp = {'a', 'b', 'my-id'}
self.assertEqual(obs, exp)
def test_query_mixed_column_types(self):
df = pd.DataFrame({'Name': ['Foo', 'Bar', 'Baz', 'Baaz'],
# numbers that would sort incorrectly as strings
'Age': [9, 10, 11, 101],
'Age_Str': ['9', '10', '11', '101'],
'Weight': [80.5, 85.3, np.nan, 120.0]},
index=pd.Index(['S1', 'S2', 'S3', 'S4'], name='id'))
metadata = Metadata(df)
# string pattern matching
obs = metadata.get_ids(where="Name LIKE 'Ba_'")
exp = {'S2', 'S3'}
self.assertEqual(obs, exp)
# string comparison
obs = metadata.get_ids(where="Age_Str >= 11")
exp = {'S1', 'S3'}
self.assertEqual(obs, exp)
# numeric comparison
obs = metadata.get_ids(where="Age >= 11")
exp = {'S3', 'S4'}
self.assertEqual(obs, exp)
# numeric comparison with missing data
obs = metadata.get_ids(where="Weight < 100")
exp = {'S1', 'S2'}
self.assertEqual(obs, exp)
def test_column_with_space_in_name(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'Sample Type': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
metadata.get_ids()
# The list of captured warnings should be empty
self.assertFalse(w)
class TestMerge(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_merging_nothing(self):
md = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
with self.assertRaisesRegex(ValueError,
'At least one Metadata.*nothing to merge'):
md.merge()
def test_merging_two(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_three(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_unaligned_indices(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [9, 8, 7], 'd': [12, 11, 10]},
index=pd.Index(['id3', 'id2', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 15, 14], 'f': [16, 18, 17]},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index= | pd.Index(['id1', 'id2', 'id3'], name='id') | pandas.Index |
import datetime
import logging
from openpyxl import Workbook
from openpyxl.styles import Border, Side, PatternFill, Font
from openpyxl.utils.dataframe import dataframe_to_rows
import os
import pandas as pd
import Levenshtein
def get_data_dir():
"""
Get the path of the data directory
:return:
"""
if os.path.exists('data'):
return 'data'
elif os.path.join('..', 'data'):
return os.path.join('..', 'data')
raise Exception('Data directory not found')
def fix_village_if_necessary(linelist, villages):
idx = linelist[~linelist['Village'].isin(villages)].index
for i in idx:
min_distance = 5
min_village = ''
for village in villages:
dist = Levenshtein.distance(linelist.loc[i, 'Village'], village)
if dist < min_distance:
min_distance = dist
min_village = village
if min_village == '':
logging.error(f'Line {i} of Linelist removed because village not found : ' + linelist.loc[i, 'Village'])
linelist = linelist[linelist.index != i]
else:
linelist.loc[i, 'Village'] = min_village
return linelist
def aggregate_on_column(column_name, linelist, value):
"""
Compute the number of cases from linelist with a value for a column
:param column_name: The column name in linelist dataframe on which the condition apply
:param linelist: The dataframe on which the agregation will be done
:param value: The value on which the dataframe will be filtered for the column column_name
:return:
"""
df = linelist[linelist[column_name] == value]
df_agg = df.groupby(['Village', 'Week', 'Year']).count()[column_name]
df_agg = df_agg.reset_index()
return df_agg
def aggregate(input_file):
"""
Read input file and aggregate the number of case by week and village
:param input_file: The Linelist Excel file
:return: Dataframe whit aggregation. Columns : 'CODE_LOCATION', 'Year', 'Month', 'Week', 'Num of cases'
"""
# Get data
linelist = pd.read_excel(input_file, sheet_name='Linelist')[['Date of Assessment', 'Village', 'Malaria RDT result']]
linelist = linelist[~pd.isnull(linelist).transpose().any()]
epiweek = pd.read_excel(input_file, sheet_name='Epiweeks', header=2)[['Epi week', 'Month', 'First day in week']]
geo = pd.read_excel(input_file, sheet_name='GEO')[['Camp/village', 'GEOID (pcode)?']]
geo.columns = ['Village', 'CODE_LOCATION']
# Transform data
linelist['Date of Assessment'] = pd.to_datetime(linelist['Date of Assessment'])
linelist['Week'] = linelist['Date of Assessment'].apply(lambda t: t.isocalendar()[1])
linelist['Year'] = linelist['Date of Assessment'].apply(lambda t: t.year)
linelist = fix_village_if_necessary(linelist, geo['Village'].unique())
linelist_agg = linelist.groupby(['Village', 'Week', 'Year']).count()['Date of Assessment']
linelist_agg = linelist_agg.reset_index()
linelist_agg = pd.merge(linelist_agg, aggregate_on_column('Malaria RDT result', linelist, 1),
how='outer', on=['Village', 'Week', 'Year'])
linelist_agg.columns = ['Village', 'Week', 'Year', 'Num of cases', 'Num of cases where malaria RTD was positive']
epiweek['Year'] = -1
for i in range(len(epiweek)):
if type(epiweek.loc[i, 'First day in week']) is str:
epiweek.loc[i, 'First day in week'] = datetime.datetime.strptime(epiweek.loc[i, 'First day in week'], '%d/%m/%Y')
epiweek.loc[i, 'Year'] = epiweek.loc[i, 'First day in week'].year
if epiweek.loc[i, 'Epi week'] == 1 and epiweek.loc[i, 'First day in week'].month == 12:
epiweek.loc[i, 'Year'] += 1
epiweek = epiweek[['Epi week', 'Month', 'Year']]
epiweek.columns = ['Week', 'Month', 'Year']
# Merge data
linelist_agg_geo = pd.merge(linelist_agg, geo, how='left', on='Village')
res = pd.DataFrame()
villages = geo['CODE_LOCATION'].unique()
villages = villages[~pd.isnull(villages)]
for code_loc in sorted(villages):
code_loc_linelist = linelist_agg_geo[linelist_agg_geo['CODE_LOCATION'] == code_loc]
code_loc_cases = | pd.merge(epiweek, code_loc_linelist, how='left', on=['Year', 'Week']) | pandas.merge |
import datetime
from collections import namedtuple
import numpy as np
import pandas as pd
import torch
# from ctgan.data_transformer import DataTransformer
from ctgan.synthesizers.base import BaseSynthesizer
from rdt.transformers import OneHotEncodingTransformer
from sklearn.mixture import BayesianGaussianMixture
from torch.nn import Linear, Module, Parameter, ReLU, Sequential
from torch.nn.functional import cross_entropy
from torch.optim import Adam
SpanInfo = namedtuple("SpanInfo", ["dim", "activation_fn"])
ColumnTransformInfo = namedtuple(
"ColumnTransformInfo",
[
"column_name",
"column_type",
"transform",
"transform_aux",
"output_info",
"output_dimensions",
],
)
class DataTransformer(object):
"""Data Transformer.
Model continuous columns with a BayesianGMM and normalized to a scalar [0, 1] and a vector.
Discrete columns are encoded using a scikit-learn OneHotEncoder.
Args:
max_clusters (int):
Maximum number of Gaussian distributions in Bayesian GMM.
weight_threshold (float):
Weight threshold for a Gaussian distribution to be kept.
"""
def __init__(self, max_clusters=10, weight_threshold=0.005):
self._max_clusters = max_clusters
self._weight_threshold = weight_threshold
self.gm = BayesianGaussianMixture(
n_components=self._max_clusters,
weight_concentration_prior_type="dirichlet_process",
weight_concentration_prior=0.001,
n_init=1,
warm_start=True,
)
def _fit_continuous(self, column_name, raw_column_data):
"""Train Bayesian GMM for continuous column."""
self.gm.fit(raw_column_data.reshape(-1, 1))
valid_component_indicator = self.gm.weights_ > self._weight_threshold
num_components = valid_component_indicator.sum()
return ColumnTransformInfo(
column_name=column_name,
column_type="continuous",
transform=self.gm,
transform_aux=valid_component_indicator,
output_info=[SpanInfo(1, "tanh"), SpanInfo(num_components, "softmax")],
output_dimensions=1 + num_components,
)
def _fit_discrete(self, column_name, raw_column_data):
"""Fit one hot encoder for discrete column."""
ohe = OneHotEncodingTransformer(error_on_unknown=False)
ohe.fit(raw_column_data)
num_categories = len(ohe.dummies)
return ColumnTransformInfo(
column_name=column_name,
column_type="discrete",
transform=ohe,
transform_aux=None,
output_info=[SpanInfo(num_categories, "softmax")],
output_dimensions=num_categories,
)
def fit(self, raw_data, discrete_columns=tuple()):
"""Fit self.
GMM for continuous columns and One hot encoder for discrete columns.
This step also counts the #columns in matrix data, and span information.
"""
self.output_info_list = []
self.output_dimensions = 0
if not isinstance(raw_data, pd.DataFrame):
self.dataframe = False
raw_data = | pd.DataFrame(raw_data) | pandas.DataFrame |
import glob as gb
import librosa
import librosa.display
import numpy as np
import time
import skimage.measure
import os
import scipy
from scipy.spatial import distance
import pandas as pd
import tensorflow.keras as k
import data_utils as du
from skimage.transform import resize
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
start_time = time.time()
# region DataPreparation
def compute_ssm(X, metric="cosine"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
for i in range(D.shape[0]):
for j in range(D.shape[1]):
if np.isnan(D[i, j]):
D[i, j] = 0
D /= D.max()
return 1 - D
def mel_spectrogram(sr_desired, filepath, window_size, hop_length):
"""This function calculates the mel spectrogram in dB with Librosa library"""
y, sr = librosa.load(filepath, sr=None)
if sr != sr_desired:
y = librosa.core.resample(y, sr, sr_desired)
sr = sr_desired
S = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=window_size, hop_length=hop_length, n_mels=80, fmin=80,
fmax=16000)
S_to_dB = librosa.power_to_db(S, ref=np.max) # convert S in dB
return S_to_dB # S_to_dB is the spectrogam in dB
def fourier_transform(sr_desired, name_song, window_size, hop_length):
"""This function calculates the mel spectrogram in dB with Librosa library"""
y, sr = librosa.load(name_song, sr=None)
if sr != sr_desired:
y = librosa.core.resample(y, sr, sr_desired)
sr = sr_desired
stft = np.abs(librosa.stft(y=y, n_fft=window_size, hop_length=hop_length))
return stft
def max_pooling(stft, pooling_factor):
x_prime = skimage.measure.block_reduce(stft, (1, pooling_factor), np.max)
return x_prime
def sslm_gen(spectrogram, pooling_factor, lag, mode, feature):
padding_factor = lag
"""This part pads a mel spectrogram gived the spectrogram a lag parameter
to compare the first rows with the last ones and make the matrix circular"""
pad = np.full((spectrogram.shape[0], padding_factor), -70) # 80x30 frame matrix of -70dB corresponding to padding
S_padded = np.concatenate((pad, spectrogram), axis=1) # padding 30 frames with noise at -70dB at the beginning
"""This part max-poolend the spectrogram in time axis by a factor of p"""
x_prime = max_pooling(S_padded, pooling_factor)
x = []
if feature == "mfcc":
"""This part calculates a circular Self Similarity Lag Matrix given
the mel spectrogram padded and max-pooled"""
# MFCCs calculation from DCT-Type II
MFCCs = scipy.fftpack.dct(x_prime, axis=0, type=2, norm='ortho')
MFCCs = MFCCs[1:, :] # 0 componen ommited
# Bagging frames
m = 2 # baggin parameter in frames
x = [np.roll(MFCCs, n, axis=1) for n in range(m)]
elif feature == "chroma":
"""This part calculates a circular Self Similarity Lag Matrix given
the chromagram padded and max-pooled"""
PCPs = librosa.feature.chroma_stft(S=x_prime, sr=sr_desired, n_fft=window_size, hop_length=hop_length)
PCPs = PCPs[1:, :]
# Bagging frames
m = 2 # Bagging parameter in frames
x = [np.roll(PCPs, n, axis=1) for n in range(m)]
x_hat = np.concatenate(x, axis=0)
# Cosine distance calculation: D[N/p,L/p] matrix
distances = np.zeros((x_hat.shape[1], padding_factor // p)) # D has as dimensions N/p and L/p
for i in range(x_hat.shape[1]): # iteration in columns of x_hat
for l in range(padding_factor // p):
if i - (l + 1) < 0:
cur_dist = 1
elif i - (l + 1) < padding_factor // p:
cur_dist = 1
else:
cur_dist = 0
if mode == "cos":
cur_dist = distance.cosine(x_hat[:, i],
x_hat[:, i - (l + 1)]) # cosine distance between columns i and i-L
elif mode == "euc":
cur_dist = distance.euclidean(x_hat[:, i],
x_hat[:, i - (l + 1)]) # euclidian distance between columns i and i-L
if cur_dist == float('nan'):
cur_dist = 0
distances[i, l] = cur_dist
# Threshold epsilon[N/p,L/p] calculation
kappa = 0.1
epsilon = np.zeros((distances.shape[0], padding_factor // p)) # D has as dimensions N/p and L/p
for i in range(padding_factor // p, distances.shape[0]): # iteration in columns of x_hat
for l in range(padding_factor // p):
epsilon[i, l] = np.quantile(np.concatenate((distances[i - l, :], distances[i, :])), kappa)
# We remove the padding done before
distances = distances[padding_factor // p:, :]
epsilon = epsilon[padding_factor // p:, :]
x_prime = x_prime[:, padding_factor // p:]
# Self Similarity Lag Matrix
sslm = scipy.special.expit(1 - distances / epsilon) # aplicación de la sigmoide
sslm = np.transpose(sslm)
sslm = skimage.measure.block_reduce(sslm, (1, 3), np.max)
# Check if SSLM has nans and if it has them, substitute them by 0
for i in range(sslm.shape[0]):
for j in range(sslm.shape[1]):
if np.isnan(sslm[i, j]):
sslm[i, j] = 0
# if mode == "euc":
# return sslm, x_prime
# return sslm
return sslm, x_prime
def ssm_gen(spectrogram, pooling_factor):
"""This part max-poolend the spectrogram in time axis by a factor of p"""
x_prime = max_pooling(spectrogram, pooling_factor)
"""This part calculates a circular Self Similarity Matrix given
the mel spectrogram padded and max-pooled"""
# MFCCs calculation from DCT-Type II
MFCCs = scipy.fftpack.dct(x_prime, axis=0, type=2, norm='ortho')
MFCCs = MFCCs[1:, :] # 0 componen ommited
# Bagging frames
m = 2 # baggin parameter in frames
x = [np.roll(MFCCs, n, axis=1) for n in range(m)]
x_hat = np.concatenate(x, axis=0)
x_hat = np.transpose(x_hat)
ssm = compute_ssm(x_hat)
# Check if SSLM has nans and if it has them, substitute them by 0
for i in range(ssm.shape[0]):
for j in range(ssm.shape[1]):
if np.isnan(ssm[i, j]):
ssm[i, j] = 0
return ssm
# endregion
window_size = 2048 # (samples/frame)
hop_length = 1024 # overlap 50% (samples/frame)
sr_desired = 44100
p = 2 # pooling factor
p2 = 3 # 2pool3
L_sec_near = 14 # lag near context in seconds
L_near = round(L_sec_near * sr_desired / hop_length) # conversion of lag L seconds to frames
MASTER_DIR = 'D:/Google Drive/Resources/Dev Stuff/Python/Machine Learning/Master Thesis/'
DEFAULT_LABELPATH = os.path.join(MASTER_DIR, 'Labels/')
TRAIN_DIR = 'F:/Master Thesis Input/NewTrain/'
MIDI_DIR = os.path.join(MASTER_DIR, 'Data/MIDIs/')
def util_main_helper(feature, filepath, mode="cos", predict=False, savename=""):
sslm_near = None
if feature == "mfcc":
mel = mel_spectrogram(sr_desired, filepath, window_size, hop_length)
if mode == "cos":
sslm_near = sslm_gen(mel, p, L_near, mode=mode, feature="mfcc")[0]
# mls = max_pooling(mel, p2)
# Save mels matrices and sslms as numpy arrays in separate paths
# np.save(im_path_mel_near + song_id, mls)
elif mode == "euc":
sslm_near = sslm_gen(mel, p, L_near, mode=mode, feature="mfcc")[0]
if sslm_near.shape[1] < max_pooling(mel, 6).shape[1]:
sslm_near = np.hstack((np.ones((301, 1)), sslm_near))
elif sslm_near.shape[1] > max_pooling(mel, 6).shape[1]:
sslm_near = sslm_near[:, 1:]
elif feature == "chroma":
stft = fourier_transform(sr_desired, filepath, window_size, hop_length)
sslm_near = sslm_gen(stft, p, L_near, mode=mode, feature="chroma")[0]
if mode == "euc":
if sslm_near.shape[1] < max_pooling(stft, 6).shape[1]:
sslm_near = np.hstack((np.ones((301, 1)), sslm_near))
elif sslm_near.shape[1] > max_pooling(stft, 6).shape[1]:
sslm_near = sslm_near[:, 1:]
elif feature == "mls":
mel = mel_spectrogram(sr_desired, filepath, window_size, hop_length)
sslm_near = ssm_gen(mel, pooling_factor=6)
"""
# UNCOMMENT TO DISPLAY FEATURE GRAPHS
# recurrence = librosa.segment.recurrence_matrix(sslm_near, mode='affinity', k=sslm_near.shape[1])
plt.figure(figsize=(15, 10))
if feature == "mls":
plt.title("Mel Log-scaled Spectrogram - Self-Similarity matrix (MLS SSM)")
plt.imshow(sslm_near, origin='lower', cmap='plasma', aspect=0.8) # switch to recurrence if desired
else:
plt_title = "Self-Similarity Lag Matrix (SSLM): "
if feature == "chroma":
plt_title += "Chromas, "
else:
plt_title += "MFCCs, "
if mode == "cos":
plt_title += "Cosine Distance"
else:
plt_title += "Euclidian Distance"
plt.title(plt_title)
plt.imshow(sslm_near.astype(np.float32), origin='lower', cmap='viridis', aspect=0.8)
# switch to recurrence if desired
plt.show()
"""
if not predict:
# Save matrices and sslms as numpy arrays in separate paths
np.save(filepath, sslm_near)
else:
return sslm_near
def util_main(feature, mode="cos", predict=False, inpath=TRAIN_DIR, midpath=MIDI_DIR):
img_path = ""
if feature == "mfcc":
if mode == "cos":
img_path = os.path.join(inpath, 'SSLM_MFCC_COS/')
elif mode == "euc":
img_path = os.path.join(inpath, 'SSLM_MFCC_EUC/')
elif feature == "chroma":
if mode == "cos":
img_path = os.path.join(inpath, 'SSLM_CRM_COS/')
elif mode == "euc":
img_path = os.path.join(inpath, 'SSLM_CRM_EUC/')
elif feature == "mls":
img_path = os.path.join(inpath, 'MLS/')
if not os.path.exists(img_path):
os.makedirs(img_path)
num_songs = sum([len(files) for r, d, files in os.walk(midpath)])
i = 0
for folder in gb.glob(midpath + "*"):
for file in os.listdir(folder):
# foldername = folder.split('\\')[-1]
name_song, name = file, file.split('/')[-1].split('.')[0]
start_time_song = time.time()
i += 1
song_id = name_song[:-4] # delete .ext characters from the string
print("\tPreparing", song_id, "for processing...")
if str(song_id) + ".npy" not in os.listdir(img_path):
util_main_helper(feature, folder + '/' + name_song, mode, predict, savename=img_path + song_id)
print("\t\tFinished", i, "/", num_songs, "- Duration: {:.2f}s".format(time.time() - start_time_song))
else:
print("\t\tAlready completed. Skipping...\n\t\tFinished", i, "/", num_songs)
# return
print("All files have been converted. Duration: {:.2f}s".format(time.time() - start_time))
def validate_folder_contents(labels, midis, mlsdir, sslm1, sslm2, sslm3, sslm4):
"""Ensure all folders contain files of the same name"""
labelfiles = os.listdir(labels)
midifiles = os.listdir(midis)
mlsfiles = os.listdir(mlsdir)
sslm1files = os.listdir(sslm1)
sslm2files = os.listdir(sslm2)
sslm3files = os.listdir(sslm3)
sslm4files = os.listdir(sslm4)
for i in range(len(labelfiles)):
c_lbl = os.path.splitext(labelfiles[i])[0]
c_midi = os.path.splitext(midifiles[i])[0]
c_mls = os.path.splitext(mlsfiles[i])[0]
c_sslm1 = os.path.splitext(sslm1files[i])[0]
c_sslm2 = os.path.splitext(sslm2files[i])[0]
c_sslm3 = os.path.splitext(sslm3files[i])[0]
c_sslm4 = os.path.splitext(sslm4files[i])[0]
if c_lbl != c_midi or c_lbl != c_mls or\
c_lbl != c_sslm1 or c_lbl != c_sslm2 or c_lbl != c_sslm3 or c_lbl != c_sslm4:
err = FileNotFoundError("File discrepency at index " + str(i))
print("Current labels: ")
print(f"Label: {c_lbl}\nMIDI: {c_midi}\nMLS: {c_mls}\nSSLM-CRM-COS: {c_sslm1}"
f"\nSSLM-CRM-EUC: {c_sslm2}\nSSLM-MFCC-COS: {c_sslm3}\nSSLM-MFCC-EUC: {c_sslm4}")
raise err
if len(labelfiles) != len(midifiles) or len(labelfiles) != len(mlsfiles) or \
len(labelfiles) != len(sslm1files) or len(labelfiles) != len(sslm2files) or\
len(labelfiles) != len(sslm3files) or len(labelfiles) != len(sslm4files):
raise ValueError("Not all directories contain the same number of files")
# region Transformations
def gaussian(x, mu, sig):
"""Create array of labels"""
return np.exp(-np.power((x - mu) / sig, 2.) / 2)
def borders(image, label, labels_sec, label_form):
"""This function transforms labels in sc to gaussians in frames"""
pooling_factor = 6
num_frames = image.shape[2]
repeated_label = []
for i in range(len(labels_sec) - 1):
if labels_sec[i] == labels_sec[i + 1]:
repeated_label.append(i)
labels_sec = np.delete(labels_sec, repeated_label, 0) # labels in seconds
labels_sec = labels_sec / pooling_factor # labels in frames
# Pad frames we padded in images also in labels but in seconds
sr = sr_desired
padding_factor = 50
label_padded = [labels_sec[i] + padding_factor * hop_length / sr for i in range(labels_sec.shape[0])]
vector = np.arange(num_frames)
new_vector = (vector * hop_length + window_size / 2) / sr
sigma = 0.1
gauss_array = []
for mu in (label_padded[1:]): # Ignore first label (beginning of song) due to insignificance (0.000 Silence)
gauss_array = np.append(gauss_array, gaussian(new_vector, mu, sigma))
for i in range(len(gauss_array)):
if gauss_array[i] > 1:
gauss_array[i] = 1
return image, label[1:], gauss_array, label_form
def padding_MLS(image, label, labels_sec, label_form):
"""This function pads 30frames at the begining and end of an image"""
sr = sr_desired
padding_factor = 50
def voss(nrows, ncols=16):
"""Generates pink noise using the Voss-McCartney algorithm.
nrows: number of values to generate
rcols: number of random sources to add
returns: NumPy array
"""
array = np.empty((nrows, ncols))
array.fill(np.nan)
array[0, :] = np.random.random(ncols)
array[:, 0] = np.random.random(nrows)
# the total number of changes is nrows
n = nrows
cols = np.random.geometric(0.5, n)
cols[cols >= ncols] = 0
rows = np.random.randint(nrows, size=n)
array[rows, cols] = np.random.random(n)
df = pd.DataFrame(array)
df.fillna(method='ffill', axis=0, inplace=True)
total = df.sum(axis=1)
return total.values
n_mels = image.shape[1] # Default(80) - fit padding to image height
y = voss(padding_factor * hop_length - 1)
S = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=window_size, hop_length=hop_length,
n_mels=n_mels, fmin=80, fmax=16000)
S_to_dB = librosa.power_to_db(S, ref=np.max)
pad_image = S_to_dB[np.newaxis, :, :]
# Pad MLS
S_padded = np.concatenate((pad_image, image), axis=-1)
S_padded = np.concatenate((S_padded, pad_image), axis=-1)
return S_padded, label, labels_sec, label_form
def padding_SSLM(image, label, labels_sec, label_form):
"""This function pads 30 frames at the begining and end of an image"""
padding_factor = 50
# Pad SSLM
pad_image = np.full((image.shape[1], padding_factor), 1)
pad_image = pad_image[np.newaxis, :, :]
S_padded = np.concatenate((pad_image, image), axis=-1)
S_padded = np.concatenate((S_padded, pad_image), axis=-1)
return S_padded, label, labels_sec, label_form
def normalize_image(image, label, labels_sec, label_form):
"""This function normalizes an image"""
image = np.squeeze(image) # remove
def normalize(array):
"""This function normalizes a matrix along x axis (frequency)"""
normalized = np.zeros((array.shape[0], array.shape[1]))
for i in range(array.shape[0]):
normalized[i, :] = (array[i, :] - np.mean(array[i, :])) / np.std(array[i, :])
return normalized
image = normalize(image)
# image = (image-np.min(image))/(np.max(image)-np.min(image))
image = np.expand_dims(image, axis=0)
return image, label, labels_sec, label_form
# endregion
# Load MLS and SSLM Data
class BuildDataloader(k.utils.Sequence):
def __init__(self, images_path, label_path=DEFAULT_LABELPATH, transforms=None, batch_size=32, end=-1, reshape=True):
self.songs_list = []
self.images_path = images_path
self.images_list = []
self.labels_path = label_path
self.labels_list = []
self.labels_sec_list = []
self.labels_form_list = []
self.batch_size = batch_size
self.n = 0
self.reshape = reshape
print("Building dataloader for " + self.images_path)
cnt = 1
for (im_dirpath, im_dirnames, im_filenames) in os.walk(self.images_path):
for f in im_filenames:
if f.endswith('.npy'):
self.songs_list.append(os.path.splitext(f)[0])
# print("Reading file #" + str(cnt))
img_path = im_dirpath + f
image = np.load(img_path, allow_pickle=True)
if image.ndim == 1:
raise ValueError("Erroneous file:", img_path, "Shape:", image.shape, image.ndim)
else:
# image = resize(image, (300, 500))
# image = (image - image.mean()) / (image.std() + 1e-8)
if reshape:
image = np.mean(image, axis=0)
else:
image1 = np.mean(image, axis=0)
image2 = np.var(image, axis=0)
image = np.array([image1, image2])
self.images_list.append(image)
cnt += 1
if end != -1:
if cnt == end + 1:
break
lbls_seconds, lbls_phrases, lbl_forms = du.ReadLabelSecondsPhrasesFromFolder(lblpath=self.labels_path, stop=cnt)
self.labels_list = lbls_phrases
self.labels_sec_list = lbls_seconds
self.labels_form_list = lbl_forms
self.transforms = transforms
self.max = self.__len__()
def __len__(self):
return len(self.images_list)
def __getitem__(self, index):
# print("LEN: " + str(self.max) + " TRU LEN: " + str(len(self.images_list)) + " INDX: " + str(index))
image = self.images_list[index]
# print(image.shape, image.ndim)
# print(image)
# if image.ndim == 1:
# print(image)
if self.reshape:
image = image[np.newaxis, :, np.newaxis]
labels = self.labels_list[index]
# print("Labels: ", str(len(labels)), "Images: ", str(len(image)), image.shape)
labels_sec = self.labels_sec_list[index]
labels_form = self.labels_form_list[index]
song_name = self.songs_list[index]
if self.transforms is not None:
for t in self.transforms:
image, labels, labels_sec, labels_form = t(image, labels, labels_sec, labels_form)
return image, [labels, labels_sec, labels_form, song_name]
def __next__(self):
if self.n >= self.max:
self.n = 0
result = self.__getitem__(self.n)
self.n += 1
return result
def getNumClasses(self):
return len(self.labels_form_list[1])
def getLabels(self):
return self.labels_form_list
def getImages(self):
return self.images_list
def getCurrentIndex(self):
return self.n
def getSong(self, index):
return self.songs_list[index]
def getFormLabel(self, index):
return self.labels_form_list[index]
def getDuration(self, index):
return self.labels_sec_list[index][-1]
def get_midi_dataframe(building_df=False):
df = | pd.DataFrame(columns=['spectral_contrast_mean', 'spectral_contrast_var']) | pandas.DataFrame |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
Date: 2020/3/23 19:12
Contact: <EMAIL>
Desc: 东方财富网-数据中心-沪深港通持股
http://data.eastmoney.com/hsgtcg/
http://finance.eastmoney.com/news/1622,20161118685370149.html
"""
import requests
import json
import pandas as pd
def stock_em_hsgt_north_net_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f52",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_north_cash(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f53",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_north_acc_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f54",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = | pd.DataFrame(data_json["data"]["s2n"]) | pandas.DataFrame |
from __future__ import absolute_import, division, unicode_literals
import datetime
import pytest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
pytest.skip('numpy is not available', allow_module_level=True)
import jsonpickle
import jsonpickle.ext.pandas
@pytest.fixture(scope='module', autouse=True)
def pandas_extension():
"""Initialize the numpy extension for this test module"""
jsonpickle.ext.pandas.register_handlers()
yield # control to the test function.
jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(obj):
return jsonpickle.decode(jsonpickle.encode(obj))
def test_series_roundtrip():
ser = pd.Series(
{
'an_int': np.int_(1),
'a_float': np.float_(2.5),
'a_nan': np.nan,
'a_minus_inf': -np.inf,
'an_inf': np.inf,
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.datetime64('2014-01-01'),
'complex': np.complex_(1 - 2j),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_({'a': 'b'}),
}
)
decoded_ser = roundtrip(ser)
assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip():
df = pd.DataFrame(
{
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.array([np.datetime64('2014-01-01')] * 3),
'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_([{'a': 'b'}]*3),
}
)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip():
df = pd.DataFrame(
{
'idx_lvl0': ['a', 'b', 'c'],
'idx_lvl1': np.int_([1, 1, 2]),
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
}
)
df = df.set_index(['idx_lvl0', 'idx_lvl1'])
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip():
df = pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_index_roundtrip():
idx = pd.Index(range(5, 10))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_index_roundtrip():
idx = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_ragged_datetime_index_roundtrip():
idx = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timedelta_index_roundtrip():
idx = pd.timedelta_range(start='1 day', periods=4, closed='right')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_period_index_roundtrip():
idx = pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_int64_index_roundtrip():
idx = pd.Int64Index([-1, 0, 3, 4])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_uint64_index_roundtrip():
idx = pd.UInt64Index([0, 3, 4])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_float64_index_roundtrip():
idx = pd.Float64Index([0.1, 3.7, 4.2])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_interval_index_roundtrip():
idx = pd.IntervalIndex.from_breaks(range(5))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_interval_index_roundtrip():
idx = pd.IntervalIndex.from_breaks(pd.date_range('2019-01-01', '2019-01-10'))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_multi_index_roundtrip():
idx = pd.MultiIndex.from_product(((1, 2, 3), ('a', 'b')))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timestamp_roundtrip():
obj = pd.Timestamp('2019-01-01')
decoded_obj = roundtrip(obj)
assert decoded_obj == obj
def test_period_roundtrip():
obj = pd.Timestamp('2019-01-01')
decoded_obj = roundtrip(obj)
assert decoded_obj == obj
def test_interval_roundtrip():
obj = pd.Interval(2, 4, closed=str('left'))
decoded_obj = roundtrip(obj)
assert decoded_obj == obj
def test_b64():
"""Test the binary encoding"""
# array of substantial size is stored as b64
a = np.random.rand(20, 10)
index = ['Row' + str(i) for i in range(1, a.shape[0] + 1)]
columns = ['Col' + str(i) for i in range(1, a.shape[1] + 1)]
df = pd.DataFrame(a, index=index, columns=columns)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_series_list_index():
"""Test pandas using series with a list index"""
expect = pd.Series(0, index=[1, 2, 3])
actual = roundtrip(expect)
assert expect.values[0] == actual.values[0]
assert 0 == actual.values[0]
assert expect.index[0] == actual.index[0]
assert expect.index[1] == actual.index[1]
assert expect.index[2] == actual.index[2]
def test_series_multi_index():
"""Test pandas using series with a multi-index"""
expect = | pd.Series(0, index=[[1], [2], [3]]) | pandas.Series |
import numpy as np
import pandas as pd
import pandas._testing as tm
class TestTranspose:
def test_transpose_tzaware_1col_single_tz(self):
# GH#26825
dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
df = pd.DataFrame(dti)
assert (df.dtypes == dti.dtype).all()
res = df.T
assert (res.dtypes == dti.dtype).all()
def test_transpose_tzaware_2col_single_tz(self):
# GH#26825
dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
df3 = pd.DataFrame({"A": dti, "B": dti})
assert (df3.dtypes == dti.dtype).all()
res3 = df3.T
assert (res3.dtypes == dti.dtype).all()
def test_transpose_tzaware_2col_mixed_tz(self):
# GH#26825
dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
dti2 = dti.tz_convert("US/Pacific")
df4 = | pd.DataFrame({"A": dti, "B": dti2}) | pandas.DataFrame |
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, MultiIndex,
RangeIndex)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
goal_time = 0.2
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
param_names = ['dtype', 'method']
def setup(self, dtype, method):
N = 10**5
dates_left = | date_range('1/1/2000', periods=N, freq='T') | pandas.date_range |
import collections
from functools import partial, reduce, lru_cache
import simplejson as json
import pandas as pd
import numpy as np
from .core import parallel, render_html, listify, add, CORD_CHALLENGE_PATH, BIORXIV_MEDRXIV, \
NONCOMM_USE_SUBSET, CUSTOM_LICENSE, COMM_USE_SUBSET, find_data_dir
from pathlib import Path, PurePath
import pickle
from .text import preprocess
import ipywidgets as widgets
from typing import Dict, List
from gensim.corpora import Dictionary
import time
_JSON_CATALOG_SAVEFILE = 'JsonCatalog'
PDF_JSON = 'pdf_json'
PMC_JSON = 'pmc_json'
def get_text_sections(paper_json, text_key) -> Dict:
"""
:param paper_json: The json
:param text_key: the text_key - "body_text" or "abstract"
:return: a dict with the sections
"""
body_dict = collections.defaultdict(list)
for rec in paper_json[text_key]:
body_dict[rec['section'].strip()].append(rec['text'])
return body_dict
get_body_sections = partial(get_text_sections, text_key='body_text')
get_abstract_sections = partial(get_text_sections, text_key='abstract')
def get_text(paper_json, text_key) -> str:
"""
:param paper_json: The json
:param text_key: the text_key - "body_text" or "abstract"
:return: a text string with the sections
"""
if not text_key in paper_json:
return ''
body_dict = collections.defaultdict(list)
for rec in paper_json[text_key]:
body_dict[rec['section']].append(rec['text'])
body = ''
for section, text_sections in body_dict.items():
body += section + '\n\n'
for text in text_sections:
body += text + '\n\n'
return body
get_body = partial(get_text, text_key='body_text')
get_abstract = partial(get_text, text_key='abstract')
def author_name(author_json):
first = author_json.get('first')
middle = "".join(author_json.get('middle'))
last = author_json.get('last')
if middle:
return ' '.join([first, middle, last])
return ' '.join([first, last])
def get_affiliation(author_json):
affiliation = author_json['affiliation']
institution = affiliation.get('institution', '')
location = affiliation.get('location')
if location:
location = ' '.join(location.values())
return f'{institution}, {location}'
def get_authors(paper_json, include_affiliation=False):
if include_affiliation:
return [f'{author_name(a)}, {get_affiliation(a)}'
for a in paper_json['metadata']['authors']]
else:
return [author_name(a) for a in paper_json['metadata']['authors']]
def get_pdf_json_paths(metadata: str, data_path: str) -> pd.Series:
"""
:param metadata: The CORD Research Metadata
:param data_path: The path to the CORD data
:return: a Series containing the PDF JSON paths
"""
def path_fn(full_text_file, sha):
if sha and isinstance(sha, str) and isinstance(full_text_file, str):
return Path(data_path) / full_text_file / full_text_file / PDF_JSON / f'{sha}.json'
sha_paths = metadata.apply(lambda m: [path_fn(m.full_text_file, sha.strip()) for sha in m.sha.split(';')]
if m.has_pdf_parse else np.nan, axis=1)
return sha_paths
def get_first_json(jsons: List):
if isinstance(jsons, list) and len(jsons) > 0:
return jsons[0]
return jsons
def get_pmcid_json_paths(metadata: pd.DataFrame, data_path: str) -> pd.Series:
"""
:param metadata: The CORD Research Metadata
:param data_path: The path to the CORD data
:return: a series containing the paths to the PMC JSONS .. will contain nans
"""
def path_fn(full_text_file, pmcid):
if pmcid and isinstance(pmcid, str) and isinstance(full_text_file, str):
pmcid_path= Path(data_path) / full_text_file / full_text_file / PMC_JSON / f'{pmcid}.xml.json'
if pmcid_path.exists():
return pmcid_path
return np.nan
pmc_paths = metadata.apply(lambda m: path_fn(m.full_text_file, m.pmcid), axis=1)
return pmc_paths
def get_json_paths(metadata: pd.DataFrame, data_path, first=True, tolist=False) -> pd.Series:
"""
:param metadata: The CORD Research Metadata
:param data_path: The path to the CORD data
:return: a series containing the paths to the JSONS .. will contain nans
"""
paths_df = metadata[['source']].copy()
data_path = Path(data_path)
def path_fn(path):
if isinstance(path, str):
return [data_path / p.strip() for p in path.split(';')]
return np.nan
paths_df['json_path'] = metadata.pmc_json_files.fillna(metadata.pdf_json_files).apply(path_fn)
if tolist:
if first:
paths_df.json_path.apply(get_first_json).dropna().tolist()
else:
path_list = paths_df.json_path.apply(lambda p: [a.strip() for a in p.split(';')]).tolist()
return [a for ps in path_list for a in ps]
else:
return paths_df.json_path.apply(get_first_json)
class JsonPaper:
def __init__(self, paper_json):
self.paper_json = paper_json
@property
def sha(self):
return self.paper_json['paper_id']
@property
def title(self):
return self.paper_json['metadata']['title']
@property
def text(self):
return get_body(self.paper_json)
@property
def abstract(self):
return get_abstract(self.paper_json)
@property
def html(self):
sections = get_body_sections(self.paper_json)
html = render_html('JsonPaperBody', title=self.title, sections=sections)
return widgets.HTML(html)
@property
def abstract_html(self):
sections = get_abstract_sections(self.paper_json)
html = render_html('JsonPaperBody', title=self.title, sections=sections)
return widgets.HTML(html)
@property
def authors(self):
return get_authors(self.paper_json)
@classmethod
def from_json(cls, paper_json):
return cls(paper_json=paper_json)
@classmethod
def from_dict(cls, paper_dict):
sha = paper_dict['sha']
text = paper_dict['text']
abstract = paper_dict['abstract']
title = paper_dict['title']
authors = paper_dict['authors']
return cls(sha=sha, text=text, abstract=abstract, title=title, authors=authors)
def to_dict(self):
return {'sha': self.sha, 'abstract': self.abstract,
'title': self.title, 'authors': ' '.join(self.authors)}
def _repr_html_(self):
return render_html('JPaper', paper=self)
def __repr__(self):
return 'JsonPaper'
@lru_cache(maxsize=1024)
def load_json_file(json_file):
with Path(json_file).open('r') as f:
return json.load(f)
def load_json_paper(json_file):
with Path(json_file).open('r') as f:
contents = json.load(f)
return JsonPaper(contents)
def load_text_body_from_file(json_path):
with json_path.open('r') as f:
json_content = json.load(f)
body_text = get_text(json_content, 'body_text')
authors = get_authors(json_content)
sha = json_path.stem
return sha, body_text, authors
def load_text(json_path):
"""
Load the text from the Json file
:param json_path:
:return: the text body of the json file
"""
with json_path.open('r') as f:
json_content = json.load(f)
body_text = get_text(json_content, 'body_text')
return body_text
def load_tokens_from_file(json_path):
sha, text, authors = load_text_body_from_file(json_path)
return sha, preprocess(text), authors
def list_json_files_in(json_path):
# As of April 4th the json files are separated into two directories
all_json_files = []
for sub_dir in ['pdf_json', 'pmc_json']:
json_sub_path = json_path / sub_dir
if json_sub_path.exists():
all_json_files = all_json_files + list(json_sub_path.glob('*.json'))
return all_json_files
def load_json_texts(json_dirs=None, tokenize=False):
data_path = Path(find_data_dir())
json_dirs = json_dirs or [BIORXIV_MEDRXIV, NONCOMM_USE_SUBSET, COMM_USE_SUBSET, CUSTOM_LICENSE]
json_dirs = listify(json_dirs)
text_dfs = []
for json_dir in json_dirs:
json_path = Path(data_path) / json_dir / json_dir
print('Loading json from', json_path.stem)
load_fn = load_tokens_from_file if tokenize else load_text_body_from_file
sha_texts_authors = parallel(load_fn, list_json_files_in(json_path))
text_dfs.append(pd.DataFrame(sha_texts_authors, columns=['sha', 'text', 'authors']))
text_df = | pd.concat(text_dfs, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
import base64
import datetime
import io
import json
import numpy as np
import os
import pandas as pd
import re
import tempfile
from zipfile import ZipFile
from dash import Dash, dcc, html, callback_context
from dash.dependencies import Input, Output, State
# button: upload Excel Questionnarie
bt_up = dcc.Upload(
html.Button("Click to Upload", id="btn"),
id="upload-data",
)
# dropdown: year/s to parse (Excel Sheet/s)
# initialize empty
years_in_excel = {}
dd_years = dcc.Dropdown(
id="my_years",
placeholder="Year/s in Questionnarie",
multi=True,
)
# button: upload Excel Mapping
bt_up_map = dcc.Upload(
html.Button("Click to Upload", id="btn_map"),
id="upload-map",
)
# button: download converted SDMX-csvs (can't be placed in dcc.Download ?)
bt_con_dwd = html.Button("Click to Download", id="btn-con-dwd")
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# Build App
app = Dash(__name__, external_stylesheets=external_stylesheets)
# to deploy using WSGI server
server = app.server
# app tittle for web browser
app.title = "ESSPROS to SDMX-csv converter"
# App Layout
app.layout = html.Div([
html.H6(
"Expenditures and Receipts Excel to SDMX Conversor",
style={'verticalAlign': 'bottom', 'fontWeight': 'bold'},
),
html.Hr(),
# div questionnarie
html.Div([
html.Div(
["Excel Questionnaire", bt_up],
style={'width': '20%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
html.Div(
["Questionnaire Validation", html.Button("Click to Validate", id="btn-val")],
style={'width': '20%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
html.Div(
["Select Year/s to Report", dd_years],
style={'width': '35%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
]),
html.Hr(),
# div mapping
html.Div([
html.Div(
["Excel Mapping DSD", bt_up_map],
style={'width': '20%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
html.Div(
["Mapping Validation", html.Button("Click to Validate", id="btn-val-map")],
style={'width': '20%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
html.Div(
["Conversion Execution", html.Button("Click to Convert", id="btn-exe-con")],
style={'width': '20%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
html.Div(
[
"Conversion Download",
bt_con_dwd,
dcc.Download(id="data-download"),
],
style={'width': '20%', 'display': 'inline-block', 'verticalAlign': 'top'},
),
]),
# display output for UX
html.Div(id='ux-display'),
# hidden div: html ouput from load button
html.Div(id='output-data-upload', style={'display': 'none'}),
# hidden div to share excel_quest_df across
html.Div(id='excel-quest-df', style={'display': 'none'}),
# hidden div to share questionnarie sheet names across
html.Div(id='excel-sheetnames', style={'display': 'none'}),
# hidden div: html ouput from questionnarie validation
html.Div(id='val-quest-output', style={'display': 'none'}),
# hidden div to share reported years in questionnarie
html.Div(id='years-report', style={'display': 'none'}),
# hidden div to share quest reported years with proper schemes and codes
html.Div(id='sheets-years-schemes', style={'display': 'none'}),
# hidden div: html ouput from load map button
html.Div(id='output-map-upload', style={'display': 'none'}),
# hidden div to share excel_map_df across
html.Div(id='excel-map-df', style={'display': 'none'}),
# hidden div to share map sheet names across
html.Div(id='map-sheetnames', style={'display': 'none'}),
# hidden div: html ouput from mapping validation
html.Div(id='val-map-output', style={'display': 'none'}),
# hidden div to share mapping sheets (EXP and REC)
html.Div(id='mapping-sheets', style={'display': 'none'}),
# hidden div to share mapping sheet names for (EXP and REC)
html.Div(id='mapping-exp-rec-sheetnames', style={'display': 'none'}),
# hidden div to share mapping validation flag
html.Div(id='map-val-flag', style={'display': 'none'}),
# hidden div: html ouput from conversion
html.Div(id='conversion-output', style={'display': 'none'}),
# hidden div to share sdmx-csv EXP file
html.Div(id='sdmx-csv-exp-output', style={'display': 'none'}),
# hidden div to share sdmx-csv REC file
html.Div(id='sdmx-csv-rec-output', style={'display': 'none'}),
# hidden div to share conversion flag
html.Div(id='conv-flag', style={'display': 'none'}),
# hidden div to share union set of espross codes
html.Div(id='union-set-codes', style={'display': 'none'}),
# hidden div: html ouput from download
html.Div(id='con-dwd-output', style={'display': 'none'}),
])
def parse_excel_file(contents, filename, date):
# decoded as proposed in Dash Doc
_, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'xls' in filename:
# Assume that the user uploaded an excel file
quest_file = pd.ExcelFile(io.BytesIO(decoded))
excel_df = quest_file.parse(sheet_name=None, header=2)
else:
# Warn user hasn't uploaded an excel file
return (
html.Div([
html.Hr(),
html.H6("Questionnarie must be an Excel file"),
]), ({}, {})
)
except Exception as e:
print(e)
# Warn user excel file wasn't parsed
return (
html.Div([
html.Hr(),
html.H6(f"There was an error processing {filename}"),
]), ({}, {})
)
# return ingestion message and parsed Excel
return (
html.Div([
html.Hr(),
html.H6(f"Uploaded Questionnarie is {filename}"),
html.H6(f"Last modified datetime is {datetime.datetime.fromtimestamp(date)}"),
]),
# special treatment: excel number of sheets
(
[excel_df[k].to_json(orient='split') for k in excel_df]
if type(excel_df) is dict
else [excel_df.to_json(orient='split')],
quest_file.sheet_names
)
)
def parse_mapping_file(contents, filename, date):
# decoded as proposed in Dash Doc
_, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'xls' in filename:
# Assume that the user uploaded an excel file
map_file = pd.ExcelFile(io.BytesIO(decoded))
map_df = map_file.parse(sheet_name=None, header=6)
else:
# Warn user hasn't uploaded an excel file
return (
html.Div([
html.Hr(),
html.H6("SDMX Mapping must be provided in Excel"),
]), ({}, {})
)
except Exception as e:
print(e)
# Warn user excel file wasn't parsed
return (
html.Div([
html.Hr(),
html.H6(f"There was an error processing {filename}"),
]), ({}, {})
)
# return ingestion message and parsed Excel
return (
html.Div([
html.Hr(),
html.H6(f"Uploaded SDMX Mapping is {filename}"),
html.H6(f"Last modified datetime is {datetime.datetime.fromtimestamp(date)}"),
]),
# special treatment: excel number of sheets
(
[map_df[k].to_json(orient='split') for k in map_df]
if type(map_df) is dict
else [map_df.to_json(orient='split')],
map_file.sheet_names
)
)
@app.callback(
Output('output-data-upload', 'children'),
Output("excel-quest-df", "children"),
Output("excel-sheetnames", "children"),
Input('upload-data', 'contents'),
State('upload-data', 'filename'),
State('upload-data', 'last_modified'),
prevent_initial_call=True,
)
def wrap_excel_parsing(loaded_file, file_name, file_last_mod):
# coded as proposed in Dash Doc
# callback sees changes in content only (eg: not same content with different filename)
if loaded_file is not None:
html_out, quest_json = parse_excel_file(loaded_file, file_name, file_last_mod)
return html_out, quest_json[0], quest_json[1]
@app.callback(
Output('output-map-upload', 'children'),
Output("excel-map-df", "children"),
Output("map-sheetnames", "children"),
Input('upload-map', 'contents'),
State('upload-map', 'filename'),
State('upload-map', 'last_modified'),
prevent_initial_call=True,
)
def wrap_mapping_parse(loaded_file, file_name, file_last_mod):
# coded as proposed in Dash Doc
# callback sees changes in content only (eg: not same content with different filename)
if loaded_file is not None:
html_out, map_json = parse_mapping_file(loaded_file, file_name, file_last_mod)
return html_out, map_json[0], map_json[1]
@app.callback(
Output('val-quest-output', 'children'),
Output('years-report', 'children'),
Output('sheets-years-schemes', 'children'),
Input("btn-val", "n_clicks"),
State("excel-quest-df", "children"),
State("excel-sheetnames", "children"),
prevent_initial_call=True,
)
def validate_questionnarie(_, parsed_quest, quest_sheetnames):
flag_parsed = parsed_quest[0] if parsed_quest else None
if not flag_parsed:
return (
html.Div([
html.Hr(),
html.H6("Please upload Excel Questionnarie first"),
]),
{},
[]
)
# json into dict of df/s - questionnarie sheets
quest_df = {
k: pd.read_json(parsed_quest[i], orient='split')
for i, k in enumerate(quest_sheetnames)
}
# extract years regex from sheetnames
years_report = {
k: re.findall(r'\d{4}', k)[0]
for k in quest_sheetnames
if re.findall(r'\d{4}', k)
}
# check if any 'scheme' in expected headers for all years
schemes_report = [
k for k in years_report
if not quest_df[k].columns.str.contains(r'(?i)scheme').any()
]
# verify ESSPROS codes for all years: 100 diff from 350 plausability
num_codes = [
k for k in years_report
if not (
250 < pd.to_numeric(
quest_df[k].iloc[:,1], errors='coerce'
).notnull().sum() < 450
)
]
# years with proper header and ESSPROS codes
years_scheme_code = years_report.copy()
for year_no_scheme_or_code in set().union(schemes_report, num_codes):
del years_scheme_code[year_no_scheme_or_code]
# message (years in quest)
years_msg = (
f"Years reported in questionnarie {list(years_report.values())}"
if years_report else "No years in questionnarie to report"
)
# message (proper header with schemes)
schemes_msg = (
f"Adjust header for sheet/s {schemes_report} in questionnarie"
if schemes_report else ""
)
# message (proper aligned column with ESSPROS codes)
num_codes_msg = (
f"Adjust ESSPROS codes in column B for sheet/s {num_codes} in questionnarie"
if num_codes else ""
)
# sheetnames list if ESSPROS codes duplicated
dupli_codes = []
for k in years_scheme_code:
# all-transform to numeric
quest_df[k] = quest_df[k].apply(pd.to_numeric, errors='coerce')
# replace zeros with NaN for cleaning later
quest_df[k].replace(0, np.nan, inplace=True)
# retain column B name before dropping
col_b_name = quest_df[k].columns[1]
# standardize col_b_bame to 'ESSPROS_CODE'
quest_df[k].rename(columns={col_b_name: 'ESSPROS_CODE'}, inplace=True)
# drop columns if all NaN
quest_df[k].dropna(axis='columns', how='all', inplace=True)
# drop rows if missing numeric code; IMPORTANT --> loc 'ESSPROS_CODE'
quest_df[k].drop(quest_df[k][
quest_df[k].loc[:,'ESSPROS_CODE'].isnull()
].index, inplace=True)
# cast numeric codes to integer type
quest_df[k].loc[:,'ESSPROS_CODE'] = quest_df[k].loc[:,'ESSPROS_CODE'].astype("int64")
# check for duplicated codes
filter_duplicates = quest_df[k].loc[:,'ESSPROS_CODE'].duplicated()
if filter_duplicates.any():
dupli_codes.append(k)
quest_df[k].drop(quest_df[k][filter_duplicates].index, inplace=True)
# message (duplicated ESSPROS codes)
duplicated_msg = (
f"Eliminate duplicated ESSPROS codes for sheet/s {dupli_codes} in questionnarie"
if dupli_codes else ""
)
# build ouput message
output_msg = html.Div([
html.Hr(),
html.H6(years_msg),
html.H6(schemes_msg),
html.H6(num_codes_msg),
html.H6(duplicated_msg),
])
return (
output_msg,
json.dumps(years_scheme_code, indent = 4),
[quest_df[k].to_json(orient='split') for k in years_scheme_code],
)
@app.callback(
Output("my_years", "options"),
Input('years-report', 'children'),
Input('upload-data', 'contents'),
prevent_initial_call=True,
)
def update_dd_years(years_in_quest, _):
triger_id = (
callback_context.
triggered[0]['prop_id'].
split('.')[0]
)
# coded as proposed in Dash Doc (without PreventUpdate)
if ( (not years_in_quest) | ('upload' in triger_id) ):
return []
return [
{'label': v, 'value': k}
for k, v in json.load(io.StringIO(years_in_quest)).items()
]
# v for v in options if search_value in o["label"]
@app.callback(
Output('val-map-output', 'children'),
Output('mapping-sheets', 'children'),
Output('mapping-exp-rec-sheetnames', 'children'),
Output('map-val-flag', 'children'),
Output('union-set-codes', 'children'),
Input("btn-val-map", "n_clicks"),
State("excel-map-df", "children"),
State("map-sheetnames", "children"),
)
def validate_mapping(n_clicks, parsed_map, map_sheetnames):
# initial call, map-val-flag: False
if not n_clicks:
return [], [], [], False, []
flag_parsed = parsed_map[0] if parsed_map else None
if not flag_parsed:
return (
html.Div([
html.Hr(),
html.H6("Please upload Excel Mapping first"),
]), [], [], False, []
)
# json into dict of df/s - mapping sheets
mapping_df = {
k: pd.read_json(parsed_map[i], orient='split')
for i, k in enumerate(map_sheetnames)
}
# mapping must contain ('EXPEND' and 'RECEIPT')-like sheets
map_sheets = {
k: v
for k in map_sheetnames
for v in ['EXPEND', 'RECEIPT']
if re.findall(f"(?i){v}", k)
}
# message (map_sheets must equal two)
map_sheet_msg = (
f"Mapping file sheets to be used in the conversion: {list(map_sheets.keys())}"
if len(map_sheets) == 2
else
"There must be two sheets for 'EXPEND' and 'RECEIPT' in mapping file"
)
# check expected headers: 'CODE' (ESSPROS) and DSDs
dsd_commons = [
'CODE',
'FREQ',
'REF_AREA',
'TIME_PERIOD',
'OBS_VALUE',
'UNIT',
'UNIT_MULT',
]
dsd_not_in_header = [
k for k in map_sheets
if not all(
col in mapping_df[k].columns for col in dsd_commons
)
] if len(map_sheets) == 2 else []
# message (proper headers with DSD's)
dsd_header_msg = (
f"Adjust header for sheet/s {dsd_not_in_header} in mapping"
if dsd_not_in_header else ""
)
# check that provided DSD's (EXP and REC) differ
map_sheets_keys = list(map_sheets.keys())
map_sheets_vals = list(map_sheets.values())
dsd_not_differ = (
set(
mapping_df[map_sheets_keys[map_sheets_vals.index('EXPEND')]].columns
) ==
set(
mapping_df[map_sheets_keys[map_sheets_vals.index('RECEIPT')]].columns
)
if (
(not dsd_not_in_header) & (len(map_sheets) == 2)
) else False
)
# message (EXP and REC differ)
dsd_differ_msg = (
"'EXPEND' and 'RECEIPT' columns structure must differ"
if dsd_not_differ else ""
)
# ESSPROS number of codes plausability bounds
num_codes_bound = {
'EXPEND': [310 - 80, 310 + 80],
'RECEIPT': [60 - 20, 60 + 20],
}
# verify ESSPROS codes for all years: 100 diff from 350 plausability
num_codes = [
k for k,v in map_sheets.items()
if not (
num_codes_bound[v][0] < pd.to_numeric(
mapping_df[k].iloc[:,1], errors='coerce'
).notnull().sum() < num_codes_bound[v][1]
)
] if (
(not dsd_not_differ) & (not dsd_not_in_header) & (len(map_sheets) == 2)
) else []
# message (proper aligned column with ESSPROS codes)
num_codes_msg = (
f"Adjust ESSPROS codes in column B for sheet/s {num_codes} in mapping"
if num_codes else ""
)
# check if mapping validated for conversion
map_val_flag = (
(len(map_sheets) == 2) &
(not dsd_not_in_header) &
(not dsd_not_differ) &
(not num_codes)
)
# set of EXPEND and RECEIPT codes
set_of_codes = []
# sheetnames list if ESSPROS codes duplicated
dupli_codes = []
# check for duplicates only if validated
if map_val_flag:
for k in map_sheets:
# drop rows if missing numeric code
mapping_df[k].drop(mapping_df[k][
pd.to_numeric(mapping_df[k].CODE, errors='coerce').isnull()
].index, inplace=True)
# cast numeric codes to integer type
mapping_df[k].loc[:,"CODE"] = mapping_df[k].CODE.astype("int64")
# check for duplicated codes
filter_duplicates = mapping_df[k].CODE.duplicated()
if filter_duplicates.any():
dupli_codes.append(k)
mapping_df[k].drop(
mapping_df[k][filter_duplicates].index, inplace=True
)
set_of_codes.append(set(mapping_df[k].CODE))
# message (duplicated ESSPROS codes)
duplicated_msg = (
f"Eliminate duplicated ESSPROS codes for sheet/s {dupli_codes} in mapping"
if dupli_codes else ""
)
# check empty intersection between EXPEND and RECEIPT
codes_intersect = (
set_of_codes[0] & set_of_codes[1]
if set_of_codes else ()
)
# message (shared EXPEND and RECEIPT codes in mappings)
code_intersect_msg = (
f"Eliminate shared codes between sheets {list(map_sheets.keys())} in mapping"
if codes_intersect else ""
)
# drop codes_intersect (in both EXPEND and RECEIPT)
for esspros_code in codes_intersect:
for k in map_sheets:
mapping_df[k].drop(mapping_df[k][
mapping_df[k].CODE == esspros_code
].index, inplace=True)
# build ouput message
output_msg = html.Div([
html.Hr(),
html.H6(map_sheet_msg),
html.H6(dsd_header_msg),
html.H6(dsd_differ_msg),
html.H6(num_codes_msg),
html.H6(duplicated_msg),
html.H6(code_intersect_msg),
])
return (
output_msg,
[mapping_df[k].to_json(orient='split') for k in map_sheets],
json.dumps(map_sheets, indent = 4),
map_val_flag,
# union set
list(set_of_codes[0].union(set_of_codes[1])) if set_of_codes else []
)
# Hard-coded country map: Bosnia and Herzegovina
country_map = {
r'(?i)\bCountry\b': 'BA',
r'(?i)\bCountry_National_currency\b': 'BAM',
r'(?i)\*': ''
}
# callback ejecucion de la conversion
@app.callback(
Output('conversion-output', 'children'),
Output('sdmx-csv-exp-output', 'children'),
Output('sdmx-csv-rec-output', 'children'),
Output('conv-flag', 'children'),
Input("btn-exe-con", "n_clicks"),
State("my_years", "value"),
State("map-val-flag", "children"),
State('sheets-years-schemes', 'children'),
State('years-report', 'children'),
State('mapping-sheets', 'children'),
State('mapping-exp-rec-sheetnames', 'children'),
State('union-set-codes', 'children'),
)
def execute_conversion(
n_clicks,
years_selected,
map_val_flag,
quest_sheets,
quest_years,
map_sheets,
exp_rec_ref,
union_set_codes,
):
# initial call, conv-flag: False
if not n_clicks:
return [], [], [], False
if not years_selected:
return (
html.Div([
html.Hr(),
html.H6("Upload and validate questionnarie, and then select years to convert"),
]), [], [], False
)
if not map_val_flag:
return (
html.Div([
html.Hr(),
html.H6("Upload and validate mapping first"),
]), [], [], False
)
# quest_years: json to dict
quest_years = json.load(io.StringIO(quest_years))
# quest_df: json to dict of dfs
quest_df = {
k: | pd.read_json(quest_sheets[i], orient='split') | pandas.read_json |
import os
import sys
import time
import pandas as pd
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
from statsmodels import robust
columns_intermediate = ['frame_no', 'ts', 'ts_delta', 'protocols', 'frame_len', 'eth_src',
'eth_dst', 'ip_src', 'ip_dst', 'tcp_srcport', 'tcp_dstport',
'http_host', 'sni', 'udp_srcport', 'udp_dstport']
columns_state_features = ["start_time", "end_time", "meanBytes", "minBytes", "maxBytes",
"medAbsDev", "skewLength",
"kurtosisLength", "q10", "q20", "q30", "q40", "q50", "q60",
"q70", "q80", "q90", "spanOfGroup", "meanTBP", "varTBP",
"medianTBP", "kurtosisTBP", "skewTBP", "network_to", "network_from",
"network_both", "network_to_external", "network_local",
"anonymous_source_destination", "device", "state"]
# import warnings
"""
INPUT: intermediate files
OUTPUT: features for RFT models, with device and state labels
"""
root_exp = ''
root_feature = ''
random_ratio=0.8
num_per_exp=10
RED = "\033[31;1m"
END = "\033[0m"
usage_stm = """
Usage: python3 {prog_name} in_imd_dir out_features_dir
Performs statistical analysis on decoded pcap files.
Example: python3 {prog_name} tagged-intermediate/us/ features/us/
Arguments:
in_imd_dir: path to a directory containing text files of decoded pcap data
out_features_dir: path to the directory to write the analyzed CSV files;
directory will be generated if it does not already exist
For more information, see the README or model_details.md.""".format(prog_name=sys.argv[0])
#isError is either 0 or 1
def print_usage(isError):
if isError == 0:
print(usage_stm)
else:
print(usage_stm, file=sys.stderr)
exit(isError)
def main():
global root_exp, root_feature
path = sys.argv[0]
print("Running %s..." % path)
for arg in sys.argv:
if arg in ("-h", "--help"):
print_usage(0)
if len(sys.argv) != 3:
print("%s%s: Error: 2 arguments required. %d arguments found.%s"
% (RED, path, (len(sys.argv) - 1), END), file=sys.stderr)
print_usage(1)
if not os.path.isdir(sys.argv[1]):
print("%s%s: Error: Input directory %s does not exist!%s"
% (RED, path, sys.argv[1], END), file=sys.stderr)
print_usage(1)
root_exp = sys.argv[1]
root_feature = sys.argv[2]
print("Input files located in: %s" % root_exp)
print("Output files placed in: %s" % root_feature)
prepare_features()
def prepare_features():
global root_exp, root_feature
group_size = 50
dict_intermediates = dict()
dircache = root_feature + '/caches'
if not os.path.exists(dircache):
os.system('mkdir -pv %s' % dircache)
#Parse input file names
#root_exp/dir_device/dir_exp/intermeidate_file
for dir_device in os.listdir(root_exp):
training_file = root_feature + '/' + dir_device + '.csv' #Output file
#Check if output file exists
if os.path.exists(training_file):
print('Features for %s prepared already in %s' % (dir_device, training_file))
continue
full_dir_device = root_exp + '/' + dir_device
if os.path.isdir(full_dir_device) == False:
continue
for dir_exp in os.listdir(full_dir_device):
full_dir_exp = full_dir_device + '/' + dir_exp
if os.path.isdir(full_dir_exp) == False:
continue
for intermediate_file in os.listdir(full_dir_exp):
full_intermediate_file = full_dir_exp + '/' + intermediate_file
if intermediate_file[-4:] != ".txt":
print("%s is not a .txt file!" % full_intermediate_file)
continue
if 'companion' in intermediate_file:
state = '%s_companion_%s' % (dir_exp, dir_device)
device = intermediate_file.split('.')[-2] # the word before pcap
else:
state = dir_exp
device = dir_device
feature_file = (root_feature + '/caches/' + device + '_' + state
+ '_' + intermediate_file[:-4] + '.csv') #Output cache files
paras = (full_intermediate_file, feature_file, group_size, device, state)
#Dict contains devices that do not have an output file
if device not in dict_intermediates:
dict_intermediates[device] = []
dict_intermediates[device].append(paras)
devices = "Feature files to be generated from following devices: "
if len(dict_intermediates) == 0:
devices = devices + "None"
else:
for key, value in dict_intermediates.items():
devices = devices + key + ", "
devices = devices[:-2]
print(devices)
for device in dict_intermediates:
training_file = root_feature + '/' + device + '.csv'
list_data= []
list_paras = dict_intermediates[device]
for paras in list_paras:
full_intermediate_file = paras[0]
feature_file = paras[1]
group_size = paras[2]
device = paras[3]
state = paras[4]
tmp_data = load_features_per_exp(
full_intermediate_file, feature_file, group_size, device, state)
if tmp_data is None or len(tmp_data) == 0:
continue
list_data.append(tmp_data)
if len(list_data) > 0:
pd_device = pd.concat(list_data, ignore_index=True) #Concat all cache files together
print('Saved to %s' % training_file)
pd_device.to_csv(training_file, index=False) #Put in CSV file
print('%s: Features prepared!' % time.time())
def load_features_per_exp(intermediate_file, feature_file, group_size, deviceName, state):
#Load data from cache
if os.path.exists(feature_file):
print(' Load from %s' % feature_file)
return pd.read_csv(feature_file)
#Attempt to extract data from input files if not in previously-generated cache files
feature_data = extract_features(intermediate_file, feature_file, group_size, deviceName, state)
if feature_data is None or len(feature_data) == 0: #Can't extract from input files
print('No data or features from %s' % intermediate_file)
return
else: #Cache was generated; save to file
feature_data.to_csv(feature_file, index=False)
return feature_data
#Create CSV cache file
def extract_features(intermediate_file, feature_file, group_size, deviceName, state):
if not os.path.exists(intermediate_file):
print('%s not exist' % intermediate_file)
return
col_names = columns_intermediate
c= columns_state_features
pd_obj_all = pd.read_csv(intermediate_file, names=col_names, sep='\t')
pd_obj = pd_obj_all.loc[:, ['ts', 'ts_delta', 'frame_len','ip_src','ip_dst']]
num_total = len(pd_obj_all)
if pd_obj is None or num_total < 10:
return
print('Extracting from %s' % intermediate_file)
print(' %s packets %s' % (num_total, feature_file))
feature_data = pd.DataFrame()
num_pkts = int(num_total * random_ratio)
for di in range(0, num_per_exp):
random_indices = list(np.random.choice(num_total, num_pkts))
random_indices=sorted(random_indices)
pd_obj = pd_obj_all.loc[random_indices, :]
d = compute_tbp_features(pd_obj, deviceName, state)
feature_data = feature_data.append( | pd.DataFrame(data=[d], columns=c) | pandas.DataFrame |
import csv
import glob
import math
import os
import sys
from random import random, seed
from timeit import default_timer as timer
import time
from statistics import mean
from pathlib import Path
import networkx as nx
import numpy as np
from scapy.layers.inet import IP, UDP
from scapy.utils import PcapWriter, PcapReader
import tkinter as tk
from tkinter import filedialog
import zat
from zat.log_to_dataframe import LogToDataFrame
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
import matplotlib.transforms as mtrans
import shutil
from collections import Counter
from util.util import Util
class Nfstream_Operations():
@staticmethod
def move_pcacp_files_to_nfstream_repository(path_to_original_folder, path_to_nfstream_repository, filename_addition):
path_to_original_folder = path_to_original_folder
path_to_nfstream_repository = path_to_nfstream_repository
filename_addition = filename_addition
scan_file_order_path = path_to_original_folder + "/" + "scan_order.txt"
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
to_move_files_path = []
for file in scanned_files_list:
scenario_name = file.split(",")[0]
file_name = file.split(",")[1]
new_file_name = scenario_name + "_" + file_name
new_path = path_to_original_folder + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + filename_addition + ".pcap"
to_move_files_path.append((new_path, new_file_name))
for path, new_file_name in to_move_files_path:
new_file_path = path_to_nfstream_repository + "/" + new_file_name
shutil.copy(path, new_file_path)
@staticmethod
def add_nfstream_results_to_filtered_dataset_netflow(path_to_root_folder, path_to_nfstream_results):
path_to_root_folder = path_to_root_folder
path_to_nfstream_results = path_to_nfstream_results
nfstream_csv_glob = path_to_nfstream_results + "/*.csv"
nfstream_csv_files = glob.glob(nfstream_csv_glob)
nfstream_csv_files = list(
map(lambda x: (os.path.basename(x).split(".csv")[0].split("_", 3)[2],
os.path.basename(x).split(".csv")[0].split("_", 3)[3], x), nfstream_csv_files))
for index, (scenario_name, file_name, path_to_nfstream_file) in enumerate(nfstream_csv_files):
path_to_summary_csv_file = path_to_root_folder + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
print("File: " + str(index + 1) + "/" + str(len(nfstream_csv_files)))
nfstream_df = pd.read_csv(path_to_nfstream_file)
summary_df = pd.read_csv(path_to_summary_csv_file)
nfstream_df = pd.read_csv(path_to_nfstream_file)
summary_df = | pd.read_csv(path_to_summary_csv_file) | pandas.read_csv |
import pandas as pd
import numpy as np
import time
from static import PATH_START_PERSONAL
from pybliometrics.scopus import ScopusSearch
df_orig = | pd.read_excel(PATH_START_PERSONAL + '/arjan1.xlsx') | pandas.read_excel |
import yaml
import pandas as pd
import numpy as np
from os.path import join
from os import makedirs
import glob
import sys
import re
def parse_samplesheet(fp_samplesheet):
#print(fp_samplesheet.split('/')[-1])
# in a first iteration, open the file, read line by line and determine start
# of sample information by looking for a line starting with "[Data]".
# the following lines will be sample information, about lines are header infos.
row_sampleinformation = None
row_reads = None
with open(fp_samplesheet, "r") as f:
for linenumber, line in enumerate(f.readlines()):
if line.startswith("[Data]"):
row_sampleinformation = linenumber+1
elif line.startswith("[Reads]"):
row_reads = linenumber+1
if row_sampleinformation is None:
raise ValueError("Could not find [Data] line in file '%s'." % fp_samplesheet)
if row_reads is None:
raise ValueError("Could not find [Reads] line in file '%s'." % fp_samplesheet)
header = pd.read_csv(fp_samplesheet, sep=",", nrows=row_reads-2, index_col=0).dropna(axis=1, how="all").dropna(axis=0, how="all")
#header = header.set_index(header.columns[0])
header.index = list(map(lambda x: 'header_%s' % x, header.index))
header = header.dropna(axis=0, how="any")
header = header.T.reset_index()
del header['index']
# a xxx iteration parses sample information via pandas
ss = pd.read_csv(fp_samplesheet, sep=",", skiprows=row_sampleinformation, dtype={'Sample_Name': str, 'Sample_ID': str, 'spike_entity_id': str})
# bcl2fasta automatically changes - into _ char in output filenames
idx_rawilluminainput = ss[pd.notnull(ss['Lane'])].index
for f in ['Sample_ID', 'Sample_Name', 'Sample_Project']:
ss.loc[idx_rawilluminainput, f] = ss.loc[idx_rawilluminainput, f].apply(lambda x: x.replace('-', '_') if type(x) != float else x)
# bcl2fastq uses a S%03i index to address samples.
# They are numbered as occuring in the samplesheet order starting with 1.
# However, number is not increased if Sample_ID was already seen.
uidx = dict()
for _, sample_id in ss['Sample_ID'].iteritems():
if sample_id not in uidx:
uidx[sample_id] = len(uidx) + 1
ss['s-idx'] = ss['Sample_ID'].apply(lambda x: uidx[x])
ss['run'] = fp_samplesheet.split('/')[-1].replace('_spike.csv', '')
# TODO: ensure that sample names do not clash when not considering s-idx!
# fastq-prefix
fp_fastqs = []
for idx, row in ss.iterrows():
fp_fastq = ''
if pd.notnull(row['Sample_Project']):
fp_fastq = row['Sample_Project']
if pd.notnull(row['Sample_Name']):
fp_fastq = join(fp_fastq, row['Sample_ID'])
fp_fastqs.append(join(fp_fastq,
'%s' % (
row['Sample_Name'] if pd.notnull(
row['Sample_Name']) else row['Sample_ID'])))
ss['fastq-prefix'] = fp_fastqs
# remove samples that are marked to be ignored
if 'spike_ignore_sample' in ss.columns:
ss = ss[pd.isnull(ss['spike_ignore_sample'])]
if 'spike_notes' not in ss.columns:
ss['spike_notes'] = None
# merge with header information
if not all([c not in ss.columns for c in header.columns]):
raise ValueError("Header name conflicts with sample column in '%s'." % fp_samplesheet)
for c in header.columns:
ss[c] = header[c].iloc[0]
return ss
def validate_samplesheet(ss: pd.DataFrame, config, line_offset: int=22, err=sys.stderr):
"""Checks if sample sheet is valid.
Parameters
----------
ss : pd.DataFrame
Samplesheet to be validated.
config : dict from YAML
Snakemake configuration file holding information about projects.
line_offset : int
Default: 22.
To give user information about problematic lines, we need to go back
to the file (not the DataFrame) to address the correct line.
err : IO.stream
Default: sys.stderr
Stream onto which warnings are written.
Returns
-------
[str] : List of warnings
Raises
------
ValueError if errors are found in the sample sheet.
"""
errors = []
warnings = []
# ensure all needed columns are in the table
exp_columns = {'Lane', 'Sample_ID', 'Sample_Name', 'I7_Index_ID', 'index',
'Sample_Project', 'spike_entity_id', 'spike_entity_role'}
if len(exp_columns - set(ss.columns)) > 0:
errors.append(
'Samplesheet is missing column(s): "%s".' %
'", "'.join(sorted(exp_columns - set(ss.columns))))
# ensure to only use [A-z0-9_] in identifiers
allowedChars = re.compile("^[A-z0-9_]*$")
for field in ['Sample_ID', 'Sample_Name', 'Sample_Project',
'spike_entity_id', 'spike_entity_role']:
if field in ss:
for idx, x in ss[field].iteritems():
if pd.notnull(x):
if allowedChars.fullmatch(x) is None:
errors.append(
('%s in line %i contains a restricted char'
'acter: "%s". Only a-z A-Z 0-9 and _ are al'
'lowed!') % (field, line_offset+idx, x))
# ensure Sample_Project is not empty
if 'Sample_Project' in ss:
for idx, x in ss['Sample_Project'].iteritems():
if pd.isnull(x) or x.strip() == "":
errors.append('Line %i has an empty Sample_Project.' %
(line_offset+idx))
if len(errors) > 0:
raise ValueError('The following %i errors(s) were found in your sample sheet:\n%s\n' % (len(errors), '\n'.join(['ERROR %i: %s' % (i+1, error) for i, error in enumerate(errors)])))
# check that sample project is describes in config.yaml
for prj in ss['Sample_Project'].unique():
if prj not in config['projects']:
warnings.append(('Sample_Project "%s" is not described in config.'
'yaml. No processing other than demultiplexing w'
'ill be applied.') % (prj))
# check that spike_entity_role is a defined one
exp_roles = { 'patient', 'father', 'mother', 'sibling', 'healthy',
'tumor', 'tumor_patient', 'tumor_father', 'tumor_mother', 'tumor_sibling'}
for idx, row in ss.iterrows():
if | pd.notnull(row['spike_entity_role']) | pandas.notnull |
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
# self.whatever to access them in the rest of the script, runs before other scripts
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_massage(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
# the func should be able to identify this in-valid input and returns None after prints
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data));
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data));
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_metrics_findFirstTimeAboveVel_1(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [-0.000051, -0.000051, -0.000041, -0.000066, -0.000111, -0.000158, -0.000194, -0.000207, 0.000016, 0.000107, 0.000198]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_2(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_3(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_4(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeOutside_1(self):
pass
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
#result = metrics.common.findFirstTimeOutside(data_object)
#expected_result = 0
#self.assertEqual(expected_result, result)
#err: NameError: name 'pos' is not defined --------------------------------------------------------!!!!!!!!!
def test_metrics_colMean_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position')
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_colMean_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = 6.5
self.assertEqual(expected_result, result)
def test_metrics_colMean_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = np.nan
#self.assertEqual(expected_result, result)
np.testing.assert_equal(expected_result, result)
def test_metrics_colSD_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 3.1622776601683795
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position', 3)
expected_result = 2.29128784747792
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = | pandas.DataFrame(data=d) | pandas.DataFrame |
"""
Filename: scrape.py
Modified: 2019-06-17
Author: <NAME>
E-mail: <EMAIL>
License:
The code is licensed under MIT License. Please read the LICENSE file in
this distribution for details regarding the licensing of this code.
Description:
Checking CSV files for validity.
This is a very crude checker.
It is not:
- Efficient
- Written in idiomatic Python3
- PEP 8-compliant
- Beautiful (code-wise)
- Using a unit testing library like pytest
"""
import os
import pdftotext
import pandas as pd
pd.set_option("display.max_rows", 50000)
pd.set_option("display.max_columns", 50000)
| pd.set_option("display.width", 50000) | pandas.set_option |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [ | Timestamp("2011-01-01") | pandas.Timestamp |
# coding=utf-8
import numpy as np
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
import pandas.util.testing as tm
from .common import TestData
class TestFrameAsof(TestData):
def setup_method(self, method):
self.N = N = 50
self.rng = date_range('1/1/1990', periods=N, freq='53s')
self.df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=self.rng)
def test_basic(self):
df = self.df.copy()
df.loc[15:30, 'A'] = np.nan
dates = date_range('1/1/1990', periods=self.N * 3,
freq='25s')
result = df.asof(dates)
assert result.notnull().all(1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
assert result.notnull().all(1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == 14).all(1).all()
def test_subset(self):
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
df.loc[4:8, 'A'] = np.nan
dates = date_range('1/1/1990', periods=N * 3,
freq='25s')
# with a subset of A should be the same
result = df.asof(dates, subset='A')
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=['A', 'B'])
expected = df.asof(dates)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from ..datasets import amenities
__all__ = [
"add_neighborhood_features",
"feature_engineer_sales",
"get_modeling_inputs",
"run_regression",
]
# fields related to building characteristics
BUILDING_CHARACTERISTICS = [
"basements",
"building_code_description",
"central_air",
"depth",
"exterior_condition",
"fireplaces",
"frontage",
"garage_spaces",
"general_construction",
"homestead_exemption",
"interior_condition",
"is_condo",
"neighborhood",
"number_of_bathrooms",
"number_of_bedrooms",
"number_of_rooms",
"number_stories",
"police_district",
"season",
"topography",
"total_area",
"total_livable_area",
"type_heater",
"view_type",
"year_built",
"zip_code",
"zoning",
]
# categorical fields
CATEGORICAL = [
"basements",
"building_code_description",
"central_air",
"exterior_condition",
"fireplaces",
"garage_spaces",
"general_construction",
"homestead_exemption",
"interior_condition",
"is_condo",
"neighborhood",
"number_of_bathrooms",
"number_of_bedrooms",
"number_of_rooms",
"number_stories",
"sold_in_year_built",
"topography",
"type_heater",
"view_type",
"year_built",
"zoning",
"season",
"spacetime_flag*",
]
# fields we don't need to do the modeling
REMOVE = [
"geometry",
"sale_price",
"sale_price_indexed",
"time_offset",
"ln_sale_price",
"ln_sale_price_indexed",
"lat",
"lng",
"police_district",
"zip_code",
"sale_year",
]
def _knn_distance(coordinates, measureTo, k):
"""
Internal function to return the average distance to the k-nearest neighbors.
Parameters
----------
coordinates : array_like
the 2D array of coordinates for sales
measureTo : array_like
the coordinates of the thing we are measuring to
k : int
the number of neighbors to find
"""
nbrs = NearestNeighbors(n_neighbors=k, algorithm="ball_tree").fit(measureTo)
return nbrs.kneighbors(coordinates)
def add_neighborhood_features(sales):
"""
Add (dis)amenity distance features to the dataset of sales.
Parameters
----------
sales : DataFrame
the input data for sales
Returns
-------
DataFrame :
the output data with the added feature columns
"""
out = sales.copy()
salesXY = np.vstack([sales.geometry.x, sales.geometry.y]).T
# the features to calculate distances to
features = {
"Universities": [1, "dist_univ"],
"Parks": [1, "dist_park"],
"CityHall": [1, "dist_city_hall"],
"SubwayStations": [1, "dist_subway"],
"DryCleaners": [2, "dist_dry_clean"],
"Cafes": [3, "dist_cafes"],
"Bars": [3, "dist_bars"],
"Schools": [2, "dist_schools"],
"SchoolScores": [1, "closest_school_score"],
"GroceryStores": [2, "dist_grocery"],
"Libraries": [1, "dist_library"],
"NewConstructionPermits": [5, "dist_permits"],
"AggravatedAssaults": [5, "dist_agg_assaults"],
"GraffitiRequests": [5, "dist_graffiti"],
"AbandonedVehicleRequests": [5, "dist_abandoned_vehicle"],
}
for feature in features:
k, column = features[feature]
# load the amenity data
amenityData = getattr(amenities, feature).get()
# get neighbors
dists, indices = _knn_distance(salesXY, amenityData[["x", "y"]].values, k)
# calculate feature
if feature != "SchoolScores":
featureData = dists.mean(axis=1)
else:
featureData = amenityData["overall_score"].values[indices.squeeze()]
out[column] = featureData
return out
def feature_engineer_sales(sales, always_include=["spacetime_flag", "dist"]):
"""
Return a clean version of the input sales data after
performing multiple feature engineering steps.
Parameters
----------
sales : DataFrame
the input sales data
always_include : list, optional
list of any columns to include in the output data
Returns
-------
DataFrame :
the cleaned version of the data
"""
# Extract building characteristics that are present
building_characteristics = [
col for col in BUILDING_CHARACTERISTICS if col in sales.columns
]
def add_other_category(data, N=25):
return data.replace(data.value_counts(dropna=False).iloc[N:].index, "Other")
# Columns to keep
extra_cols = [
col
for col in sales.columns
if any(col.startswith(base) for base in always_include)
]
# Do the formatting
out = (
sales.loc[
:,
building_characteristics
+ [
"geometry",
"ln_sale_price",
"ln_sale_price_indexed",
"sale_price",
"sale_price_indexed",
"sale_year",
"sale_date",
"time_offset",
]
+ extra_cols,
]
.assign(
year_built=lambda df: | pd.to_numeric(df.year_built, errors="coerce") | pandas.to_numeric |
import jieba
from sklearn.cluster import KMeans
import re
from gensim.models import word2vec
import multiprocessing
import gensim
import numpy as np
import pandas as pd
import collections
import pandas
# mydict = ["result.txt"]
# file_path = '/data/cuimengmeng/News_Event/clustering/result2.txt'
# # 默认是精确模式
# test = WordCut()
# test.addDictionary(mydict) # 加载自定义词典
# # 分词,去停用词(集成在类中了),不显示在console,保存分词后的文件到file_path目录
# test.seg_file(file_path, show=False, write=True)
# 创建停用词列表
def stopwordslist():
stopwords = [line.strip() for line in open('/data/cuimengmeng/News_Event/data/老虎咬人事件/2007-03-23@老虎咬人之后', encoding='UTF-8').readlines()]
return stopwords
# 去除标点符号
#分词
#去停用词
def segment_text(source_corpus, train_corpus, coding, punctuation):
'''
切词,去除标点符号
:param source_corpus: 原始语料
:param train_corpus: 切词语料
:param coding: 文件编码
:param punctuation: 去除的标点符号
:return:
'''
with open(source_corpus, 'r', encoding=coding) as f, open(train_corpus, 'w', encoding=coding) as w:
for line in f:
# 去除标点符号
line = re.sub('[{0}]+'.format(punctuation), '', line.strip())
# 切词: 对文档中的每一行进行中文分词,默认是精确模式
seg_sentence = jieba.cut(line)
# 创建一个停用词列表
stopwords = stopwordslist()
# 输出结果为outstr
outstr = ''
# 去停用词
for word in seg_sentence:
if word not in stopwords:
if word != '\t':
outstr += word
outstr += " "
w.write(outstr)
# 对句子进行中文分词
def seg_depart(sentence):
# 对文档中的每一行进行中文分词
print("正在分词")
# # # 默认是精确模式
sentence_depart = jieba.cut(sentence.strip())
# 创建一个停用词列表
stopwords = stopwordslist()
# 输出结果为outstr
outstr = ''
# 去停用词
for word in sentence_depart:
if word not in stopwords:
if word != '\t':
outstr += word
outstr += " "
return outstr
# 严格限制标点符号
strict_punctuation = '。,、':∶;?‘’“”〝〞ˆˇ﹕︰﹔﹖﹑·¨….¸;!´?!~—ˉ|‖"〃`@﹫¡¿﹏﹋﹌︴々﹟#﹩$﹠&﹪%*﹡﹢﹦﹤‐ ̄¯―﹨ˆ˜﹍﹎+=<__-\ˇ~﹉﹊()〈〉‹›﹛﹜『』〖〗[]《》〔〕{}「」【】︵︷︿︹︽_﹁﹃︻︶︸﹀︺︾ˉ﹂﹄︼'
# 简单限制标点符号
simple_punctuation = '’!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
# 去除标点符号
punctuation = simple_punctuation + strict_punctuation
# 是每个词的向量维度
size = 10
# 是词向量训练时的上下文扫描窗口大小,窗口为5就是考虑前5个词和后5个词
window = 5
# 设置最低频率,默认是5,如果一个词语在文档中出现的次数小于5,那么就会丢弃
min_count = 1
# 是训练的进程数,默认是当前运行机器的处理器核数。
workers = multiprocessing.cpu_count()
# 切词语料
train_corpus_text = '/data/cuimengmeng/News_Event/clustering/result3.txt'
# w2v模型文件
model_text = 'w2v_size_{0}.model'.format(size)
source_corpus = "/data/cuimengmeng/News_Event/data/老虎咬人事件/2007-03-23@老虎咬人之后"
coding = 'utf-8'
f = open(source_corpus,"r",encoding='UTF-8')
line = f.readlines()
lines = ''.join(line)
# with open("./data/result2.txt","w",encoding='UTF-8') as f:
# f.write(seg_depart(lines))
# 切词 @TODO 切词后注释
segment_text(source_corpus, train_corpus_text, coding, punctuation)
# w2v训练模型 @TODO 训练后注释
sentences = word2vec.Text8Corpus(train_corpus_text) # 加载语料
model = word2vec.Word2Vec(sentences=sentences, size=size, window=window, min_count=min_count, workers=workers)
model.save(model_text)
# 加载模型
model = gensim.models.Word2Vec.load(model_text)
g = open(train_corpus_text, "r",encoding='UTF-8') # 设置文件对象
std = g.read() # 将txt文件的所有内容读入到字符串str中
g.close() # 将文件关闭
cc = std.split(' ') # ['标题', '老虎', '咬人', '之后发布', '时间', '20070323', '', '', '1426正文', '央视', '新闻频道', '社会', '记录', '3'
print("cc:",cc)
dd = []
kkl = dict()
# 文本向量化
'''
将每个词语向量化,并且append 在dd中,形成一个二维数组
并形成一个字典,index是序号,值是汉字
'''
index1 = []
for p in range(len(cc)): # 2896
hk = cc[p]
if hk in model:
vec = list(model.wv[hk])
dd.append(vec)
kkl[p] = hk
index1.append(p)
print("kkl:",kkl)
# kkl {0: '标题', 1: ':', 2: '老虎', 3: '咬人', 4: '之后', 6: '发布', 7: '时间', 8: ':', 9: '2007', 10: '-', 11: '03', 12: '-'
#dd [[0.021426199, -0.015310322, 0.028066937, 0.017086413, 0.020035753, -0.035560098, -0.042497594, -0.036129046, -0.0043878118, -0.026238237],
# 将二维数组转化成numpy
dd1 = np.array(dd)
estimator = KMeans(n_clusters=100) # 构造聚类器 生成100个分类
estimator.fit(dd1) # 聚类
label_pred = estimator.labels_ # 获取聚类标签
print("label_pred:",label_pred)
# label_pred [59 13 13 ... 61 20 18] len:2896 100个种类
# index 是某条向量的序号,值是分类号
index1 = list(range(len(dd1))) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
vc = pd.Series(label_pred, index=index1) # 转为一维数组,聚类标签和index1对应,index1为下标
aa = collections.Counter(label_pred) # 统计器 Counter({5: 225, 15: 66, 26: 64, 31: 58, 48: 53, 7: 44, 59: 43, 53: 42, 1: 41, 17: 40, 10: 40, 52: 39, 33: 39, 12: 39,
v = | pd.Series(aa) | pandas.Series |
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
import re
import gc
from article_parsers import efsyn_article_parser, save_articles_in_parts
def efsyn_article_links() -> list:
article_links = []
# hardcoded value, we want to keep only data for the past year
for page_id in range(600):
print(page_id)
try:
tanea_news_link = 'https://www.efsyn.gr/politiki?page=' + str(page_id)
response = get(tanea_news_link)
if response.status_code == 200:
news_soup = BeautifulSoup(response.text, 'html.parser')
article_links += ['https://www.efsyn.gr' + link['href'] for link in news_soup.find_all('a', class_='full-link', href=True)[:12]]
else:
break
except Exception as e:
print(e)
return article_links
if __name__ == "__main__":
article_links = efsyn_article_links()
links_df = | pd.DataFrame(article_links) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import click
from tqdm import tqdm
from unicodedata import normalize
from pjud import data
def consolidated_materia(path_processed = "data/processed/pjud"):
tqdm.pandas()
df_ingresos_materia = pd.read_feather(f"{path_processed}/processes_IngresosMateria.feather")
df_termino_materia = pd.read_feather(f"{path_processed}/processes_TerminosMateria.feather")
df_fulldata_materia = pd.merge(df_ingresos_materia, df_termino_materia, how='outer', on=['COD. TRIBUNAL','RIT','COD. MATERIA'])
columnas_drop = ['index_x', 'index_y', 'MES INGRESO', 'MES TERMINO']
df_fulldata_materia.drop(columnas_drop, axis = 'columns', inplace = True)
click.echo('Transformando data faltante ...')
df_fulldata_materia = df_fulldata_materia.progress_apply(data.transformdata.faltantes_materia, axis=1)
columnas_drop = ['TIPO CAUSA_y', 'MATERIA_y', 'TRIBUNAL_y', 'COD. CORTE_y', 'CORTE_y', 'FECHA INGRESO_y']
df_fulldata_materia.drop(columnas_drop, axis = 'columns', inplace = True)
df_fulldata_materia.rename(columns = {'COD. CORTE_x':'COD. CORTE',
'CORTE_x':'CORTE',
'TRIBUNAL_x':'TRIBUNAL',
'TIPO CAUSA_x':'TIPO CAUSA',
'MATERIA_x':'MATERIA',
'FECHA INGRESO_x':'FECHA INGRESO'
}, inplace = True)
filtro_oral = df_fulldata_materia[df_fulldata_materia['TRIBUNAL'].str.contains('ORAL')]
filtro_garantia = df_fulldata_materia[df_fulldata_materia['TRIBUNAL'].str.contains('GARANTIA')]
data.save_feather(df_fulldata_materia, 'consolidated_Materia', path_processed)
data.save_feather(filtro_oral, 'consolidated_JuicioOralesMateria', path_processed)
data.save_feather(filtro_garantia, 'consolidated_CausasGarantiaMateria', path_processed)
click.echo('Generado archivo Feather. Proceso Terminado')
def consolidated_rol(path_processed = "data/processed/pjud"):
tqdm.pandas()
df_ingresos_rol = pd.read_feather(f"{path_processed}/processes_IngresosRol.feather")
df_termino_rol = pd.read_feather(f"{path_processed}/processes_TerminosRol.feather")
df_fulldata_rol = | pd.merge(df_ingresos_rol, df_termino_rol, how='outer', on=['COD. TRIBUNAL','RIT']) | pandas.merge |
# Functions for import of API data
# standard library imports
from sqlalchemy.inspection import inspect
import pandas as pd
import urllib.request
import urllib.parse
import urllib
import time
# project-specific imports
from PhosQuest_app.data_access.db_sessions import create_sqlsession
from PhosQuest_app.data_access.class_functions import get_class_key_attrs
# =========================================================================== #
def get_table_values_for_search(class_name):
"""
Produces a list of the key values in class_name table that will be used in
the API search. Works for classes with single primary key only.
:param class_name: a sqlalchemy declarative class (sqlalchemy class object)
:return: list of key values for search (list)
"""
print('Obtaining key values for %s records in DB' % class_name.__name__)
# get the name of the primary key attribute in the class's corresponding
# table
key_attr = get_class_key_attrs(class_name, single_key=True)
# create a DB session
session = create_sqlsession()
# list of the value for the key field for all records [('val1',),...]
records = session.query(getattr(class_name, key_attr)).all()
# close the session
session.close()
# convert into list of str ['val1', ...]
keys_list = [val[0] for val in records]
print('Retrieved key values for %i %s records'
% (len(keys_list), class_name.__name__))
return keys_list
def get_uniprot_api_data(class_name):
"""
Obtains UniProt data for the objects currently in the DB table
corresponding to class_name and returns a data frame where required
information has been parsed.
:param class_name: a sqlalchemy declarative class (sqlalchemy class object)
:return: pandas data frame (df)
"""
print('Retrieving UniProt data for %s records' % class_name.__name__)
# Get all class_name table key values
keys_list = get_table_values_for_search(class_name)
# convert list into Uniprot query format 'val1 val2'
query_str = ' '.join(keys_list)
# Get the corresponding data
# The default base URL.
url = 'https://www.uniprot.org/uploadlists/'
# Parameters for UniProt API site, selecting specific qualifiers using the
# api_query_accession variable from the accession list function.
params = {
'from': 'ACC',
'to': 'ACC',
'format': 'tab',
'columns': 'id,protein names,comment(SUBCELLULAR LOCATION),families,'
'genes',
'query': query_str
}
# Takes the parameters and encodes it as it should be in the URL
# (e.g. %20 = 'a space').
data = urllib.parse.urlencode(params)
# Changes it to a type of encoding, e.g. bytes.
data = data.encode('utf-8')
# Requests the URL and and data (which has already been encoded above).
request = urllib.request.Request(url, data)
# Opens the URL with parameters.
response = urllib.request.urlopen(request)
# Places the data into a dataframe.
df = | pd.read_table(response) | pandas.read_table |
import pandas as pd
from util import get_company_names, company_rename, most_common
import ssl
import json
ssl._create_default_https_context = ssl._create_unverified_context
lastFullYear = 2020
def incidentsPerKm(dfAll):
dfKm = | pd.read_excel('./raw_data/pipeline_length.XLSX') | pandas.read_excel |
# Futu Algo: Algorithmic High-Frequency Trading Framework
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by <NAME> <<EMAIL>>, 2021
# Copyright (c) billpwchan - All Rights Reserved
import pandas as pd
from strategies.Strategies import Strategies
from util import logger
pd.options.mode.chained_assignment = None # default='warn'
class MACDCross(Strategies):
def __init__(self, input_data: dict, fast_period=12, slow_period=26, signal_period=9, observation=100):
self.MACD_FAST = fast_period
self.MACD_SLOW = slow_period
self.MACD_SIGNAL = signal_period
self.OBSERVATION = observation
self.default_logger = logger.get_logger("macd_cross")
super().__init__(input_data)
self.parse_data()
def parse_data(self, latest_data: pd.DataFrame = None, backtesting: bool = False):
# Received New Data => Parse it Now to input_data
if latest_data is not None:
# Only need to update MACD for the stock_code with new data
stock_list = [latest_data['code'][0]]
# Remove records with duplicate time_key. Always use the latest data to override
time_key = latest_data['time_key'][0]
self.input_data[stock_list[0]].drop(
self.input_data[stock_list[0]][self.input_data[stock_list[0]].time_key == time_key].index,
inplace=True)
# Append empty columns and concat at the bottom
latest_data = pd.concat([latest_data, | pd.DataFrame(columns=['MACD', 'MACD_signal', 'MACD_hist']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Sarubee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
xbrl_edinet.py
- script to parse EDINET XBRL
"""
import pandas as pd
from pathlib import Path
from zipfile import ZipFile
import re
import logging
logger = logging.getLogger(__name__)
from lxml import etree
from datetime import date
class XbrlEdinetParseError(RuntimeError):
pass
# 想定外のエラー
class XbrlEdinetParseUnexpectedError(RuntimeError):
pass
def xbrl_name_info(name):
# jpcrp030000-asr-001_X99999-000_2012-03-31_01_2012-06-28.xbrl
# のようなファイル名をパースする
# jp で始まってなかったらエラー
if name[0:2] != "jp":
raise XbrlEdinetParseError(f"Unsupported XBRL filename ({name}) !")
info = {}
info["cabinet_order_code"] = name[2:5] # 府令略号
info["style_code"] = name[5:11] # 様式略号
info["report_code"] = name[12:15] # 報告書略号
info["serial_number"] = int(name[16:19]) # 報告書連番
info["edinet_code"] = name[20:26] # EDINET コード or ファンドコード
info["additional_number"] = int(name[27:30]) # 追番
info["end_day"] = date.fromisoformat(name[31:41]) # 報告対象期間期末日 or 報告義務発生日
info["submission_number"] = int(name[42:44]) # 報告書提出回数
info["submission_day"] = date.fromisoformat(name[45:55])# 報告書提出日
return info
def parse_zip(zip_path):
with ZipFile(zip_path) as z:
# zip ファイルから XBRL/PublicDoc/*.xbrl ファイルを取り出して読む
# 対象の xbrl ファイルがない場合、複数ある場合、はとりあえずエラーにする
xbrl_file = None
for name in z.namelist():
if re.match('XBRL/PublicDoc/.*\.xbrl$', name) is not None:
if xbrl_file is not None:
XbrlEdinetParseError(f"Multiple xbrl files ('XBRL/Public/*.xbrl') in {zip_path}!")
xbrl_file = name
if xbrl_file is None:
XbrlEdinetParseUnexpectedError(f"No xbrl file ('XBRL/Public/*.xbrl') in {zip_path}!")
root = etree.fromstring(z.read(xbrl_file))
xbrl_name = Path(xbrl_file).name
logger.debug(f"XBRL filename: {xbrl_name}")
ninfo = xbrl_name_info(xbrl_name)
nsmap = root.nsmap
logger.debug(f"Namespace: {nsmap}")
## debug 用
## nsmap の全タグをそのまま csv 出力する
#for ns_pre, ns in nsmap.items():
# elements = []
# for elem in root.findall(f".//{{{ns}}}*"):
# d = {}
# d["tag"] = re.sub("^{[^}]*}", "", elem.tag)
# d["attrib"] = elem.attrib
# d["text"] = elem.text
# elements.append(d)
# df = pd.DataFrame(elements)
# df.to_csv(f"debug/{ns_pre}.csv", index=False)
# context タグを取得 (xbrli)
contexts = {}
for e_content in root.findall(f"./{{{nsmap['xbrli']}}}context"):
id_ = e_content.get("id")
# TODO: 区別がつかないものがあるので id 自体をそのまま入れてる。もうちょっとちゃんとできるかも
c = {"context_id" : id_, "instant" : None, "start_date" : None, "end_date" : None, "nonconsolidated" : None}
# 日付
e_period = e_content.find(f"./{{{nsmap['xbrli']}}}period")
if e_period is not None:
for t, s in zip(["instant", "startDate", "endDate"], ["instant", "start_date", "end_date"]):
e_date = e_period.find(f"./{{{nsmap['xbrli']}}}{t}")
if e_date is not None:
c[s] = e_date.text
# 非連結かどうか
if "NonConsolidatedMember" in id_:
c["nonconsolidated"] = True
contexts[id_] = c
# 値を取得する名前空間
target_key = []
target_key.append("jpdei_cor")
target_key.append(f"jp{ninfo['cabinet_order_code']}{ninfo['style_code']}-{ninfo['report_code']}_{ninfo['edinet_code']}-{str(ninfo['additional_number']).zfill(3)}")
target_key.append(f"jp{ninfo['cabinet_order_code']}_cor")
target_key.append(f"jp{ninfo['cabinet_order_code']}-{ninfo['report_code']}_cor") # 報告書略号が入ってる場合もある?
target_key.append("jppfs_cor")
nsmap_target = {k : nsmap[k] for k in target_key if k in nsmap}
# 値を取得
xbrl_data = []
for ns_pre, ns in nsmap_target.items():
for elem in root.findall(f".//{{{ns}}}*"):
d = {"ns_pre" : ns_pre}
d["tag"] = re.sub("^{[^}]*}", "", elem.tag)
for k, v in contexts[elem.get("contextRef")].items():
d[k] = v
d["text"] = elem.text
d["unit"] = elem.get("unitRef")
xbrl_data.append(d)
df = | pd.DataFrame(xbrl_data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Import all the dependent python libraries for this project
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, balanced_accuracy_score, plot_precision_recall_curve, precision_recall_curve, average_precision_score
# In[2]:
# Load the data using panda's read_csv method and verify whether the data has been loaded properly or not using sample method
# set header to None. This will prevent first row acting as a column name.
credit_data = pd.read_csv('crx.csv', header=None)
credit_data.sample(5)
# In[3]:
# Set the column names.
credit_data.columns = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'A10', 'A11', 'A12', 'A13', 'A14', 'A15', 'A16']
credit_data.sample(5)
# In[4]:
# Info method is used to inspect the data types and missing values. With the result below, we found there is no null value present in the dataset.
# However, the datatype does not match with the requirement.
credit_data.info()
# In[5]:
# Cross verify null values presence in the dataset. There is no null value present in the dataset.
print(credit_data.isnull().values.sum())
# In[6]:
# On Manual inspection, '?' is the only invalid value in the whole dataset.
# So, Instead of removing the rows that have '?s', Replace '?' with NaN value.
credit_data = credit_data.replace('?', np.nan)
# Inspect the changes by applying boolean mask technique
credit_data[~credit_data['A1'].notna()]
# In[7]:
credit_data.info()
# In[8]:
# Drop all the nan values from the dataset
credit_data.dropna(inplace=True)
credit_data.info()
# In[9]:
# Set appropriate dtype for numerical columns
credit_data["A2"] = pd.to_numeric(credit_data["A2"], downcast='float')
credit_data["A14"] = | pd.to_numeric(credit_data["A14"], downcast='integer') | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import scipy.io as sio
import pandas as pd
import os
def mat2df(mat_file, var=None, filepath=None):
var_in = var
if isinstance(var, str):
var = [var]
elif var is None:
if isinstance(mat_file, dict):
var = mat_file.keys()
v_names = []
# mat_file is a file path and var_list is a list of strings corresponding to structure field names
if isinstance(mat_file, str):
if os.path.isfile(mat_file):
return mat2df(sio.loadmat(mat_file, simplify_cells=True), var, filepath=mat_file)
elif os.path.isdir(mat_file):
df_list = []
for file in os.listdir(mat_file):
df_list.append(mat2df(file, var))
return pd.concat(df_list, axis=1).squeeze()
else:
print(mat_file + "is not a valid file path")
return
elif isinstance(mat_file, dict):
mat = mat_file
if any("__" in i for i in list(mat)) or any("readme" in i for i in list(mat)):
for i in list(mat):
if "__" not in i and "readme" not in i:
return mat2df(mat[i], var_in, filepath)
raise ValueError("no variable stored in {file}".format(file=filepath))
elif any(i in mat.keys() for i in var) or any("." in i for i in var):
df_list = []
for i in var:
if "." in i:
(left, right) = i.split(".", 1)
if left in mat.keys():
df_list.append(mat2df(mat[left], right, filepath))
elif i in mat.keys():
for v_name in list(set(var).intersection(mat.keys())):
v_names.append(v_name)
try:
df_list.append(pd.DataFrame(mat).filter(v_names).reset_index(drop=True)) # end
except ValueError as e:
print("warning:", e)
for cols in [mat[v_name] for v_name in v_names]:
if isinstance(cols,
dict): # if all values of dict are scalar, then an index must be provided
if all(np.isscalar(i) for i in cols.values()):
df_list.append(pd.DataFrame(cols, index=[0]))
else:
df_list.append(pd.DataFrame(cols).reset_index(drop=True))
else:
df_list.append(pd.DataFrame(cols).reset_index(drop=True))
return pd.concat(df_list, axis=1).squeeze()
else:
raise ValueError("None of the vars {vars} were found in {file}".format(vars=var, file=filepath))
elif isinstance(mat_file, list):
if isinstance(mat_file[0], str):
if os.path.isfile(mat_file[0]):
return pd.concat([mat2df(mat, var_in) for mat in mat_file], axis=1).squeeze()
else:
mat = | pd.DataFrame(mat_file) | pandas.DataFrame |
import pandas as pd
import numpy as np
from random import randint
import os.path
import click
from itertools import product
from sklearn.metrics import (
precision_score,
recall_score,
confusion_matrix,
accuracy_score,
)
from .preprocessing import (
feature_extraction,
group_feature_extraction,
normalize_data,
)
from .classification import (
split_data,
k_fold_crossvalid,
leave_one_group_out,
train_model,
predict_with_model,
predict_top_classes,
)
MODEL_NAMES = [
'LDA',
'random_forest',
'decision_tree',
'extra_tree',
'adaboost',
'knn',
'gaussianNB',
'linear_svc',
'svm',
'logistic_regression',
'neural_network',
]
NORMALIZER_NAMES = [
'raw',
'standard_scalar',
'total_sum',
'binary'
]
NOISE_VALUES = [
0,
0.0000000001,
0.000000001,
0.00000001,
0.0000001,
0.000001,
0.00001,
0.0001,
0.001,
0.01,
0.1,
1,
10,
100,
1000
]
@click.group()
def main():
pass
test_size = click.option('--test-size', default=0.2, help='The relative size of the test data')
num_estimators = click.option('--num-estimators', default=100, help='Number of trees in our Ensemble Methods')
num_neighbours = click.option('--num-neighbours', default=21, help='Number of clusters in our knn/MLknn')
n_components = click.option('--n-components', default=100,
help='Number of components for dimensionality reduction in Linear Discriminant Analysis')
model_name = click.option('--model-name', default='random_forest', help='The model type to train')
normalize_method = click.option('--normalize-method', default='standard_scalar', help='Normalization method')
feature_name = click.option('--feature-name', default='city', help='The feature to predict')
normalize_threshold = click.option('--normalize-threshold', default='0.0001',
help='Normalization threshold for binary normalization.')
@main.command('kfold')
@click.option('--k-fold', default=10, help='The value of k for cross-validation')
@test_size
@num_estimators
@num_neighbours
@n_components
@model_name
@normalize_method
@feature_name
@normalize_threshold
@click.option('--test-filename', default="test_sample.csv", help='Filename to save test dataset')
@click.option('--model-filename', default="model_k.pkl", help='Filename to save Model')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def kfold_cv(k_fold, test_size, num_estimators, num_neighbours, n_components, model_name, normalize_method,
feature_name, normalize_threshold, test_filename, model_filename, metadata_file, data_file, out_dir):
"""Train and evaluate a model with k-fold cross-validation. echo the model results to stderr."""
raw_data, microbes, feature, name_map = feature_extraction(data_file, metadata_file, feature_name=feature_name)
click.echo(f'Training {model_name} using {normalize_method} to predict {feature_name}',err=True)
tbl, seed = {}, randint(0, 1000)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
normalized = normalize_data(raw_data, method=normalize_method, threshold=normalize_threshold)
split_train_data, split_test_data, split_train_feature, split_test_feature = split_data(
normalized, feature, test_size=test_size, seed=seed
)
model, mean_score, std_score = k_fold_crossvalid(
split_train_data, split_train_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, k_fold=k_fold, seed=seed
)
click.echo(f'Average cross-validation score {mean_score} and standard deviation {std_score}',err=True)
predictions = predict_with_model(model, split_test_data).round()
file_name = str(model_name + '_' + normalize_method)
model_results = []
model_results.append(accuracy_score(split_test_feature, predictions.round()))
model_results.append(precision_score(split_test_feature, predictions, average="micro"))
model_results.append(recall_score(split_test_feature, predictions, average="micro"))
tbl[file_name] = model_results
conf_matrix = pd.DataFrame(confusion_matrix(split_test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), file_name + "." + 'csv'))
col_names = [
'Accuracy',
'Precision',
'Recall',
]
out_metrics = pd.DataFrame.from_dict(tbl, columns=col_names, orient='index')
out_metrics.to_csv(os.path.join(out_dir, str(model_name + '_' + normalize_method) + "." + 'csv'))
@main.command('one')
@test_size
@num_estimators
@num_neighbours
@n_components
@model_name
@normalize_method
@feature_name
@normalize_threshold
@click.option('--model-filename', default=None, help='Filename of previously saved model')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def eval_one(test_size, num_estimators, num_neighbours, n_components, model_name, normalize_method,
feature_name, normalize_threshold, model_filename, metadata_file, data_file, out_dir):
"""Train and evaluate a model. Print the model results to stderr."""
raw_data, microbes, feature, name_map = feature_extraction(data_file, metadata_file, feature_name=feature_name)
click.echo(f'Training {model_name} using {normalize_method} to predict {feature_name}',err=True)
tbl, seed = {}, randint(0, 1000)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'classification_report'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'classification_report'))
normalized = normalize_data(raw_data, method=normalize_method, threshold=normalize_threshold)
train_data, test_data, train_feature, test_feature = split_data(
normalized, feature, test_size=test_size, seed=seed
)
model = train_model(
train_data, train_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, seed=seed
)
predictions = predict_with_model(model, test_data).round()
conf_matrix = pd.DataFrame(confusion_matrix(test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), str(model_name + '_' + normalize_method) + "." + 'csv'))
model_results = []
model_results.append(accuracy_score(test_feature, predictions.round()))
model_results.append(precision_score(test_feature, predictions, average="micro"))
model_results.append(recall_score(test_feature, predictions, average="micro"))
col_names = [
'Accuracy',
'Precision',
'Recall',
]
tbl[str(model_name + ' ' + normalize_method)] = model_results
out_metrics = pd.DataFrame.from_dict(tbl, columns=col_names, orient='index')
out_metrics.to_csv(os.path.join(out_dir, str(model_name + '_' + normalize_method) + "." + 'csv'))
@main.command('all')
@test_size
@num_estimators
@num_neighbours
@n_components
@feature_name
@normalize_threshold
@click.option('--noisy', default=True, help='Add noise to data')
@click.argument('metadata_file', type=click.File('r'))
@click.argument('data_file', type=click.File('r'))
@click.argument('out_dir')
def eval_all(test_size, num_estimators, num_neighbours, n_components, feature_name, normalize_threshold, noisy,
metadata_file, data_file, out_dir):
"""Evaluate all models and all normalizers."""
raw_data, microbes, feature, name_map = feature_extraction(data_file, metadata_file, feature_name=feature_name)
click.echo(f'Training all models using multiple normalization to predict {feature_name}',err=True)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'pd_confusion_matrix'))
else:
os.mkdir(str(out_dir + '/' + 'confusion_matrix'))
os.mkdir(str(out_dir + '/' + 'pd_confusion_matrix'))
model_results = []
noise_data = [0]
if noisy==True:
noise_data = NOISE_VALUES
tbl, seed = {}, randint(0, 1000)
for model_name, norm_name in product(MODEL_NAMES, NORMALIZER_NAMES):
click.echo(
f'Training {model_name} using {norm_name} to predict {feature_name}',
err=True
)
normalized = normalize_data(raw_data, method=norm_name, threshold=normalize_threshold)
train_data, test_data, train_feature, test_feature = split_data(
normalized, feature, test_size=test_size, seed=seed
)
for i in noise_data:
click.echo(f'Gaussian noise {i} has been added',err=True)
# Adding noise to train data to check for over-fitting
train_noise = np.random.normal(0, i,(train_data.shape[0], train_data.shape[1]))
train_data = train_data+ train_noise
model = train_model(
train_data, train_feature, method=model_name,
n_estimators=num_estimators, n_neighbours=num_neighbours, n_components=n_components, seed=seed
)
predictions = predict_with_model(model, test_data).round()
model_results = predict_top_classes(model, test_data, test_feature)
model_results.append(precision_score(test_feature, predictions, average="micro"))
model_results.append(recall_score(test_feature, predictions, average="micro"))
model_results.insert(0,i);
model_results.insert(0,norm_name);
model_results.insert(0,model_name);
tbl[str(model_name + '_' + norm_name + '_' + str(i))] = model_results
conf_matrix = pd.DataFrame(confusion_matrix(test_feature, predictions.round()))
conf_matrix.to_csv(os.path.join(str(out_dir + '/' + 'confusion_matrix' + '/'), str(model_name + '_' + norm_name + '_' + str(i)) + "." + 'csv'))
CV_table = | pd.crosstab(name_map[test_feature], name_map[predictions], rownames=['Actual ' + feature_name], colnames=['Predicted ' + feature_name]) | pandas.crosstab |
import requests
import json
import pandas as pd
from tqdm import tqdm
import numpy as np
from datetime import datetime
from typing import List
from pandas import DataFrame
######Global Params#######
graph_url = 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2'
col_data_types = {'amount0': float, 'amount1': float, 'logIndex': int, 'liquidity': float,
'amount0In': float, 'amount0Out': float, 'amount1In': float, 'amount1Out': float}
#########################
def process_query(query: str, data_field: str, graph_url: str) -> List[dict]:
"""
Helper function to take a query and retrieve the data.
query (str): The query to be executed
data_field (str): The data field to be pulled out
graph_url (str): The url of the subgraph
"""
#Make the request
request = requests.post(graph_url, json={'query': query})
#Pull the json out from the text
data = json.loads(request.text)
#Pull out the relevant data field
data = data['data'][data_field]
return data
def convert_where_clause(clause: dict) -> str:
"""
Convert a dictionary of clauses to a string for use in a query
Parameters
----------
clause : dict
Dictionary of clauses
Returns
-------
str
A string representation of the clauses
"""
out = "{"
for key in clause.keys():
out += "{}: ".format(key)
#If the type of the right hand side is string add the string quotes around it
if type(clause[key]) == str:
out += '"{}"'.format(clause[key])
else:
out += "{}".format(clause[key])
out += ","
out += "}"
return out
def query_builder(main: str, fields: List[str], first: int = 100,
skip: int = None, order_by: str = None,
order_direction: str = None,
where_clause: dict = None) -> str:
"""
Function for creation of a query string.
Parameters
----------
main : str
The query to be run
fields : List[str]
The fields to pull in the query
first : int, optional
The number of records to pull
skip : int, optional
The number of records to skip
order_by : str, optional
The field to order by
order_direction : str, optional
The direction to order by
where_clause : dict, optional
A dictionary of clauses for filtering of the records
Returns
-------
str
A query string constructed from all the parameters.
"""
#Assert the correct values for first and skip are used
assert first >= 1 and first <= 1000, "The value for first must be within 1-1000"
if skip:
assert skip >= 0 and skip <= 5000, "The value for skip must be within 1-5000"
#List of main parameters
main_params = []
main_params.append("first: {}".format(first))
if skip:
main_params.append("skip: {}".format(skip))
if order_by:
main_params.append("orderBy: {}".format(order_by))
if order_direction:
main_params.append("orderDirection: {}".format(order_direction))
if where_clause:
#Convert where clause
where_clause = convert_where_clause(where_clause)
main_params.append("where: {}".format(where_clause))
#Convert params to a string
main_params = ", ".join(main_params)
#Convert fields to a string
fields = ", ".join(fields)
#Add in all the components
query = """query{{
{}({}){{
{}
}}
}}""".format(main, main_params, fields)
return query
def pull_data(query_function: PaginatedQuery) -> DataFrame:
"""
Function to pull query data then process
Parameters
----------
query_function : PaginatedQuery
The paginated query object that retrieves our data
Returns
-------
DataFrame
A dataframe with the data pulled from our query
"""
#Pull the data
data = query_function.run_queries()
data['timestamp'] = pd.to_datetime(data['timestamp'], unit = 's')
data['event'] = query_function.data_field
#Create mapping of column data types
cdt = {}
#Check each column
for col in data.columns:
#If it has a mapping add it to cdt
if col in col_data_types.keys():
cdt[col] = col_data_types[col]
#Map the data types
data = data.astype(cdt)
return data
def find_data_overlap(data):
"""
Function to find the earliest date that ensures data overlap.
"""
return max([df['timestamp'].min() for df in data])
def process_amount(df: DataFrame) -> None:
"""
Modifies the dataframe in place to map values to amount0,
amount1 and liquidity with the current sign. This function
requires the dataframe to all be of the same event
Parameters
----------
df : DataFrame
A dataframe of data for either mints, burns or swaps
Returns
-------
None
"""
#Ensure there is only one event
assert len(set(df['event'].values)), "Dataframe has more than one event"
if df['event'].iloc[0] == 'mints':
#Mints are already correctly formated
pass
elif df['event'].iloc[0] == 'burns':
#Flip the sign to negative for burns
df[['amount0', 'amount1', 'liquidity']] *= -1
elif df['event'].iloc[0] == 'swaps':
#Map the amount in for each token minus amount out to be the column for
#amount0 and amount1, no liquidity for swaps
df['amount0'] = df['amount0In'] - df['amount0Out']
df['amount1'] = df['amount1In'] - df['amount1Out']
df['liquidity'] = 0
df.drop(columns=['amount0Out', 'amount0In', 'amount1Out', 'amount1In'], inplace=True)
else:
assert False, "The event is not recognized"
def process_events(df: DataFrame) -> None:
"""
Modifies the dataframe in place to map values for the events
Parameters
----------
df : DataFrame
A dataframe of data for either mints, burns or swaps
Returns
-------
None
"""
#Ensure there is only one event
assert len(set(df['event'].values)), "Dataframe has more than one event"
if df['event'].iloc[0] == 'mints':
df['event'] = 'mint'
elif df['event'].iloc[0] == 'burns':
df['event'] = 'burn'
elif df['event'].iloc[0] == 'swaps':
df['event'] = (df['amount0'] > 0).map({True: 'ethPurchase', False: 'tokenPurchase'})
def process_data(data: DataFrame) -> DataFrame:
"""
Process of all the data
Parameters
----------
data : DataFrame
A dataframe of data
Returns
-------
DataFrame
A dataframe of processed data
"""
#Do all data processing
for df in data:
process_amount(df)
process_events(df)
#Concat
data = pd.concat(data)
#Drop the id column
data = data.drop(columns=['id'])
#Rename columns
data = data.rename(columns={'amount0': 'token_delta', 'amount1': 'eth_delta', 'liquidity': 'UNI_delta'})
#Indexing
data = data.sort_values(['timestamp', 'logIndex'])
data.reset_index(inplace = True, drop = True)
return data
def add_starting_state(data: DataFrame, start_date: datetime,
end_date: datetime) -> DataFrame:
"""
Add the starting state data to the current data
Parameters
----------
data : DataFrame
The current dataset of transactions
start_date : datetime
The start date
end_date : datetime
The end date
Returns
-------
DataFrame
A dataframe with the new state variables added in
"""
#Convert the dates to unix and capture all the times within the end date by adding one day and subtracting 1 (unix)
#For the start date subtract one hour since each hour corresponds to the end of the hour
start_date_unix = convert_to_unix(start_date-pd.Timedelta("1h"))
end_date_unix = convert_to_unix(end_date+pd.Timedelta("1D"))-1
#Create the query object
state_query = PaginatedQuery("pairHourDatas",
["id",
"reserve0",
"reserve1",
"hourStartUnix"], "pairHourDatas",
first=1000, where_clause={"pair": "0x8ae720a71622e824f576b4a8c03031066548a3b1",
"hourStartUnix_gte": start_date_unix,
"hourStartUnix_lte": end_date_unix})
#Pull the data
state_data = state_query.run_queries()
#Convert the type
state_data['reserve0'] = state_data['reserve0'].astype(float)
state_data['reserve1'] = state_data['reserve1'].astype(float)
#Find the liquidity
state_data['liquidity'] = (state_data['reserve0'] * state_data['reserve1']) ** 0.5
#Convert the timestamp
state_data['timestamp'] = pd.to_datetime(state_data['hourStartUnix'], unit = 's')
#Drop extra columns
state_data = state_data.drop(columns=['id', 'hourStartUnix'])
#Sort the data
state_data = state_data.sort_values(by='timestamp')
#Convert the dates to unix and capture all the times within the end date by adding one day and subtracting 1 (unix)
#For the start date subtract one hour since each hour corresponds to the end of the hour
start_date_unix = convert_to_unix(start_date)
end_date_unix = convert_to_unix(end_date+pd.Timedelta("1D"))-1
state_query2 = PaginatedQuery("liquidityPositionSnapshots",
["id", "liquidityTokenTotalSupply", "timestamp"], "liquidityPositionSnapshots",
first=1000, where_clause={"pair": "0x8ae720a71622e824f576b4a8c03031066548a3b1",
"timestamp_gte": start_date_unix,
"timestamp_lte": end_date_unix})
#Pull the data
state_data2 = state_query2.run_queries()
#Convert the timestamp
state_data2['timestamp'] = pd.to_datetime(state_data2['timestamp'], unit = 's')
state_data2 = state_data2.sort_values(by='timestamp')
#Drop extra columns
state_data2 = state_data2.drop(columns=['id'])
#Convert the liquidityTokenTotalSupply to numeric
state_data2["liquidityTokenTotalSupply"] = state_data2["liquidityTokenTotalSupply"].astype(float)
#Get the historical token and eth balanace
data['token_balance'] = data['token_delta'].cumsum() + state_data['reserve0'].iloc[0]
data['eth_balance'] = data['eth_delta'].cumsum() + state_data['reserve1'].iloc[0]
#Compute the liquidity
data['liquidity'] = (data['token_balance'] * data['eth_balance']) ** .5
#Get the last hourly value for the balances
validation_data1 = data.groupby(data['timestamp'].dt.floor('h')).last()[['token_balance', 'eth_balance', 'liquidity']]
#Ensure datetime
validation_data1.index = pd.to_datetime(validation_data1.index)
#Get the other validation dataset
validation_data2 = state_data.set_index('timestamp').copy()[['reserve0','reserve1', 'liquidity']]
validation_data2.columns = ["token_balance", "eth_balance", 'liquidity']
validation_data2.index = pd.to_datetime(validation_data2.index)
assert abs(validation_data2-validation_data1).max().max() < .01
#Filter to only mints and burns
mints_burns = data[data['event'].isin(['mint', 'burn'])]
#Find the starting UNI supply
starting_UNI = state_data2['liquidityTokenTotalSupply'].iloc[0] - mints_burns["UNI_delta"].iloc[0]
#Find the starting supply of UNI
data['UNI_supply'] = data['UNI_delta'].cumsum() + starting_UNI
return data
def convert_to_unix(dt: datetime) -> int:
"""
Convert a datetime to a unix number
Parameters
----------
dt : datetime
The datetime to convert
Returns
-------
int
An integer representing the datetime in unix
"""
return int((dt - datetime(1970,1,1)).total_seconds() )
class PaginatedQuery:
"""
A class which handles a paginated query. Attributes of the base query are specified
and then given the latest ID, there is an update to the query. The sorting must be
done on the ID to ensure no data is missed.
"""
def __init__(self, main: str, fields: List[str], data_field: str,
where_clause: dict = None, first: int = None,
start_date: datetime = None, end_date: datetime = None) -> None:
"""
Parameters
----------
main : str
The main query that is being run.
fields : List[str]
A list of strings representing each field we want to pull.
data_field : str
The data field to pull out of the json
where_clause : dict, optional
A dictionary of clauses for filtering with the where statement
first : int, optional
Number of records to grab (maximum 1000)
start_date : datetime, optional
The start date of the data
end_date : datetime, optional
The end date of the data
Returns
-------
None
"""
self.main = main
self.fields = fields
self.data_field = data_field
#If there is no where clause, convert it to an empty dictionary
if where_clause is None:
where_clause = {}
self.where_clause = where_clause
self.first = first
self.start_date = start_date
self.end_date = end_date
#Convert the dates to unix and add them to the where clause
if self.start_date:
start_date_unix = convert_to_unix(start_date)
self.where_clause['timestamp_gte'] = start_date_unix
if self.end_date:
end_date_unix = convert_to_unix(end_date+ | pd.Timedelta("1D") | pandas.Timedelta |
from __future__ import print_function
import pandas
import matplotlib; matplotlib.use('Agg')
import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt
from tabulate import tabulate
from munkres import Munkres
from collections import defaultdict
try:
from ordereddict import OrderedDict # can be installed using pip
except:
from collections import OrderedDict # only included from python 2.7 on
import mailpy
from box_util import boxoverlap, box3doverlap
from evaluate_kitti3dmot_model import *
def run(*argv):
"""
Parameters:
argv = [signture, dir ,"3D/2D","Baseline","Your model*", subfolder]
signture:
3D/2D:
Baseline: Name of basline
must match the folder where the results are stored.
tracked obejects are not in different
subfolders
Your model/*: name of your model
must match the folder where the results are stored.
Add * at the end if tracked obejects are not in different
subfolders
subfolder: (optional)
to store in a subfoler
"""
num_sample_pts = 41.0
# check for correct number of arguments. if user_sha and email are not supplied,
# no notification email is sent (this option is used for auto-updates)
if len(argv)<5:
print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
sys.exit(1);
# get unique sha key of submitted results
result_sha = argv[0]
obj_tracked = result_sha.split("_")[0]
dir = argv[1]
dt_typ= result_sha.split("_")[3]
baseline_name = argv[3]
mail = mailpy.Mail("")
D = argv[2]
#
if argv[2] == '2D':
eval_3diou, eval_2diou = False, True # eval 2d
elif argv[2] == '3D':
eval_3diou, eval_2diou = True, False # eval 3d
else:
print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
sys.exit(1);
# evaluate results
if len(argv) ==6:
table_name = 'results/{}/{}/results_{}_{}_table_{}.csv'.format(dir,argv[5],obj_tracked,dt_typ, D)
else:
table_name = 'results/{}/results_{}_{}_table_{}.csv'.format(dir,obj_tracked,dt_typ, D)
if os.path.exists(table_name):
df = | pandas.read_csv(table_name) | pandas.read_csv |
import configparser
import sys
import pandas as pd
import timeit
from pathlib import Path
from fair_clustering_metric_membership import fair_clustering_metric_membership
from util.configutil import read_list
from util.utilhelpers import max_Viol, x_for_colorBlind, find_balance
num_colors = 2
# k0: is the first cluster size
k0= 15
# kend: is the last cluster size
kend= 20
config_file = "config/example_metric_membership_config.ini"
config = configparser.ConfigParser(converters={'list': read_list})
config.read(config_file)
# Create your own entry in `example_config.ini` and change this str to run
# your own trial
config_str = "adult_age" if len(sys.argv) == 1 else sys.argv[1]
# Read variables
data_dir = config[config_str].get("data_dir")
dataset = config[config_str].get("dataset")
clustering_config_file = config[config_str].get("config_file")
deltas = list(map(float, config[config_str].getlist("deltas")))
max_points = config[config_str].getint("max_points")
# ready up for the loop
clusters = [ k+k0 for k in list(range(kend-k0+1))]
df = | pd.DataFrame(columns=['num_clusters','POF','MaxViolFairNormalized','MaxViolUnFairNormalized','MaxViolFair','MaxViolUnFair','Fair Balance','UnFair Balance','Run_Time','ColorBlindCost','FairCost','R_max']) | pandas.DataFrame |
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.nonparametric.kernel_regression import KernelReg
import itertools
import datetime as dt
import matplotlib.dates as mdates
DATA_FOLDER = 'G:/My Drive/Podcast/PNB/data/formatted/'
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(int(window_size))
order = np.abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
if __name__ == '__main__':
with open(DATA_FOLDER + 'data.pickle', 'rb') as f:
data = pickle.load(f)
with open(DATA_FOLDER + 'meta.pickle', 'rb') as f:
meta = pickle.load(f)
meta.ep_release_date = pd.to_datetime(meta.ep_release_date)
data['TotalPlays'] = data['TotalPlays'].sort_values(by=['podcast', 'time'])
podcasts = data['TotalPlays'].podcast.unique()
rolls = []
for podcast in podcasts:
roll = data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].plays.rolling(window=7).mean()
rolls.append(roll)
rolls = list(itertools.chain.from_iterable(rolls))
data['TotalPlays']['plays_rolling_mean'] = rolls
podcast_colours = {'Dropzilla': '#C5252C',
'Wasabicast': '#02AF7E',
'PressStartCast': '#117C91'}
plays_smooth_golay = []
for podcast in podcasts:
plays = data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].set_index('time').plays
plays_smoothed_golay = savitzky_golay(plays, 365, 3)
plays_smooth_golay.append(plays_smoothed_golay)
data['TotalPlays']['plays_smooth_golay'] = list(itertools.chain.from_iterable(plays_smooth_golay))
kr = []
for podcast in podcasts:
kr.append(KernelReg(data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].plays,
data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].time.map(dt.datetime.toordinal),
'c'))
plays_smooth = []
for n, podcast in enumerate(podcasts):
plays_smoothed, std = kr[n].fit(data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].time.map(dt.datetime.toordinal))
plays_smooth.append(plays_smoothed)
data['TotalPlays']['plays_smooth'] = list(itertools.chain.from_iterable(plays_smooth))
plt.figure()
for podcast in podcasts:
data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].set_index('time').plays_rolling_mean.plot(
c=podcast_colours[podcast], alpha=0.3)
# for podcast in podcasts:
# data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].set_index('time').plays_smooth.plot(
# c=podcast_colours[podcast])
for podcast in podcasts:
data['TotalPlays'].loc[data['TotalPlays'].podcast == podcast].set_index('time').plays_smooth_golay.plot(
c=podcast_colours[podcast])
# for podcast in podcasts:
# for release in meta.loc[meta.podcast == podcast].ep_release_date:
# plt.axvline(pd.Timestamp(release), color=podcast_colours[podcast], alpha=0.2)
plt.axvspan(xmin=pd.Timestamp('2021-12-06'), xmax=pd.Timestamp('2021-12-11'), color='black', alpha=0.2)
plt.text( | pd.Timestamp('2021-12-06') | pandas.Timestamp |
'''
Author <NAME> (<EMAIL>)
K-means segmentation segments flights based on the amount of change in their trajectory variables.
Main analyses different weights for the segmentation.
'''
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import os
import re
import itertools
import math
import logging
VAL_LIMITS = {
# Altitude and speed taken from A320 and B737 docs
# 'ts':[4, 1800], # artificially set segments to max 30 min
'alt': [0, 41000], # ft Both A320 and B737 have same ceiling
'spd': [0, 470], # spd (kts) Both A320 and B737 have same MMO (max mach operation)
'roc': [-10000, 10000], # roc (fpm) ICAO docs (https://www.icao.int/Meetings/anconf12/Document%20Archive/9863_cons_en.pdf)
}
def norm(array:np.ndarray, column:str) -> np.ndarray:
return (array - VAL_LIMITS[column][0]) / (VAL_LIMITS[column][1] - VAL_LIMITS[column][0])
def alpha_numeric_sort(l:list) -> list:
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key = alphanum_key)
number_to_label = {
0: "NA",
1: "taxi",
2: "take off",
3: "initial climb",
4: "climb",
5: "cruise",
6: "descent",
7: "approach",
8: "landing"
}
colormap = {
"NA": "red",
"taxi": "black",
"take off": "pink",
"initial climb": "yellow",
"climb": "green",
"cruise": "blue",
"descent": "orange",
"approach": "brown",
"landing": "cyan",
}
def kmeans_segmentation(x:np.ndarray, n_mu:int, weights:list, max_iters = 100)->(np.ndarray, np.ndarray):
'''
Segment x into n_mu segments of variable length using k-means clustering and enforcing continuity
:param x: multi-dimensional signal to be segmented
:param n_mu: number of segments
:param weights: weights in [0, 1] indicating influence of the dimensions
:return: an array with the segment number for each data point, mean of each segment
'''
# Uniform initialisation
dims = len(x[0])
n = len(x)
c = np.zeros(n, dtype=np.int)
same_class = math.floor(n/n_mu)
rest = n % n_mu
slice_start = 0
for i in range(n_mu):
if i < rest:
slice_end = slice_start + same_class + 1
else:
slice_end = slice_start + same_class
c[slice_start:slice_end] = i
slice_start = slice_end
def dist(x1:np.ndarray, x2:np.ndarray, w = weights)->float:
'''
Compute the Eucledian distance between x1 and x2 using the w weights
:param x1: a datapoint
:param x2: a second datapoint with same dimension as x1
:param w: weights for each dimension
:return: Eucledian distance between x1 and x2
'''
assert len(x1) == len(x2)
assert len(x1) == len(w)
dist = 0
for i in range(len(x1)):
dist += weights[i] * np.power((x1[i] - x2[i]), 2)
dist = np.sqrt(dist)
return dist
def get_means(c: np.ndarray, x_inner: np.ndarray = x, dim: int = dims, n_clusters: int = n_mu)->np.ndarray:
'''
Compute the segment means with the current segmentation
:param c: array with segment index for each datapoint
:param x_inner: multi-dimension signal
:param dim: number of dimensions of signal
:param n_clusters: number of segments
:return: array with mean values of each dimension for each segment
'''
means = np.zeros((n_clusters, dim))
counts = np.zeros(n_clusters)
for i, x_i in enumerate(x_inner):
means[c[i]] += x_i
counts[c[i]] += 1
counts = np.where(counts == 0, 1, counts)
for i, m in enumerate(means):
m /= counts[i]
return means
mu = get_means(c)
# Core
for iteration in range(max_iters):
new_c = c.copy()
counts = pd.value_counts(new_c)
for idx in range(1, n-1):
if counts[c[idx+1]] < 1800 and counts[c[idx-1]] <1800: # Segments should not be longer than half an hour
if c[idx+1] != c[idx] and counts[c[idx]] > 4:
dist_next = dist(x[idx], mu[c[idx+1]])
dist_curr = dist(x[idx], mu[c[idx]])
if dist_next < dist_curr:
new_c[idx] = c[idx + 1]
elif c[idx-1] != c[idx] and counts[c[idx]] > 4:
dist_curr = dist(x[idx], mu[c[idx]])
dist_prev = dist(x[idx], mu[c[idx-1]])
if dist_prev < dist_curr:
if new_c[idx-1] == c[idx-1]:
new_c[idx] = c[idx-1]
# In case two consecutive elements want to switch only the one with biggest difference does
elif dist(x[idx-1], mu[c[idx-1]]) - dist(x[idx-1], mu[c[idx]]) < dist_curr - dist_prev:
new_c[idx] = c[idx-1]
new_c[idx-1] = c[idx-1]
if any(new_c != c):
c = new_c.copy()
mu = get_means(c)
else:
# print("Stopped at iteration " + str(iteration))
return mu, c
print("Reached max iterations")
return mu, c
def segmentation_weight_evaluation(flights:list, labels:list, weights:list, plot=False, n_cluster=90):
'''
Function that computes the best possible accuracy for the flights when using weights
:param flights: pandas dataframes with trajectories
:param labels: pandas dataframes with ground truth labels
:param weights: weights ([0, 1]) to apply for segmentation
:param plot: show the identified segments
:param n_cluster: number of segments
:return:
'''
count_stats = 0
error = []
mean_err = 0
max_err = 0
corrects = 0
phase_lens = np.zeros(8)
for flight_n in range(len(flights)):
label = labels[flight_n]
flight = flights[flight_n]
x = [norm(flight[col].copy(), col) for col in flight.columns if col in VAL_LIMITS.keys()]
x = np.array(x).transpose()
phase_counts = np.bincount(label["phase"])
ts = flight['ts']
alts = flight['alt']
## K-means
_, twindows = kmeans_segmentation(x, n_mu=n_cluster, weights=weights)
flight['cluster'] = twindows
counts = pd.value_counts(twindows)
count_stats = np.add(count_stats, [counts.max(), counts.min(), counts.mean()])
flight["phase"] = 0
for i, t in enumerate(twindows):
if t != twindows[i - 1]:
if counts[t] > 0:
l_counts = label['phase'][i:(i + counts[t])].value_counts()
if not l_counts.empty:
flight.iloc[i:(i + counts[t]), flight.columns.get_loc('phase')] = l_counts.idxmax()
phase_lens += np.array([(abs(flight["phase"][label["phase"]==i] - label["phase"][label["phase"]==i]) > 0).sum() / phase_counts[i] for i in range(8)])
mean_err = (abs(flight["phase"] - label["phase"]) > 0).sum() / len(label)
correct_idx = []
missed_idx = []
if plot:
fig, ax = plt.subplots(2, figsize=(20, 10))
ax[0].scatter(ts, alts, s=1, c=np.mod(twindows, 2), cmap='viridis')
ax[1].scatter(ts, alts, s=1, c=label['phase'], cmap='viridis')
for idx in correct_idx:
ax[1].axvline(ts[idx], color='green', linestyle='--')
for idx in missed_idx:
ax[1].axvline(ts[idx], color='red', linestyle='--')
plt.show()
count_stats /= len(flights)
mean_err /= len(flights)
max_err /= len(flights)
corrects /= len(flights)
phase_lens /= len(flights)
print(f"{weights}, window lengths: {np.round(count_stats, 2)}, average relative error {round(mean_err*100, 4)}%, average maximum relative error {round(max_err*100, 5)}%")
return mean_err, phase_lens
def label_clusters(flights:list, labels:list, weights:list, plot=True, n_cluster=90)->(np.ndarray, float):
'''
Segments flights with k-means and label the segments with the most occuring ground truth label in that segment
:param flights: pandas dataframes with trajectories
:param labels: pandas dataframes labels
:param weights: weights (in range [0,1]) applied for segmentation
:param plot: show segmentation (True) or not (False)
:param n_cluster: number of clusters
:return: segment labels of all flights, error introduced by segmentation
'''
clustered_labels = []
diffs = np.zeros(len(flights))
k_means_iters = np.zeros(len(flights))
for flight_n in range(len(flights)):
label = labels[flight_n]
flight = flights[flight_n]
x = [norm(flight[col].copy(), col) for col in flight.columns if col in VAL_LIMITS.keys()]
x = np.array(x).transpose()
for i_w, w_i in enumerate(weights):
x[:, i_w] = w_i * x[:, i_w]
ts = flight.ts
alts = flight.alt
## K-means
_, twindows, k_means_iters[flight_n] = kmeans_segmentation(x, n_mu=n_cluster)
flight['cluster'] = twindows
counts = | pd.value_counts(twindows) | pandas.value_counts |
import argparse
import datetime
import logging
import os
import synapseclient
import genie
import pandas as pd
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_center_data_completion(center, df):
'''
Get center data completion. Calulates the percentile of
how complete a clinical data element is:
Number of not blank/Unknown/NA divded by
total number of patients or samples
Args:
center: GENIE center
df: sample or patient dataframe
Returns:
Dataframe: Center data
'''
centerdf = df[df['CENTER'] == center]
total = len(centerdf)
center_data = pd.DataFrame()
skip_cols = ['CENTER', 'PATIENT_ID', 'SAMPLE_ID', 'SAMPLE_TYPE_DETAILED',
'SECONDARY_RACE', 'TERTIARY_RACE']
for col in centerdf:
if col not in skip_cols:
not_missing = [not | pd.isnull(value) | pandas.isnull |
import dask.dataframe as dd
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
def test_create_entity_from_dask_df(pd_es):
dask_es = EntitySet(id="dask_es")
log_dask = dd.from_pandas(pd_es["log"].df, npartitions=2)
dask_es = dask_es.entity_from_dataframe(
entity_id="log_dask",
dataframe=log_dask,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(pd_es["log"].df, dask_es["log_dask"].df.compute(), check_like=True)
def test_create_entity_with_non_numeric_index(pd_es, dask_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), dask_es['new_entity'].df.compute())
def test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
# Test error is raised when trying to add Dask entity to entitset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(dask_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id")
# Test error is raised when trying to add pandas entity to entitset with existing dask entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(dask_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
dask_es = EntitySet(id="dask_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
sessions_dask = dd.from_pandas(sessions, npartitions=2)
sessions_vtypes = {
"id": ft.variable_types.Id,
"user": ft.variable_types.Id,
"time": ft.variable_types.DatetimeTimeIndex,
"strings": ft.variable_types.Text
}
transactions = pd.DataFrame({"id": [0, 1, 2, 3, 4, 5],
"session_id": [0, 0, 1, 2, 2, 3],
"amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],
"time": [pd.to_datetime('2019-01-10 03:53'),
pd.to_datetime('2019-01-10 04:12'),
pd.to_datetime('2019-02-03 10:34'),
pd.to_datetime('2019-01-01 12:35'),
pd.to_datetime('2019-01-01 12:49'),
pd.to_datetime('2017-08-25 04:53')]})
transactions_dask = dd.from_pandas(transactions, npartitions=2)
transactions_vtypes = {
"id": ft.variable_types.Id,
"session_id": ft.variable_types.Id,
"amount": ft.variable_types.Numeric,
"time": ft.variable_types.DatetimeTimeIndex,
}
pd_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions, index="id", time_index="time")
dask_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions_dask, index="id", time_index="time", variable_types=sessions_vtypes)
pd_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions, index="id", time_index="time")
dask_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions_dask, index="id", time_index="time", variable_types=transactions_vtypes)
new_rel = Relationship(pd_es["sessions"]["id"],
pd_es["transactions"]["session_id"])
dask_rel = Relationship(dask_es["sessions"]["id"],
dask_es["transactions"]["session_id"])
pd_es = pd_es.add_relationship(new_rel)
dask_es = dask_es.add_relationship(dask_rel)
assert pd_es['sessions'].last_time_index is None
assert dask_es['sessions'].last_time_index is None
pd_es.add_last_time_indexes()
dask_es.add_last_time_indexes()
pd.testing.assert_series_equal(pd_es['sessions'].last_time_index.sort_index(), dask_es['sessions'].last_time_index.compute(), check_names=False)
def test_create_entity_with_make_index():
values = [1, 12, -23, 27]
df = pd.DataFrame({"values": values})
dask_df = dd.from_pandas(df, npartitions=2)
dask_es = EntitySet(id="dask_es")
vtypes = {"values": ft.variable_types.Numeric}
dask_es.entity_from_dataframe(entity_id="new_entity", dataframe=dask_df, make_index=True, index="new_index", variable_types=vtypes)
expected_df = pd.DataFrame({"new_index": range(len(values)), "values": values})
pd.testing.assert_frame_equal(expected_df, dask_es['new_entity'].df.compute())
def test_single_table_dask_entityset():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
| pd.to_datetime('2017-08-25') | pandas.to_datetime |
import pandas as pd
def build_date_mapper (original_dates,master_dates):
# original dates should be a unique list
# master dates should be a sorted pandas index
mapper = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 30 22:22:22 2021
@author: Dell
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
data_income= | pd.read_csv("C:/Users/Dell/OneDrive/Desktop/income(1).csv") | pandas.read_csv |
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import jsonschema
import pandas as pd
try:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
spark_installed = True
except ImportError:
spark_installed = False
from test import EnableSchemaValidation
from lale import wrap_imported_operators
from lale.expressions import (
count,
day_of_month,
day_of_week,
day_of_year,
hour,
it,
minute,
month,
replace,
string_indexer,
)
from lale.lib.lale import (
Aggregate,
ConcatFeatures,
Hyperopt,
Join,
Map,
Relational,
Scan,
)
from lale.lib.sklearn import KNeighborsClassifier, LogisticRegression
from lale.operators import make_pipeline_graph
class TestMap(unittest.TestCase):
def test_init(self):
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
_ = Map(columns=[replace(it.gender, gender_map), replace(it.state, state_map)])
def test_transform_replace_list_and_remainder(self):
d = {
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
df = pd.DataFrame(data=d)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)],
remainder="drop",
)
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (5, 2))
self.assertEqual(transformed_df["gender"][0], "Male")
self.assertEqual(transformed_df["state"][0], "New York")
def test_transform_replace_list(self):
d = {
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
df = pd.DataFrame(data=d)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)]
)
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (5, 3))
self.assertEqual(transformed_df["gender"][0], "Male")
self.assertEqual(transformed_df["state"][0], "New York")
def test_transform_replace_map(self):
d = {
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
df = pd.DataFrame(data=d)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns={
"new_gender": replace(it.gender, gender_map),
"new_state": replace(it.state, state_map),
}
)
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (5, 3))
self.assertEqual(transformed_df["new_gender"][0], "Male")
self.assertEqual(transformed_df["new_state"][0], "New York")
def test_transform_dom_list(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-27", "2016-07-26"]})
trainable = Map(columns=[day_of_month(it.date_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 28)
self.assertEqual(transformed_df["date_column"][1], 27)
self.assertEqual(transformed_df["date_column"][2], 26)
def test_transform_dom_fmt_list(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-27", "2016-07-26"]})
trainable = Map(columns=[day_of_month(it.date_column, "%Y-%m-%d")])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 28)
self.assertEqual(transformed_df["date_column"][1], 27)
self.assertEqual(transformed_df["date_column"][2], 26)
def test_transform_dom_fmt_map(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-27", "2016-07-26"]})
trainable = Map(columns={"dom": day_of_month(it.date_column, "%Y-%m-%d")})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["dom"][0], 28)
self.assertEqual(transformed_df["dom"][1], 27)
self.assertEqual(transformed_df["dom"][2], 26)
def test_transform_dow_list(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-28", "2016-07-28"]})
trainable = Map(columns=[day_of_week(it.date_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 5)
self.assertEqual(transformed_df["date_column"][1], 1)
self.assertEqual(transformed_df["date_column"][2], 3)
def test_transform_dow_fmt_list(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-28", "2016-07-28"]})
trainable = Map(columns=[day_of_week(it.date_column, "%Y-%m-%d")])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 5)
self.assertEqual(transformed_df["date_column"][1], 1)
self.assertEqual(transformed_df["date_column"][2], 3)
def test_transform_dow_fmt_map(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-28", "2016-07-28"]})
trainable = Map(columns={"dow": day_of_week(it.date_column, "%Y-%m-%d")})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["dow"][0], 5)
self.assertEqual(transformed_df["dow"][1], 1)
self.assertEqual(transformed_df["dow"][2], 3)
def test_transform_doy_list(self):
df = pd.DataFrame({"date_column": ["2016-01-01", "2016-06-28", "2016-07-28"]})
trainable = Map(columns=[day_of_year(it.date_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 1)
self.assertEqual(transformed_df["date_column"][1], 180)
self.assertEqual(transformed_df["date_column"][2], 210)
def test_transform_doy_fmt_list(self):
df = pd.DataFrame({"date_column": ["2016-01-01", "2016-06-28", "2016-07-28"]})
trainable = Map(columns=[day_of_year(it.date_column, "%Y-%m-%d")])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 1)
self.assertEqual(transformed_df["date_column"][1], 180)
self.assertEqual(transformed_df["date_column"][2], 210)
def test_transform_doy_fmt_map(self):
df = pd.DataFrame({"date_column": ["2016-01-01", "2016-06-28", "2016-07-28"]})
trainable = Map(columns={"doy": day_of_year(it.date_column, "%Y-%m-%d")})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["doy"][0], 1)
self.assertEqual(transformed_df["doy"][1], 180)
self.assertEqual(transformed_df["doy"][2], 210)
def test_transform_hour_list(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns=[hour(it.date_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 15)
self.assertEqual(transformed_df["date_column"][1], 12)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_hour_fmt_list(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns=[hour(it.date_column, "%Y-%m-%d %H:%M:%S")])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 15)
self.assertEqual(transformed_df["date_column"][1], 12)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_hour_fmt_map(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns={"hour": hour(it.date_column, "%Y-%m-%d %H:%M:%S")})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["hour"][0], 15)
self.assertEqual(transformed_df["hour"][1], 12)
self.assertEqual(transformed_df["hour"][2], 1)
def test_transform_minute_list(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns=[minute(it.date_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 16)
self.assertEqual(transformed_df["date_column"][1], 18)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_minute_fmt_list(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns=[minute(it.date_column, "%Y-%m-%d %H:%M:%S")])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 16)
self.assertEqual(transformed_df["date_column"][1], 18)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_minute_fmt_map(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns={"minute": minute(it.date_column, "%Y-%m-%d %H:%M:%S")})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["minute"][0], 16)
self.assertEqual(transformed_df["minute"][1], 18)
self.assertEqual(transformed_df["minute"][2], 1)
def test_transform_month_list(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns=[month(it.date_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 1)
self.assertEqual(transformed_df["date_column"][1], 6)
self.assertEqual(transformed_df["date_column"][2], 7)
def test_transform_month_fmt_list(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns=[month(it.date_column, "%Y-%m-%d %H:%M:%S")])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["date_column"][0], 1)
self.assertEqual(transformed_df["date_column"][1], 6)
self.assertEqual(transformed_df["date_column"][2], 7)
def test_transform_month_fmt_map(self):
df = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
trainable = Map(columns={"month": month(it.date_column, "%Y-%m-%d %H:%M:%S")})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["month"][0], 1)
self.assertEqual(transformed_df["month"][1], 6)
self.assertEqual(transformed_df["month"][2], 7)
def test_transform_string_indexer_list(self):
df = pd.DataFrame({"cat_column": ["a", "b", "b"]})
trainable = Map(columns=[string_indexer(it.cat_column)])
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df["cat_column"][0], 1)
self.assertEqual(transformed_df["cat_column"][1], 0)
self.assertEqual(transformed_df["cat_column"][2], 0)
def test_transform_string_indexer_map(self):
df = pd.DataFrame({"cat_column": ["a", "b", "b"]})
trainable = Map(columns={"string_indexed": string_indexer(it.cat_column)})
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["string_indexed"][0], 1)
self.assertEqual(transformed_df["string_indexed"][1], 0)
self.assertEqual(transformed_df["string_indexed"][2], 0)
def test_not_expression(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = Map(columns=[123, "hello"])
def test_with_hyperopt(self):
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
map_replace = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)],
remainder="drop",
)
pipeline = (
Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay)) >> map_replace
)
>> LogisticRegression()
)
opt = Hyperopt(estimator=pipeline, cv=3, max_evals=5)
trained = opt.fit(X, y)
_ = trained
def test_with_hyperopt2(self):
from lale.expressions import (
count,
it,
max,
mean,
min,
string_indexer,
sum,
variance,
)
wrap_imported_operators()
scan = Scan(table=it["main"])
scan_0 = Scan(table=it["customers"])
join = Join(
pred=[
(
it["main"]["group_customer_id"]
== it["customers"]["group_customer_id"]
)
]
)
map = Map(
columns={
"[main](group_customer_id)[customers]|number_children|identity": it[
"number_children"
],
"[main](group_customer_id)[customers]|name|identity": it["name"],
"[main](group_customer_id)[customers]|income|identity": it["income"],
"[main](group_customer_id)[customers]|address|identity": it["address"],
"[main](group_customer_id)[customers]|age|identity": it["age"],
},
remainder="drop",
)
pipeline_4 = join >> map
scan_1 = Scan(table=it["purchase"])
join_0 = Join(
pred=[(it["main"]["group_id"] == it["purchase"]["group_id"])],
join_limit=50.0,
)
aggregate = Aggregate(
columns={
"[main](group_id)[purchase]|price|variance": variance(it["price"]),
"[main](group_id)[purchase]|time|sum": sum(it["time"]),
"[main](group_id)[purchase]|time|mean": mean(it["time"]),
"[main](group_id)[purchase]|time|min": min(it["time"]),
"[main](group_id)[purchase]|price|sum": sum(it["price"]),
"[main](group_id)[purchase]|price|count": count(it["price"]),
"[main](group_id)[purchase]|price|mean": mean(it["price"]),
"[main](group_id)[purchase]|price|min": min(it["price"]),
"[main](group_id)[purchase]|price|max": max(it["price"]),
"[main](group_id)[purchase]|time|max": max(it["time"]),
"[main](group_id)[purchase]|time|variance": variance(it["time"]),
},
group_by=it["row_id"],
)
pipeline_5 = join_0 >> aggregate
map_0 = Map(
columns={
"[main]|group_customer_id|identity": it["group_customer_id"],
"[main]|transaction_id|identity": it["transaction_id"],
"[main]|group_id|identity": it["group_id"],
"[main]|comments|identity": it["comments"],
"[main]|id|identity": it["id"],
"prefix_0_id": it["prefix_0_id"],
"next_purchase": it["next_purchase"],
"[main]|time|identity": it["time"],
},
remainder="drop",
)
scan_2 = Scan(table=it["transactions"])
scan_3 = Scan(table=it["products"])
join_1 = Join(
pred=[
(it["main"]["transaction_id"] == it["transactions"]["transaction_id"]),
(it["transactions"]["product_id"] == it["products"]["product_id"]),
]
)
map_1 = Map(
columns={
"[main](transaction_id)[transactions](product_id)[products]|price|identity": it[
"price"
],
"[main](transaction_id)[transactions](product_id)[products]|type|identity": it[
"type"
],
},
remainder="drop",
)
pipeline_6 = join_1 >> map_1
join_2 = Join(
pred=[
(it["main"]["transaction_id"] == it["transactions"]["transaction_id"])
]
)
map_2 = Map(
columns={
"[main](transaction_id)[transactions]|description|identity": it[
"description"
],
"[main](transaction_id)[transactions]|product_id|identity": it[
"product_id"
],
},
remainder="drop",
)
pipeline_7 = join_2 >> map_2
map_3 = Map(
columns=[
string_indexer(it["[main]|comments|identity"]),
string_indexer(
it["[main](transaction_id)[transactions]|description|identity"]
),
string_indexer(
it[
"[main](transaction_id)[transactions](product_id)[products]|type|identity"
]
),
string_indexer(
it["[main](group_customer_id)[customers]|name|identity"]
),
string_indexer(
it["[main](group_customer_id)[customers]|address|identity"]
),
]
)
pipeline_8 = ConcatFeatures() >> map_3
relational = Relational(
operator=make_pipeline_graph(
steps=[
scan,
scan_0,
pipeline_4,
scan_1,
pipeline_5,
map_0,
scan_2,
scan_3,
pipeline_6,
pipeline_7,
pipeline_8,
],
edges=[
(scan, pipeline_4),
(scan, pipeline_5),
(scan, map_0),
(scan, pipeline_6),
(scan, pipeline_7),
(scan_0, pipeline_4),
(pipeline_4, pipeline_8),
(scan_1, pipeline_5),
(pipeline_5, pipeline_8),
(map_0, pipeline_8),
(scan_2, pipeline_6),
(scan_2, pipeline_7),
(scan_3, pipeline_6),
(pipeline_6, pipeline_8),
(pipeline_7, pipeline_8),
],
)
)
pipeline = relational >> (KNeighborsClassifier | LogisticRegression)
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
from lale.lib.lale import Hyperopt
opt = Hyperopt(estimator=pipeline, max_evals=2)
opt.fit(X, y)
class TestRelationalOperator(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_fit_transform(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
trained_relational = relational.fit(self.X_train, self.y_train)
_ = trained_relational.transform(self.X_test)
def test_fit_error(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
with self.assertRaises(ValueError):
_ = relational.fit([self.X_train], self.y_train)
def test_transform_error(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
trained_relational = relational.fit(self.X_train, self.y_train)
with self.assertRaises(ValueError):
_ = trained_relational.transform([self.X_test])
def test_fit_transform_in_pipeline(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
pipeline = relational >> LogisticRegression()
trained_pipeline = pipeline.fit(self.X_train, self.y_train)
_ = trained_pipeline.predict(self.X_test)
class TestMapSpark(unittest.TestCase):
def setUp(self):
if spark_installed:
conf = SparkConf().setMaster("local[2]")
sc = SparkContext.getOrCreate(conf=conf)
self.sqlCtx = SQLContext(sc)
def test_transform_spark_replace_list(self):
if spark_installed:
d = {
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
df = pd.DataFrame(data=d)
sdf = self.sqlCtx.createDataFrame(df)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)]
)
trained = trainable.fit(sdf)
transformed_df = trained.transform(sdf)
self.assertEqual(
(transformed_df.count(), len(transformed_df.columns)), (5, 3)
)
self.assertEqual(transformed_df.head()[0], "Male")
self.assertEqual(transformed_df.head()[1], "New York")
def test_transform_spark_replace_map(self):
if spark_installed:
d = {
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
df = pd.DataFrame(data=d)
sdf = self.sqlCtx.createDataFrame(df)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns={
"new_gender": replace(it.gender, gender_map),
"new_state": replace(it.state, state_map),
}
)
trained = trainable.fit(sdf)
transformed_df = trained.transform(sdf)
self.assertEqual(
(transformed_df.count(), len(transformed_df.columns)), (5, 3)
)
self.assertEqual(transformed_df.head()[1], "Male")
self.assertEqual(transformed_df.head()[2], "New York")
def test_transform_dom_list(self):
df = pd.DataFrame({"date_column": ["2016-05-28", "2016-06-27", "2016-07-26"]})
sdf = self.sqlCtx.createDataFrame(df)
trainable = Map(columns=[day_of_month(it.date_column)])
trained = trainable.fit(sdf)
transformed_df = trained.transform(sdf)
self.assertEqual(transformed_df.collect()[0]["date_column"], 28)
self.assertEqual(transformed_df.collect()[1]["date_column"], 27)
self.assertEqual(transformed_df.collect()[2]["date_column"], 26)
def test_transform_dom_fmt_list(self):
df = | pd.DataFrame({"date_column": ["28/05/2016", "27/06/2016", "26/07/2016"]}) | pandas.DataFrame |
import pandas as pd
import statsmodels.api as sm
import numpy as np
from pathlib import Path
outdir = Path('data')
def download_rivm_r():
df_rivm = pd.read_json('https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json').set_index('Date')
df_rivm.index = pd.to_datetime(df_rivm.index)
df_rivm.sort_index(inplace=True)
df_rivm.index.rename('date', inplace=True)
df_rivm = df_rivm[df_rivm.index >= '2021-06-01']
rename = {
'Rt_low': 'rivm_low',
'Rt_avg': 'rivm_mean',
'Rt_up': 'rivm_up',
}
df_rivm = df_rivm[df_rivm.index > '2021-01-01']
vals = list(rename.values())
return df_rivm.rename(columns=rename)[vals]
dfs = {}
df_combined = None
for csv in Path('data').glob('r_*.csv'):
if any([x in csv.name for x in ('linear',)]):
continue
print(csv)
df = pd.read_csv(csv, index_col=0)
df.index = pd.to_datetime(df.index)
dfs[csv.stem] = df
if df_combined is None:
df_combined = df['50%'].rename(csv.stem).to_frame()
else:
df_combined = df_combined.join(df['50%'].rename(csv.stem), how='outer')
df_rivm = download_rivm_r()
sel = df_rivm.dropna().index.intersection(df_combined.dropna().index)
x = df_combined.loc[sel]
y = df_rivm['rivm_mean'].loc[sel]
x = sm.add_constant(x) # adding a constant
model = sm.OLS(y, x).fit()
predictions = model.predict(x)
summary = model.summary()
print(summary)
print( | pd.DataFrame(model.params, columns=['coef']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script structures the dashboard streamlit in three applications. Its main role is to display results of
evaluation which have already been performed. This module is independent from others.
"""
import streamlit as st
import glob
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from statistics import mean
import pandas as pd
from math import nan
import os
import json
from dataset_helper import records, sampling_frequency
from algo_helper import algorithms_list
'''
# Benchmark of QRS detectors
'''
def get_layout(title: str) -> go.Layout:
"""
use for displaying a graph. create a Layout object
:param title: title of the displayed graph
:type title: str
:return: model of layout for graphs
:rtype: Layout
"""
return go.Layout(title=title, margin=dict(l=20, r=20, t=30, b=20))
# choose application of interest
applications = ['Comparison of different algorithms', 'Evaluation of one algorithm', 'Noise robustness']
application = st.sidebar.selectbox('What would you like to study ?', applications)
def print_error_no_evaluation(ds: str = '"#check --help#"', alg: str = '"#check --help#"', t: str = '#int(ms)#') \
-> None:
"""
display an error message when a result of a specific evaluation is selected, whereas the latter was not been
performed. Display make command to run the evaluation of interest.
:param ds: name of the dataset of interest
:type ds: str
:param alg: name of the algorithm of interest
:type alg: str
:param t: tolerance's value of interest
:type t: str
"""
st.write('The evaluation of your interest has not already being performed. You probably did not execute the '
'evaluation. Please compute the following command :')
st.write(f'\t make evaluation --DATASET="{ds}" --ALGO="{alg}" --TOLERANCE={t}')
# list of datasets used in the two first applications (comparison of performances on the entire datasets or on each
# record)
datasets_list = ['mit-bih-arrhythmia', 'mit-bih-supraventricular-arrhythmia', 'mit-bih-long-term-ecg', 'european-stt']
# colors used for graphs for each algorithm
colormap = {
'Pan-Tompkins-ecg-detector': 'rgb(41,58,143)',
'Hamilton-ecg-detector': 'rgb(215,48,39)',
'Christov-ecg-detector': 'rgb(26,152,80)',
'Engelse-Zeelenberg-ecg-detector': '#440154',
'SWT-ecg-detector': 'rgb(255,111,0)',
'Matched-filter-ecg-detector': 'rgb(179,88,6)',
'Two-average-ecg-detector': 'rgb(212,103,128)',
'Hamilton-biosppy': 'rgb(184,225,134)',
'Christov-biosppy': 'rgb(255,234,0)',
'Engelse-Zeelenberg-biosppy': 'rgb(197,27,125)',
'Gamboa-biosppy': 'rgb(153,204,255)',
'mne-ecg': 'rgb(61,89,65)',
'heartpy': 'rgb(44,255,150)',
'gqrs-wfdb': 'rgb(254,224,139)',
'xqrs-wfdb': 'rgb(10,136,186)'
}
# first application
if application == 'Comparison of different algorithms':
st.write('\n\n')
'''
## Comparison of performances of some algorithms on a dataset
'''
st.write('\n\n')
dataset = st.selectbox('Please choose a dataset:', datasets_list)
csv_files_dataset = glob.glob(f'output/perf/*_{dataset}_*.csv')
tolerance_list = []
for file in csv_files_dataset:
eval_tolerance = file[:-4].split('_')[-1]
tolerance_list.append(eval_tolerance)
tolerance = st.selectbox('Please choose tolerance of the evaluation (in ms):', list(set(tolerance_list)))
csv_files = [csv_file for csv_file in csv_files_dataset if csv_file[:-4].split('_')[-1] == tolerance]
# table of comparison
if len(csv_files) == 0:
print_error_no_evaluation(ds=dataset)
else:
comparison_df = pd.DataFrame(columns=['FP', 'FN', 'F', 'F(%)', 'P+(%)', 'Se(%)', 'F1(%)'])
number_of_beats = ''
st.write('Please select algorithms you would like to compare:')
if st.checkbox('Pan-Tompkins-ecg-detector'):
if not os.path.exists(f'output/perf/Pan-Tompkins-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Pan-Tompkins-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Pan-Tompkins-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
results_df.iloc[-1, :].name = 'Pan-Tompkins-ecg-detector'
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Pan-Tompkins-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Hamilton-ecg-detector'):
if not os.path.exists(f'output/perf/Hamilton-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Hamilton-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Hamilton-ecg-detector_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Hamilton-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Christov-ecg-detector'):
if not os.path.exists(f'output/perf/Christov-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Christov-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Christov-ecg-detector_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Christov-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Engelse-Zeelenberg-ecg-detector'):
if not os.path.exists(f'output/perf/Engelse-Zeelenberg-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Engelse-Zeelenberg-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Engelse-Zeelenberg-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Engelse-Zeelenberg-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('SWT-ecg-detector'):
if not os.path.exists(f'output/perf/SWT-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='SWT-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/SWT-ecg-detector_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'SWT-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Matched-filter-ecg-detector'):
if not os.path.exists(f'output/perf/Matched-filter-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Matched-filter-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Matched-filter-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Matched-filter-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Two-average-ecg-detector'):
if not os.path.exists(f'output/perf/Two-average-ecg-detector_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Two-average-ecg-detector', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Two-average-ecg-detector_{dataset}_{tolerance}.csv',
delimiter=',', index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Two-average-ecg-detector'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Hamilton-biosppy'):
if not os.path.exists(f'output/perf/Hamilton-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Hamilton-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Hamilton-biosppy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Hamilton-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Christov-biosppy'):
if not os.path.exists(f'output/perf/Christov-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Christov-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Christov-biosppy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Christov-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Engelse-Zeelenberg-biosppy'):
if not os.path.exists(f'output/perf/Engelse-Zeelenberg-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Engelse-Zeelenberg-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Engelse-Zeelenberg-biosppy_{dataset}_{tolerance}.csv',
delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Engelse-Zeelenberg-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('Gamboa-biosppy'):
if not os.path.exists(f'output/perf/Gamboa-biosppy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='Gamboa-biosppy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/Gamboa-biosppy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'Gamboa-biosppy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('mne-ecg'):
if not os.path.exists(f'output/perf/mne-ecg_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='mne-ecg', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/mne-ecg_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'mne-ecg'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('heartpy'):
if not os.path.exists(f'output/perf/heartpy_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='heartpy', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/heartpy_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'heartpy'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('gqrs-wfdb'):
if not os.path.exists(f'output/perf/gqrs-wfdb_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='gqrs-wfdb', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/gqrs-wfdb_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'gqrs-wfdb'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
if st.checkbox('xqrs-wfdb'):
if not os.path.exists(f'output/perf/xqrs-wfdb_{dataset}_{tolerance}.csv'):
print_error_no_evaluation(ds=dataset, alg='xqrs-wfdb', t=tolerance)
else:
results_df = pd.read_csv(f'output/perf/xqrs-wfdb_{dataset}_{tolerance}.csv', delimiter=',',
index_col=0)
global_eval = results_df.iloc[-1, 1:]
global_eval.name = 'xqrs-wfdb'
comparison_df = comparison_df.append(global_eval)
number_of_beats = results_df.iloc[-1, 0]
st.write(f"Comparative table of global performances: ")
st.write(comparison_df)
st.write(f'Total number of beats for this dataset is : {number_of_beats}')
# graphs of comparison
'''
## Comparison of performances of algorithms on different datasets
'''
results_F1 = pd.DataFrame(columns=datasets_list, index=algorithms_list)
results_Fp = | pd.DataFrame(columns=datasets_list, index=algorithms_list) | pandas.DataFrame |
"""A class for generating stellar populations"""
from cosmicats import utils, popgen, apogee
from isochrones.mist.bc import MISTBolometricCorrectionGrid
import pandas as pd
import astropy.table as at
import numpy as np
class pop():
"""Class for generic populations
Attributes
----------
sys_type : `int`
current sys_types include:
0 = general single stars
1 = general binary stars
2 = binaries containing black holes
n_stop_APOGEE : `int`
stopping condition which specifies the maximum size of the
APOGEE population
n_stop_MW : `int`
stopping condition which specifies the maximum size of the
MW population
n_samp : `int`
specifies the number of systems to sample from the cosmic and sfh data
mets : `list`
list of metallicity bins in the simulated cosmic data
cosmic_path : `str`
path to the cosmic data
sfh_model : `str`
model assumed for stellar ages and positions as a funciton of metallicity
current models include:
'Frankel19' : positions and ages from Frankel+2019
seed : `int`
random seed for reproduceability
pop_var : `str`
Can be supplied for populations where sys_type is the same but the
population is varied in some way, like if qmin is different
lifetime_interp : `scipy.interpolate.interp1d`
interpolation for single star lifetime as a function of mass
for the population metallicity
"""
def __init__(self, sys_type, n_stop_APOGEE,
n_stop_MW, n_samp, mets,
cosmic_path, lifetime_interp,
sfh_model='Frankel19', seed=42,
pop_var=None, color_cut=0.3):
self.sys_type = sys_type
self.n_stop_APOGEE = n_stop_APOGEE
self.n_stop_MW = n_stop_MW
self.n_samp = n_samp
self.mets = mets
self.cosmic_path = cosmic_path
self.sfh_model = sfh_model
self.seed = seed
# set up dat and log files
if pop_var == None:
self.pop_var = 0
else:
self.pop_var = pop_var
self.color_cut = color_cut
# set up the single star lifetime interpolator
# note: we use the minimum metallicity which gives the maximum lifetime
self.lifetime_interp = lifetime_interp
def get_formation_efficiency(self, f_b=None):
"""Get the formation efficiency as a function of metallicity
NOTE : for the moment, we put in f_b by hand using the methods
in popgen, however in the future, it would be good to implement
a general treatment!
Parameters
----------
f_b : `float/list of lists`
binary fraction to weight single stars against binary stars
Returns
-------
formation_efficiency: `ndarray`
relative formation number per total population size for each
metallicity in simulation grid for each system type
"""
formation_efficiency = popgen.get_formation_efficiency(mets=self.mets,
path=self.cosmic_path,
var=self.pop_var,
sys_type=self.sys_type,
f_b=None)
return formation_efficiency
def build_pop(self, n_proc, run):
"""Generates an astrophysical population and associated APOGEE
catalog by matching a cosmic dataset to a star formation model
for a given population system type, and applying APOGEE-like
selections on the data"""
# load in the APOGEE binary data go get the orbital period data
binaries = at.Table.read('lnK0.0_logL4.6_metadata.fits')
all_star = at.Table.read('allStarLite-r12-l33.fits')
binaries_join = at.unique(at.join(binaries, all_star, join_type='left', keys='APOGEE_ID'), 'APOGEE_ID')
cols_drop = ['NINST', 'STABLERV_CHI2', 'STABLERV_RCHI2', 'CHI2_THRESHOLD', 'STABLERV_CHI2_PROB',
'PARAM', 'FPARAM', 'PARAM_COV', 'FPARAM_COV', 'PARAMFLAG', 'FELEM', 'FELEM_ERR',
'X_H', 'X_H_ERR', 'X_M', 'X_M_ERR', 'ELEM_CHI2', 'ELEMFLAG', 'ALL_VISIT_PK',
'VISIT_PK', 'FPARAM_CLASS', 'CHI2_CLASS', 'binary_catalog']
cols_keep = []
for col in binaries_join.columns:
if col not in cols_drop:
cols_keep.append(col)
binaries_join = binaries_join[cols_keep].to_pandas()
APOGEE_log_P = np.log10(binaries_join.MAP_P.values)
datfile_name = 'pop_{}_var_{}_run_{}.h5'.format(self.sys_type, self.pop_var, run)
logfile_name = 'log_pop_{}_var_{}_run_{}.txt'.format(self.sys_type, self.pop_var, run)
# open up a file to write data to in case the run stops
# Open the hdf5 file to store the fixed population data
try:
dat_store = pd.HDFStore(datfile_name)
n_samp_MW = pd.read_hdf(datfile_name, 'n_samp_MW').max()[0]
n_samp_APOGEE = pd.read_hdf(datfile_name, 'n_samp_APOGEE').max()[0]
MW_pop = pd.read_hdf(datfile_name, 'MW_pop')
n_MW = len(MW_pop)
APOGEE_pop = | pd.read_hdf(datfile_name, 'APOGEE_pop') | pandas.read_hdf |
# -*- coding: utf-8 -*-
"""Main formatting source code to format modelling results for plotting.
This code was written to process PLEXOS HDF5 outputs to get them ready for plotting.
Once the data is processed it is outputted as an intermediary HDF5 file format so that
it can be read into the marmot_plot_main.py file
@author: <NAME>
"""
# ===============================================================================
# Import Python Libraries
# ===============================================================================
import os
import sys
import pathlib
FILE_DIR = pathlib.Path(__file__).parent.absolute() # Location of this module
if __name__ == '__main__': # Add Marmot directory to sys path if running from __main__
if os.path.dirname(os.path.dirname(__file__)) not in sys.path:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
os.chdir(pathlib.Path(__file__).parent.absolute().parent.absolute())
import time
import re
import logging
import logging.config
import pandas as pd
import h5py
import yaml
from typing import Union
try:
from marmot.meta_data import MetaData
except ModuleNotFoundError:
print("Attempted import of Marmot as a module from a Git directory. ", end='')
print("Import of Marmot will not function in this way. ", end='')
print("To import Marmot as a module use the preferred method of pip installing Marmot, ", end='')
print("or add the Marmot directory to the system path, see ReadME for details.\n")
print("System will now exit")
sys.exit()
import marmot.config.mconfig as mconfig
# Import as Submodule
try:
from h5plexos.query import PLEXOSSolution
except ModuleNotFoundError:
from marmot.h5plexos.h5plexos.query import PLEXOSSolution
# A bug in pandas requires this to be included,
# otherwise df.to_string truncates long strings. Fix available in Pandas 1.0
# but leaving here in case user version not up to date
pd.set_option("display.max_colwidth", 1000)
# Conversion units dict, key values is a tuple of new unit name and conversion multiplier
UNITS_CONVERSION = {
'kW': ('MW', 1e-3),
'MW': ('MW', 1),
'GW': ('MW', 1e3),
'TW': ('MW', 1e6),
'kWh': ('MWh', 1e-3),
'MWh': ('MWh', 1),
'GWh': ('MWh', 1e3),
'TWh': ('MWh', 1e6),
'lb': ('kg', 0.453592),
'ton': ('kg', 907.18474),
'kg': ('kg', 1),
'tonne': ('kg', 1000),
'$': ('$', 1),
'$000': ('$', 1000),
'h': ('h', 1),
'MMBTU': ('MMBTU', 1),
'GBTU': ('MMBTU', 1000),
'GJ"': ('MMBTU', 0.947817),
'TJ': ('MMBTU', 947.817120),
'$/MW': ('$/MW', 1),
'lb/MWh' : ('kg/MWh', 0.453592),
'Kg/MWh': ('Kg/MWh', 1)
}
class SetupLogger():
"""Sets up the python logger.
This class handles the following.
1. Configures logger from marmot_logging_config.yml file.
2. Handles rollover of log file on each instantiation.
3. Sets log_directory.
4. Append optional suffix to the end of the log file name
Optional suffix is useful when running multiple processes in parallel to
allow logging to separate files.
"""
def __init__(self, log_directory: str = 'logs',
log_suffix: str = None):
"""
Args:
log_directory (str, optional): log directory to save logs.
Defaults to 'logs'.
log_suffix (str, optional): Optional suffix to add to end of log file.
Defaults to None.
"""
if log_suffix is None:
self.log_suffix = ''
else:
self.log_suffix = f'_{log_suffix}'
current_dir = os.getcwd()
os.chdir(FILE_DIR)
try:
os.makedirs(log_directory)
except FileExistsError:
# log directory already exists
pass
with open('config/marmot_logging_config.yml', 'rt') as f:
conf = yaml.safe_load(f.read())
conf['handlers']['warning_handler']['filename'] = \
(conf['handlers']['warning_handler']['filename']
.format(log_directory, 'formatter', self.log_suffix))
conf['handlers']['info_handler']['filename'] = \
(conf['handlers']['info_handler']['filename']
.format(log_directory, 'formatter', self.log_suffix))
logging.config.dictConfig(conf)
self.logger = logging.getLogger('marmot_format')
# Creates a new log file for next run
self.logger.handlers[1].doRollover()
self.logger.handlers[2].doRollover()
os.chdir(current_dir)
class Process(SetupLogger):
"""Process PLEXOS class specific data from h5plexos database.
All methods are PLEXOS Class specific e.g generator, region, zone, line etc.
"""
def __init__(self, df: pd.DataFrame, metadata: MetaData,
model: str, Region_Mapping: pd.DataFrame,
emit_names: pd.DataFrame, logger: logging.Logger):
"""
Args:
df (pd.DataFrame): Unprocessed h5plexos dataframe containing
class and property specifc data.
metadata (MetaData): Instantiation of MetaData for specific
h5plexos file.
model (str): Name of specific PLEXOS model partition
Region_Mapping (pd.DataFrame): DataFrame to map custom
regions/zones to create custom aggregations.
emit_names (pd.DataFrame): DataFrame with 2 columns to rename
emission names.
logger (logging.Logger): logger object from SetupLogger.
"""
# certain methods require information from metadata. metadata is now
# passed in as an instance of MetaData class for the appropriate model
self.df = df
self.metadata = metadata
self.model = model
self.Region_Mapping = Region_Mapping
self.emit_names = emit_names
self.logger = logger
if not self.emit_names.empty:
self.emit_names_dict = (self.emit_names[['Original', 'New']]
.set_index("Original").to_dict()["New"])
def df_process_generator(self) -> pd.DataFrame:
"""Format PLEXOS Generator Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['tech', 'gen_name'], level=['category', 'name'], inplace=True)
if self.metadata.region_generator_category(self.model).empty is False:
region_gen_idx = pd.CategoricalIndex(self.metadata.region_generator_category(self.model)
.index.get_level_values(0))
region_gen_idx = region_gen_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_region = pd.MultiIndex(levels=df.index.levels + [region_gen_idx.categories],
codes=df.index.codes + [region_gen_idx.codes],
names=df.index.names + region_gen_idx.names)
else:
idx_region = df.index
if self.metadata.zone_generator_category(self.model).empty is False:
zone_gen_idx = pd.CategoricalIndex(self.metadata.zone_generator_category(self.model)
.index.get_level_values(0))
zone_gen_idx = zone_gen_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_zone = pd.MultiIndex(levels=idx_region.levels + [zone_gen_idx.categories],
codes=idx_region.codes + [zone_gen_idx.codes],
names=idx_region.names + zone_gen_idx.names)
else:
idx_zone = idx_region
if not self.Region_Mapping.empty:
region_gen_mapping_idx = pd.MultiIndex.from_frame(self.metadata.region_generator_category(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.sort_values(by=['tech', 'gen_name'])
.drop(['region', 'tech', 'gen_name'], axis=1)
)
region_gen_mapping_idx = region_gen_mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_map = pd.MultiIndex(levels=idx_zone.levels + region_gen_mapping_idx.levels,
codes=idx_zone.codes + region_gen_mapping_idx.codes,
names=idx_zone.names + region_gen_mapping_idx.names)
else:
idx_map = idx_zone
df = pd.DataFrame(data=df.values.reshape(-1), index=idx_map)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_region(self) -> pd.DataFrame:
"""Format PLEXOS Region Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('region', level='name', inplace=True)
# checks if Region_Mapping contains data to merge, skips if empty
if not self.Region_Mapping.empty:
mapping_idx = pd.MultiIndex.from_frame(self.metadata.regions(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.drop(['region', 'category'], axis=1)
)
mapping_idx = mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx = pd.MultiIndex(levels=df.index.levels + mapping_idx.levels,
codes=df.index.codes + mapping_idx.codes,
names=df.index.names + mapping_idx.names)
else:
idx = df.index
df = pd.DataFrame(data=df.values.reshape(-1), index=idx)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # Move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_zone(self) -> pd.DataFrame:
"""Format PLEXOS Zone Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('zone', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_line(self) -> pd.DataFrame:
"""Format PLEXOS Line Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('line_name', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_interface(self) -> pd.DataFrame:
"""Format PLEXOS PLEXOS Interface Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['interface_name', 'interface_category'],
level=['name', 'category'], inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_reserve(self) -> pd.DataFrame:
"""Format PLEXOS Reserve Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['parent', 'Type'], level=['name', 'category'], inplace=True)
df = df.reset_index() # unzip the levels in index
if self.metadata.reserves_regions(self.model).empty is False:
# Merges in regions where reserves are located
df = df.merge(self.metadata.reserves_regions(self.model),
how='left', on='parent')
if self.metadata.reserves_zones(self.model).empty is False:
# Merges in zones where reserves are located
df = df.merge(self.metadata.reserves_zones(self.model),
how='left', on='parent')
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0)
# move timestamp to start of df
df_col.insert(0, df_col.pop(df_col.index("timestamp")))
df.set_index(df_col, inplace=True)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_reserves_generators(self) -> pd.DataFrame:
"""Format PLEXOS Reserve_Generators Relational Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['gen_name'], level=['child'], inplace=True)
df = df.reset_index() # unzip the levels in index
df = df.merge(self.metadata.generator_category(self.model),
how='left', on='gen_name')
# merging in generator region/zones first prevents double
# counting in cases where multiple model regions are within a reserve region
if self.metadata.region_generators(self.model).empty is False:
df = df.merge(self.metadata.region_generators(self.model),
how='left', on='gen_name')
if self.metadata.zone_generators(self.model).empty is False:
df = df.merge(self.metadata.zone_generators(self.model),
how='left', on='gen_name')
# now merge in reserve regions/zones
if self.metadata.reserves_regions(self.model).empty is False:
# Merges in regions where reserves are located
df = df.merge(self.metadata.reserves_regions(self.model),
how='left', on=['parent', 'region'])
if self.metadata.reserves_zones(self.model).empty is False:
# Merges in zones where reserves are located
df = df.merge(self.metadata.reserves_zones(self.model),
how='left', on=['parent', 'zone'])
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0)
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df.set_index(df_col, inplace=True)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_fuel(self) -> pd.DataFrame:
"""Format PLEXOS Fuel Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('fuel_type', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_constraint(self) -> pd.DataFrame:
"""Format PLEXOS Constraint Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['constraint_category', 'constraint'],
level=['category', 'name'], inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_emission(self) -> pd.DataFrame:
"""Format PLEXOS Emission Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename('emission_type', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_emissions_generators(self) -> pd.DataFrame:
"""Format PLEXOS Emissions_Generators Relational Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['gen_name'], level=['child'], inplace=True)
df.index.rename(['pollutant'], level=['parent'], inplace=True)
df = df.reset_index() # unzip the levels in index
# merge in tech information
df = df.merge(self.metadata.generator_category(self.model),
how='left', on='gen_name')
# merge in region and zone information
if self.metadata.region_generator_category(self.model).empty is False:
# merge in region information
df = df.merge(self.metadata.region_generator_category(self.model).reset_index(),
how='left', on=['gen_name', 'tech'])
if self.metadata.zone_generator_category(self.model).empty is False:
# Merges in zones where reserves are located
df = df.merge(self.metadata.zone_generator_category(self.model).reset_index(),
how='left', on=['gen_name', 'tech'])
if not self.Region_Mapping.empty:
df = df.merge(self.Region_Mapping, how="left", on="region")
if not self.emit_names.empty:
# reclassify emissions as specified by user in mapping
df['pollutant'] = pd.Categorical(df['pollutant'].map(lambda x: self.emit_names_dict.get(x, x)))
# remove categoricals (otherwise h5 save will fail)
df = df.astype({'tech': 'object', 'pollutant': 'object'})
# Checks if all emissions categories have been identified and matched.
# If not, lists categories that need a match
if not self.emit_names.empty:
if self.emit_names_dict != {} and (set(df['pollutant'].unique()).issubset(self.emit_names["New"].unique())) is False:
missing_emit_cat = list((set(df['pollutant'].unique())) - (set(self.emit_names["New"].unique())))
self.logger.warning(f"The following emission objects do not have a correct category mapping: {missing_emit_cat}\n")
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0)
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df.set_index(df_col, inplace=True)
# downcast values to save on memory
df[0] = pd.to_numeric(df[0].values, downcast='float')
# convert to range index (otherwise h5 save will fail)
df.columns = pd.RangeIndex(0, 1, step=1)
return df
def df_process_storage(self) -> pd.DataFrame:
"""Format PLEXOS Storage Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df = df.reset_index() # unzip the levels in index
df = df.merge(self.metadata.generator_storage(self.model),
how='left', on='name')
if self.metadata.region_generators(self.model).empty is False:
# Merges in regions where generators are located
df = df.merge(self.metadata.region_generators(self.model),
how='left', on='gen_name')
if self.metadata.zone_generators(self.model).empty is False:
# Merges in zones where generators are located
df = df.merge(self.metadata.zone_generators(self.model),
how='left', on='gen_name')
# checks if Region_Maping contains data to merge, skips if empty (Default)
if not self.Region_Mapping.empty:
# Merges in all Region Mappings
df = df.merge(self.Region_Mapping, how='left', on='region')
df.rename(columns={'name': 'storage_resource'}, inplace=True)
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0) # Removes 0, the data column from the list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df.set_index(df_col, inplace=True)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_region_regions(self) -> pd.DataFrame:
"""Format PLEXOS Region_Regions Relational Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_node(self) -> pd.DataFrame:
"""Format PLEXOS Node Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('node', level='name', inplace=True)
df.sort_index(level=['node'], inplace=True)
if self.metadata.node_region(self.model).empty is False:
node_region_idx = pd.CategoricalIndex(self.metadata.node_region(self.model).index.get_level_values(0))
node_region_idx = node_region_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_region = pd.MultiIndex(levels=df.index.levels + [node_region_idx.categories],
codes=df.index.codes + [node_region_idx.codes],
names=df.index.names + node_region_idx.names)
else:
idx_region = df.index
if self.metadata.node_zone(self.model).empty is False:
node_zone_idx = pd.CategoricalIndex(self.metadata.node_zone(self.model).index.get_level_values(0))
node_zone_idx = node_zone_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_zone = pd.MultiIndex(levels=idx_region.levels + [node_zone_idx.categories],
codes=idx_region.codes + [node_zone_idx.codes],
names=idx_region.names + node_zone_idx.names)
else:
idx_zone = idx_region
if not self.Region_Mapping.empty:
region_mapping_idx = pd.MultiIndex.from_frame(self.metadata.node_region(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.drop(['region', 'node'], axis=1)
)
region_mapping_idx = region_mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_map = pd.MultiIndex(levels=idx_zone.levels + region_mapping_idx.levels,
codes=idx_zone.codes + region_mapping_idx.codes,
names=idx_zone.names + region_mapping_idx.names)
else:
idx_map = idx_zone
df = pd.DataFrame(data=df.values.reshape(-1), index=idx_map)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_abatement(self):
"""Format PLEXOS Abatement Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename('abatement_name', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_batterie(self):
"""
Method for formatting data which comes form the PLEXOS Batteries Class
Returns
-------
df : pd.DataFrame
Processed Output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename('battery_name', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
class MarmotFormat(SetupLogger):
"""Main module class to be instantiated to run the formatter.
MarmotFormat reads in PLEXOS hdf5 files created with the h5plexos library
and processes the output results to ready them for plotting.
Once the outputs have been processed, they are saved to an intermediary hdf5 file
which can then be read into the Marmot plotting code
"""
def __init__(self, Scenario_name: str,
PLEXOS_Solutions_folder: str,
Plexos_Properties: Union[str, pd.DataFrame],
Marmot_Solutions_folder: str = None,
mapping_folder: str = 'mapping_folder',
Region_Mapping: Union[str, pd.DataFrame] = pd.DataFrame(),
emit_names: Union[str, pd.DataFrame] = pd.DataFrame(),
VoLL: int = 10000,
**kwargs):
"""
Args:
Scenario_name (str): Name of scenario to process.
PLEXOS_Solutions_folder (str): Folder containing h5plexos results files.
Plexos_Properties (Union[str, pd.DataFrame]): PLEXOS properties to process,
must follow format seen in Marmot directory.
Marmot_Solutions_folder (str, optional): Folder to save Marmot solution files.
Defaults to None.
mapping_folder (str, optional): The location of the Marmot mapping folder.
Defaults to 'mapping_folder'.
Region_Mapping (Union[str, pd.DataFrame], optional): Mapping file to map custom
regions/zones to create custom aggregations.
Aggregations are created by grouping PLEXOS regions.
Defaults to pd.DataFrame().
emit_names (Union[str, pd.DataFrame], optional): Mapping file to rename
emissions types. Defaults to pd.DataFrame().
VoLL (int, optional): Value of lost load, used to calculate cost of
unserved energy. Defaults to 10000.
"""
super().__init__(**kwargs) # Instantiation of SetupLogger
self.Scenario_name = Scenario_name
self.PLEXOS_Solutions_folder = PLEXOS_Solutions_folder
self.Marmot_Solutions_folder = Marmot_Solutions_folder
self.mapping_folder = mapping_folder
self.VoLL = VoLL
if self.Marmot_Solutions_folder is None:
self.Marmot_Solutions_folder = self.PLEXOS_Solutions_folder
if isinstance(Plexos_Properties, str):
try:
self.Plexos_Properties = pd.read_csv(Plexos_Properties)
except FileNotFoundError:
self.logger.warning("Could not find specified Plexos_Properties file; "
"check file name. This is required to run Marmot, system will now exit")
sys.exit()
elif isinstance(Plexos_Properties, pd.DataFrame):
self.Plexos_Properties = Plexos_Properties
if isinstance(Region_Mapping, str):
try:
self.Region_Mapping = pd.read_csv(Region_Mapping)
if not self.Region_Mapping.empty:
self.Region_Mapping = self.Region_Mapping.astype(str)
except FileNotFoundError:
self.logger.warning("Could not find specified Region Mapping file; "
"check file name\n")
self.Region_Mapping = pd.DataFrame()
elif isinstance(Region_Mapping, pd.DataFrame):
self.Region_Mapping = Region_Mapping
if not self.Region_Mapping.empty:
self.Region_Mapping = self.Region_Mapping.astype('string')
try:
# delete category columns if exists
self.Region_Mapping = self.Region_Mapping.drop(["category"], axis=1)
except KeyError:
pass
if isinstance(emit_names, str):
try:
self.emit_names = pd.read_csv(emit_names)
if not self.emit_names.empty:
self.emit_names.rename(columns={self.emit_names.columns[0]: 'Original',
self.emit_names.columns[1]: 'New'},
inplace=True)
except FileNotFoundError:
self.logger.warning('Could not find specified emissions mapping file; check file name\n')
self.emit_names = pd.DataFrame()
elif isinstance(emit_names, pd.DataFrame):
self.emit_names = emit_names
if not self.emit_names.empty:
self.emit_names.rename(columns={self.emit_names.columns[0]: 'Original',
self.emit_names.columns[1]: 'New'},
inplace=True)
def output_metadata(self, files_list: list, hdf_out_folder: str,
HDF5_output: str, HDF5_folder_in: str) -> None:
"""Transfers metadata from original PLEXOS solutions file to processed HDF5 file.
For each partition in a given scenario, the metadata from that partition
is copied over and saved in the processed output file.
Args:
files_list (list): List of all h5 files in hdf5 folder in alpha numeric order
hdf_out_folder (str): Location of formatted output h5 files
HDF5_output (str): Name of formatted hdf5 output file
HDF5_folder_in (str): Location of original PLEXOS solutions h5 files
"""
for partition in files_list:
f = h5py.File(os.path.join(HDF5_folder_in, partition),'r')
meta_keys = [key for key in f['metadata'].keys()]
group_dict = {}
for key in meta_keys:
sub_dict = {}
subkeys = [key for key in f['metadata'][key].keys()]
for sub in subkeys:
dset = f['metadata'][key][sub]
sub_dict[sub] = dset
group_dict[key] = sub_dict
with h5py.File(os.path.join(hdf_out_folder, HDF5_output),"a") as g:
# check if metadata group already exists
existing_groups = [key for key in g.keys()]
if 'metadata' not in existing_groups:
grp = g.create_group('metadata')
else:
grp = g['metadata']
partition_group = grp.create_group(partition)
for key in list(group_dict.keys()):
subgrp = partition_group.create_group(key)
s_dict = group_dict[key]
for key2 in list(s_dict.keys()):
dset = s_dict[key2]
subgrp.create_dataset(name=key2,data=dset)
f.close()
def _get_data(self, plexos_class: str, plexos_prop: str,
timescale: str, db: PLEXOSSolution, metadata: MetaData) -> pd.DataFrame:
"""Handles the pulling of data from the H5plexos hdf5
file and then passes the data to one of the formating functions
Args:
plexos_class (str): PLEXOS class e.g Region, Generator, Zone etc
plexos_prop (str): PLEXOS property e.g Max Capacity, Generation etc.
timescale (str): Data timescale, e.g Hourly, Monthly, 5 minute etc.
db (PLEXOSSolution): PLEXOSSolution instance for specific h5plexos file.
metadata (MetaData): MetaData instance
Returns:
pd.DataFrame: Formatted results dataframe.
"""
try:
if "_" in plexos_class:
df = db.query_relation_property(plexos_class, plexos_prop,
timescale=timescale)
object_class = plexos_class
else:
df = db.query_object_property(plexos_class, plexos_prop,
timescale=timescale)
if ((0,6,0) <= db.version and db.version < (0,7,0)):
object_class = f"{plexos_class}s"
else:
object_class = plexos_class
except (ValueError, KeyError):
df = self._report_prop_error(plexos_prop, plexos_class)
return df
# handles h5plexos naming discrepency
if ((0,6,0) <= db.version and db.version < (0,7,0)):
# Get original units from h5plexos file
df_units = (db.h5file[f'/data/ST/{timescale}/{object_class}/{plexos_prop}']
.attrs['units'].decode('UTF-8'))
else:
df_units = (db.h5file[f'/data/ST/{timescale}/{object_class}/{plexos_prop}']
.attrs['unit'])
# find unit conversion values
converted_units = UNITS_CONVERSION.get(df_units, (df_units, 1))
# Instantiate instance of Process Class
# metadata is used as a parameter to initialize process_cl
process_cl = Process(df, metadata, db.h5file.filename, self.Region_Mapping,
self.emit_names, self.logger)
# Instantiate Method of Process Class
process_att = getattr(process_cl, f'df_process_{plexos_class}')
# Process attribute and return to df
df = process_att()
# Convert units and add unit column to index
df = df*converted_units[1]
units_index = pd.Index([converted_units[0]] *len(df), name='units')
df.set_index(units_index, append=True, inplace=True)
if plexos_class == 'region' and plexos_prop == "Unserved Energy" and int(df.sum(axis=0)) > 0:
self.logger.warning(f"Scenario contains Unserved Energy: {int(df.sum(axis=0))} MW\n")
return df
def _report_prop_error(self, plexos_prop: str,
plexos_class: str) -> pd.DataFrame:
"""Outputs a warning message when the _get_data method
cannot find the specified PLEXOS property in the h5plexos hdf5 file
Args:
plexos_prop (str): PLEXOS class e.g Region, Generator, Zone etc.
plexos_class (str): PLEXOS property e.g Max Capacity, Generation etc.
Returns:
pd.DataFrame: Empty DataFrame.
"""
self.logger.warning(f'CAN NOT FIND "{plexos_class} {plexos_prop}". "{plexos_prop}" DOES NOT EXIST')
self.logger.info('SKIPPING PROPERTY\n')
df = pd.DataFrame()
return df
@staticmethod
def _save_to_h5(df: pd.DataFrame, file_name: str, key: str,
mode: str = "a", complevel: int = 9,
complib: str ='blosc:zlib', **kwargs) -> None:
"""Saves data to formatted hdf5 file
Args:
df (pd.DataFrame): Dataframe to save
file_name (str): name of hdf5 file
key (str): formatted property identifier, e.g generator_Generation
mode (str, optional): file access mode. Defaults to "a".
complevel (int, optional): compression level. Defaults to 9.
complib (str, optional): compression library. Defaults to 'blosc:zlib'.
"""
df.to_hdf(file_name, key=key, mode=mode,
complevel=complevel,
complib=complib,
**kwargs)
def run_formatter(self) -> None:
"""Main method to call to begin processing h5plexos files
This method takes no input variables, all required variables
are passed in via the __init__ method.
"""
self.logger.info(f"#### Processing {self.Scenario_name} PLEXOS Results ####")
# ===============================================================================
# Input and Output Directories
# ===============================================================================
HDF5_output = f"{self.Scenario_name}_formatted.h5"
HDF5_folder_in = os.path.join(self.PLEXOS_Solutions_folder, str(self.Scenario_name))
try:
os.makedirs(HDF5_folder_in)
except FileExistsError:
# directory already exists
pass
hdf_out_folder = os.path.join(self.Marmot_Solutions_folder, 'Processed_HDF5_folder')
try:
os.makedirs(hdf_out_folder)
except FileExistsError:
# directory already exists
pass
startdir = os.getcwd()
os.chdir(HDF5_folder_in) # Due to a bug on eagle need to chdir before listdir
files = []
for names in os.listdir():
if names.endswith(".h5"):
files.append(names) # Creates a list of only the hdf5 files
# List of all hf files in hdf5 folder in alpha numeric order
files_list = sorted(files, key=lambda x:int(re.sub('\D', '', x)))
os.chdir(startdir)
# Read in all HDF5 files into dictionary
self.logger.info("Loading all HDF5 files to prepare for processing")
hdf5_collection = {}
for file in files_list:
hdf5_collection[file] = PLEXOSSolution(os.path.join(HDF5_folder_in, file))
# ===================================================================================
# Process the Outputs
# ===================================================================================
# Creates Initial HDF5 file for outputting formated data
Processed_Data_Out = pd.DataFrame()
if os.path.isfile(os.path.join(hdf_out_folder, HDF5_output)) is True:
self.logger.info(f"'{hdf_out_folder}\{HDF5_output}' already exists: New variables will be added\n")
# Skip properties that already exist in *formatted.h5 file.
with h5py.File(os.path.join(hdf_out_folder, HDF5_output), 'r') as f:
existing_keys = [key for key in f.keys()]
# The processed HDF5 output file already exists. If metadata is already in
# this file, leave as is. Otherwise, append it to the file.
if 'metadata' not in existing_keys:
self.logger.info('Adding metadata to processed HDF5 file.')
self.output_metadata(files_list, hdf_out_folder, HDF5_output, HDF5_folder_in)
if not mconfig.parser('skip_existing_properties'):
existing_keys = []
# The processed HDF5 file does not exist. Create the file and add metadata to it.
else:
existing_keys = []
# Create empty hdf5 file
f = h5py.File(os.path.join(hdf_out_folder, HDF5_output), "w")
f.close()
self.output_metadata(files_list, hdf_out_folder, HDF5_output, HDF5_folder_in)
process_properties = self.Plexos_Properties.loc[self.Plexos_Properties["collect_data"] == True]
# Create an instance of metadata, and pass that as a variable to get data.
meta = MetaData(HDF5_folder_in, read_from_formatted_h5=False, Region_Mapping=self.Region_Mapping)
if not self.Region_Mapping.empty:
# if any(meta.regions()['region'] not in Region_Mapping['region']):
if set(meta.regions(files_list[0])['region']).issubset(self.Region_Mapping['region']) is False:
missing_regions = list(set(meta.regions(files_list[0])['region']) - set(self.Region_Mapping['region']))
self.logger.warning(f'The Following PLEXOS REGIONS are missing from the "region" column of your mapping file: {missing_regions}\n',)
start = time.time()
# Main loop to process each output and pass data to functions
for index, row in process_properties.iterrows():
Processed_Data_Out = pd.DataFrame()
data_chunks = []
self.logger.info(f'Processing {row["group"]} {row["data_set"]}')
prop_underscore = row["data_set"].replace(' ', '_')
key_path = row["group"] + "_" + prop_underscore
if key_path not in existing_keys:
for model in files_list:
self.logger.info(f" {model}")
db = hdf5_collection.get(model)
processed_data = self._get_data(row["group"], row["data_set"], row["data_type"], db, meta)
if processed_data.empty is True:
break
# Check if data is for year interval and of type capacity
if (row["data_type"] == "year") & (
(row["data_set"] == "Installed Capacity")
| (row["data_set"] == "Export Limit")
| (row["data_set"] == "Import Limit")
):
data_chunks.append(processed_data)
self.logger.info(f"{row['data_set']} Year property reported from only the first partition")
break
else:
data_chunks.append(processed_data)
if data_chunks:
Processed_Data_Out = pd.concat(data_chunks, copy=False)
if Processed_Data_Out.empty is False:
if (row["data_type"] == "year"):
self.logger.info("Please Note: Year properties can not be checked for duplicates.\n\
Overlaping data cannot be removed from 'Year' grouped data.\n\
This will effect Year data that differs between partitions such as cost results.\n\
It will not effect Year data that is equal in all partitions such as Installed Capacity or Line Limit results")
else:
oldsize = Processed_Data_Out.size
Processed_Data_Out = Processed_Data_Out.loc[~Processed_Data_Out.index.duplicated(keep='first')] # Remove duplicates; keep first entry
if (oldsize - Processed_Data_Out.size) > 0:
self.logger.info(f'Drop duplicates removed {oldsize-Processed_Data_Out.size} rows')
row["data_set"] = row["data_set"].replace(' ', '_')
save_attempt=1
while save_attempt<=3:
try:
self.logger.info("Saving data to h5 file...")
MarmotFormat._save_to_h5(Processed_Data_Out,
os.path.join(hdf_out_folder, HDF5_output),
key=f'{row["group"]}_{row["data_set"]}')
self.logger.info("Data saved to h5 file successfully\n")
save_attempt=4
except:
self.logger.warning("h5 File is probably in use, waiting to attempt to save again")
time.sleep(60)
save_attempt+=1
else:
continue
else:
self.logger.info(f"{key_path} already exists in output .h5 file.")
self.logger.info("PROPERTY ALREADY PROCESSED\n")
continue
# ===================================================================================
# Calculate Extra Outputs
# ===================================================================================
if "generator_Curtailment" not in h5py.File(os.path.join(hdf_out_folder, HDF5_output), 'r') or not mconfig.parser('skip_existing_properties'):
try:
self.logger.info("Processing generator Curtailment")
try:
Avail_Gen_Out = pd.read_hdf(os.path.join(hdf_out_folder,
HDF5_output),
'generator_Available_Capacity')
Total_Gen_Out = pd.read_hdf(os.path.join(hdf_out_folder,
HDF5_output),
'generator_Generation')
if Total_Gen_Out.empty is True:
self.logger.warning("generator_Available_Capacity & generator_Generation are required for Curtailment calculation")
except KeyError:
self.logger.warning("generator_Available_Capacity & generator_Generation are required for Curtailment calculation")
Curtailment_Out = Avail_Gen_Out - Total_Gen_Out
Upward_Available_Capacity = Curtailment_Out
MarmotFormat._save_to_h5(Curtailment_Out,
os.path.join(hdf_out_folder, HDF5_output),
key="generator_Curtailment")
MarmotFormat._save_to_h5(Upward_Available_Capacity,
os.path.join(hdf_out_folder, HDF5_output),
key="generator_Upward_Available_Capacity")
self.logger.info("Data saved to h5 file successfully\n")
# Clear Some Memory
del Total_Gen_Out
del Avail_Gen_Out
del Curtailment_Out
except Exception:
self.logger.warning("NOTE!! Curtailment not calculated, processing skipped\n")
if "region_Cost_Unserved_Energy" not in h5py.File(os.path.join(hdf_out_folder, HDF5_output), 'r') or not mconfig.parser('skip_existing_properties'):
try:
self.logger.info("Calculating Cost Unserved Energy: Regions")
Cost_Unserved_Energy = pd.read_hdf(os.path.join(hdf_out_folder,
HDF5_output),
'region_Unserved_Energy')
Cost_Unserved_Energy = Cost_Unserved_Energy * self.VoLL
MarmotFormat._save_to_h5(Cost_Unserved_Energy,
os.path.join(hdf_out_folder, HDF5_output),
key="region_Cost_Unserved_Energy")
except KeyError:
self.logger.warning("NOTE!! Regional Unserved Energy not available to process, processing skipped\n")
pass
if "zone_Cost_Unserved_Energy" not in h5py.File(os.path.join(hdf_out_folder, HDF5_output), 'r') or not mconfig.parser('skip_existing_properties'):
try:
self.logger.info("Calculating Cost Unserved Energy: Zones")
Cost_Unserved_Energy = pd.read_hdf(os.path.join(hdf_out_folder,
HDF5_output),
'zone_Unserved_Energy')
Cost_Unserved_Energy = Cost_Unserved_Energy * self.VoLL
MarmotFormat._save_to_h5(Cost_Unserved_Energy,
os.path.join(hdf_out_folder, HDF5_output),
key="zone_Cost_Unserved_Energy")
except KeyError:
self.logger.warning("NOTE!! Zonal Unserved Energy not available to process, processing skipped\n")
pass
end = time.time()
elapsed = end - start
self.logger.info('Main loop took %s minutes', round(elapsed/60, 2))
self.logger.info(f'Formatting COMPLETED for {self.Scenario_name}')
def main():
"""Run the formatting code and format desired properties based on user input files."""
# ===============================================================================
# Input Properties
# ===============================================================================
# Changes working directory to location of this python file
os.chdir(FILE_DIR)
Marmot_user_defined_inputs = pd.read_csv(mconfig.parser("user_defined_inputs_file"),
usecols=['Input', 'User_defined_value'],
index_col='Input',
skipinitialspace=True)
# File which determiens which plexos properties to pull from the h5plexos results and process, this file is in the repo
Plexos_Properties = pd.read_csv(mconfig.parser('plexos_properties_file'))
# Name of the Scenario(s) being run, must have the same name(s) as the folder holding the runs HDF5 file
Scenario_List = pd.Series(Marmot_user_defined_inputs.loc['Scenario_process_list'].squeeze().split(",")).str.strip().tolist()
# The folder that contains all PLEXOS h5plexos outputs - the h5 files should be contained in another folder with the Scenario_name
PLEXOS_Solutions_folder = Marmot_user_defined_inputs.loc['PLEXOS_Solutions_folder'].to_string(index=False).strip()
# Folder to save your processed solutions
if | pd.isna(Marmot_user_defined_inputs.loc['Marmot_Solutions_folder','User_defined_value']) | pandas.isna |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with tm.assertRaises(AttributeError):
df.assign(C=df.A, D=df.A + df.C)
with tm.assertRaises(KeyError):
df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
with tm.assertRaises(KeyError):
df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
'g', 'h', 'i', 'j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
index=lrange(N))
assert_frame_equal(df, expected)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_index_equal(df.columns, Index(['foo', 'c', 'b', 'a']))
tm.assert_series_equal(df['a'], df['foo'], check_names=False)
df.insert(2, 'bar', df['c'])
self.assert_index_equal(df.columns,
Index(['foo', 'c', 'bar', 'b', 'a']))
tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
# GH 13522
df = | DataFrame(index=['A', 'B', 'C']) | pandas.DataFrame |
import pandas as pd
import plotly
from path import Path
from jinja2 import Environment, FileSystemLoader # html template engine
from flask import url_for
import visualize as bv
def generate_voc_html(feature: str, values: list, results: dict, template_name: str='voc.html'):
# express plots in html and JS
mutation_diversity = ''
# config = dict({'displaylogo': False})
config = {'displaylogo': False,
'scrollZoom': False,
'modeBarButtonsToAdd':['drawline',
'drawopenpath',
'drawrect',
'eraseshape'
],
'modeBarButtonsToRemove': ['toggleSpikelines','hoverCompareCartesian','lasso2d']}
# config = {'displayModeBar': False}
if results.get('mutation_diversity', None):
mutation_diversity = plotly.offline.plot(results['mutation_diversity'], include_plotlyjs=False, output_type='div', config=config)
sampling_img = plotly.offline.plot(results['sampling_fig'], include_plotlyjs=False, output_type='div', config=config)
world_time = plotly.offline.plot(results['world_time'], include_plotlyjs=False, output_type='div', config=config)
us_time = plotly.offline.plot(results['us_time'], include_plotlyjs=False, output_type='div', config=config)
ca_time = plotly.offline.plot(results['ca_time'], include_plotlyjs=False, output_type='div', config=config)
world_rtime = plotly.offline.plot(results['world_rtime'], include_plotlyjs=False, output_type='div', config=config)
us_rtime = plotly.offline.plot(results['us_rtime'], include_plotlyjs=False, output_type='div', config=config)
ca_rtime = plotly.offline.plot(results['ca_rtime'], include_plotlyjs=False, output_type='div', config=config)
world_map = plotly.offline.plot(results['world_map'],
include_plotlyjs=False, output_type='div', config=config)
state_map = plotly.offline.plot(results['state_map'], include_plotlyjs=False, output_type='div', config=config)
county_map = plotly.offline.plot(results['county_map'], include_plotlyjs=False, output_type='div', config=config)
# genetic_distance_plot = plotly.offline.plot(results['genetic_distance_plot'], include_plotlyjs=False, output_type='div')
strain_distance_plot = plotly.offline.plot(results['strain_distance_plot'], include_plotlyjs=False, output_type='div', config=config)
# aa_distance_plot = plotly.offline.plot(results['aa_distance_plot'], include_plotlyjs=False, output_type='div')
# s_aa_distance_plot = plotly.offline.plot(results['s_aa_distance_plot'], include_plotlyjs=False, output_type='div')
# generate output messages
#TODO: expt_name, first_detected
date = results['date']
strain = results['strain']
total_num = results['total_num']
num_countries = results['num_countries']
us_num = results['us_num']
num_states = results['num_states']
ca_num = results['ca_num']
num_lineages = results.get('num_lineages', '')
mutations = results.get('mutations', '')
# dir containing our template
file_loader = FileSystemLoader('templates')
# load the environment
env = Environment(loader=file_loader)
# load the template
template = env.get_template(template_name)
# render data in our template format
html_output = template.render(feature=feature, values=values,
total_num=total_num, num_countries=num_countries,
us_num=us_num, num_states=num_states, ca_num=ca_num,
num_lineages=num_lineages, strain=strain,
mutations=mutations,
date=date, world_time=world_time, us_time=us_time,
ca_time=ca_time, world_rtime=world_rtime,
ca_rtime=ca_rtime, us_rtime=us_rtime,
world_map=world_map,
state_map=state_map, county_map=county_map,
# genetic_distance_plot=genetic_distance_plot,
strain_distance_plot=strain_distance_plot,
# aa_distance_plot=aa_distance_plot,
# s_aa_distance_plot=s_aa_distance_plot,
first_detected=results['first_detected'],
sampling_img=sampling_img,
mutation_diversity=mutation_diversity)
print(f"Results for {values} embedded in HTML report")
return html_output
def generate_voc_data(feature, values, input_params):
results = pd.DataFrame()
res = pd.DataFrame()
if feature == 'mutation':
print(f"Loading variant data...")
gisaid_data = | pd.read_csv(input_params['gisaid_data_fp'], compression='gzip') | pandas.read_csv |
from codonPython.check_null import check_null
import numpy as np
import pandas as pd
import pytest
testdata = | pd.DataFrame({
"col1" : [1,2,3,4,5,6,7,8,9,10],
"col2" : [11,12,13,14,15,np.nan,np.nan,18,19,20],
}) | pandas.DataFrame |
import pandas as pd
import numpy as np
data=pd.read_csv('england-premier-league-players-2018-to-2019-stats.csv')
#print(data)
#print(list(data.columns))
data_we_need=data[["full_name","position","Current Club","appearances_overall","goals_overall","assists_overall","penalty_goals","penalty_misses", 'clean_sheets_overall','yellow_cards_overall', 'red_cards_overall','Total_Price']]
#print(data_we_need)
indexNames = | pd.DataFrame() | pandas.DataFrame |
"""
"""
import io
import os
import pandas as pd
import numpy as np
from datetime import datetime
import yaml
import tethys_utils as tu
import logging
from time import sleep
from pyproj import Proj, CRS, Transformer
pd.options.display.max_columns = 10
#############################################
### Parameters
base_path = os.path.realpath(os.path.dirname(__file__))
permit_csv = os.path.join(base_path, 'es_water_permit_data_v02.csv')
sd_csv = os.path.join(base_path, 'es_stream_depletion_details.csv')
with open(os.path.join(base_path, 'parameters-permits.yml')) as param:
param = yaml.safe_load(param)
conn_config = param['remote']['connection_config']
bucket = param['remote']['bucket']
base_key = 'es/{name}.csv'
run_date = pd.Timestamp.today(tz='utc').round('s')
# run_date_local = run_date.tz_convert(ts_local_tz).tz_localize(None).strftime('%Y-%m-%d %H:%M:%S')
run_date_key = run_date.strftime('%Y%m%dT%H%M%SZ')
def read_s3_csv(s3, bucket, key):
"""
"""
resp = s3.get_object(Bucket=bucket, Key=key)
body1 = resp['Body'].read().decode()
s_io = io.StringIO(body1)
csv1 = | pd.read_csv(s_io) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
This file has a function that, given a deck:
1. Pulls the images of all the cards in that deck (high res)
2. Resize the image to be 63mm x 88mm (same ration at least)
3. Pads the image with a dark grey border to serve as cut marker
4. Stores all the images in folder named after the deck
5. Repeating cards get a number after it to identify them
6. The back of the card should also be saved somewhere
"""
from mtgnlp import config
from PIL import Image
import os
from pathlib import Path
import numpy as np
from sqlalchemy import create_engine
from tqdm import tqdm
import pandas as pd
import requests
import requests_cache
import json
import logging
logPathFileName = config.LOGS_DIR.joinpath("decks_create_images_for_printing.log")
# create logger'
logger = logging.getLogger("decks_create_images_for_printing")
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{logPathFileName}", mode="w")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
engine = create_engine(config.DB_STR)
tqdm.pandas(desc="Progress")
requests_cache.install_cache(
"scryfall_cache", backend="sqlite", expire_after=24 * 60 * 60
)
# import the necessary packages
# defining argument parsers
# ap = argparse.ArgumentParser()
# ap.add_argument("-i","--image",required=True,help="Input image path")
# args = vars(ap.parse_args())
startX_std, startY_std, endX_std, endY_std = 1295, 238, 1553, 615
width_std = endX_std - startX_std
height_std = endY_std - startY_std
max_h, max_w = 2560 + 1000, 3264
def get_card_updated_data(scryfall_id) -> str:
"""Gets updated data from scryfall
Args:
scryfall_id ([type]): [description]
Returns:
json: result from scryfalls card api
ref: https://scryfall.com/docs/api/cards/id
ex scryfall_id="e9d5aee0-5963-41db-a22b-cfea40a967a3"
"""
r = requests.get(
f"https://api.scryfall.com/cards/{scryfall_id}",
)
if r.status_code == 200:
return json.loads(r.text)
raise Exception("Could not download os save card image")
def download_card_image(scryfall_id, card_name: str) -> str:
"""Downloads card image from scryfall with best resolution
and saves to ./card_images/{card_name}.png
Args:
scryfall_id ([type]): [description]
card_name (str): [description]
Returns:
str: path to card relative to this scripts location
"""
config.DECKS_DIR.joinpath("card_images").mkdir(parents=True, exist_ok=True)
path = config.DECKS_DIR.joinpath(f"card_images/{card_name}.png")
filename = os.path.realpath(path)
# Download the file if it does not exist
if os.path.isfile(filename):
return path
r = requests.get(
f"https://api.scryfall.com/cards/{scryfall_id}?format=image&version=png",
stream=True,
)
if r.status_code == 200:
with open(filename, "wb") as f:
for chunk in r:
f.write(chunk)
return path
raise Exception("Could not download os save card image")
def enhance_with_scryfall_api(df: pd.DataFrame) -> pd.DataFrame:
"""Add current scryfall data about the card"""
df["scryfall_data"] = df["scryfallId"].progress_apply(
get_card_updated_data,
)
return df
def add_price_usd(df: pd.DataFrame) -> pd.DataFrame:
"""Add current scryfall USD prices"""
df["price_usd"] = df["scryfall_data"].progress_apply(
lambda x: x.get("prices", {}).get("usd", np.nan),
)
df["price_usd"] = pd.to_numeric(df["price_usd"])
return df
def add_img_paths_col(df: pd.DataFrame) -> pd.DataFrame:
"""Downloads images and saves local path to
image_local_path column
"""
df["image_local_path"] = df.progress_apply(
lambda row: download_card_image(row.scryfallId, card_name=row.card_name),
axis="columns",
)
return df
def add_image_height_and_width(df: pd.DataFrame) -> pd.DataFrame:
"""Adds columns image, height and width"""
df["image"] = df["image_local_path"].progress_apply(lambda x: Image.open(x))
if pd.isnull(df["image"]).any():
problems = df[pd.isnull(df["image"])]
problems.to_csv("problems.csv")
raise Exception('There are empty images in the dataframe. See "problems".')
df["height"] = df["image"].progress_apply(lambda x: x.size[1])
df["width"] = df["image"].progress_apply(lambda x: x.size[0])
return df
def should_correct_aspect_ratio(
df: pd.DataFrame, tolerance: float = 0.02
) -> pd.DataFrame:
"""If image file does not meet aspect ration 88/63,
resize the image
Tolerates by default 2% difference in aspect ratio
ref: https://stackoverflow.com/questions/23853632/which-kind-of-interpolation-best-for-resizing-image
"""
EXPECTED_RATIO = 88 / 63
df["aspect_ratio"] = df["height"] / df["width"]
df["aspect_ratio_higher"] = df["aspect_ratio"] > EXPECTED_RATIO
df["aspect_ratio_correct_by_proportion"] = df["aspect_ratio"] / EXPECTED_RATIO
df["should_resize"] = (
np.abs(df["aspect_ratio"] - EXPECTED_RATIO) / (EXPECTED_RATIO) > tolerance
)
# shrink height if aspect ration is higher than expected
df["new_height"] = df["height"].where(
~df["aspect_ratio_higher"],
(df["height"] * df["aspect_ratio_correct_by_proportion"]).apply(int),
)
# shrink width if aspect ration is lower than expected
df["new_width"] = df["width"].where(
df["aspect_ratio_higher"],
(df["width"] * df["aspect_ratio_correct_by_proportion"]).apply(int),
)
return df
def generate_deck_img_dir(df: pd.DataFrame, deck_slug: str) -> pd.DataFrame:
"""Generates images in deck image dirs, risezed if needed"""
logger.info("generate_deck_img_dir")
Path(f"./deck_images/{deck_slug}").mkdir(parents=True, exist_ok=True)
df["deck_image_path"] = df.progress_apply(
lambda row: f"./deck_images/{deck_slug}/{row.card_id_in_deck}-{row.card_name}-{'resized' if row.should_resize else ''}.png",
axis="columns",
)
logger.info("Start resizing images")
df["deck_image"] = df.progress_apply(
lambda row: row.image
if not row.should_resize
else row.image.resize((row.new_width, row.new_height)),
axis="columns",
)
logger.info("Saving resized images")
df.progress_apply(
lambda row: row.deck_image.save(row.deck_image_path)
if not os.path.isfile(row.deck_image_path)
else np.nan,
axis="columns",
)
return df
if __name__ == "__main__":
pass
DECK_SLUG = "00deck_passarinhos_as_is"
deck_df_query = f"""
SELECT deck_id, card_id_in_deck, card_name, cards."scryfallId"
FROM "{config.DECKS_TNAME}"
JOIN "{config.CARDS_TNAME}"
ON "{config.DECKS_TNAME}".card_name="{config.CARDS_TNAME}".name
WHERE deck_id='{DECK_SLUG}'
"""
deck_df = | pd.read_sql_query(deck_df_query, engine) | pandas.read_sql_query |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import itertools
from tqdm import tqdm
from lidopt.model import evaluate, calculate_metrics
from lidopt import PARAM_GRID, METRICS, EXP, SIM, MODE
from lidopt.parsers import parse_experiment
def run(event=None, event_name=None, path='./data/output/results.csv'):
col_names = list(PARAM_GRID.keys())
all_combinations = list(itertools.product(*[PARAM_GRID[k] for k in PARAM_GRID.keys()]))
df = pd.DataFrame.from_records(all_combinations, columns=col_names)
for metric in METRICS:
df[metric] = 0
for i in tqdm(range(df.shape[0])):
if event_name is not None:
results = evaluate(reportfile='./data/output/reports/parameters_RE_{0}_{1}.txt'.format(event_name+1,i+1), experiment=event, params=df.loc[i, col_names])
else:
results = evaluate(reportfile='./data/output/reports/parameters_{}.txt'.format(str(i+1)), experiment=event, params=df.loc[i, col_names])
for metric in METRICS:
df.loc[i, metric] = results[metric]
if event_name is not None:
df.to_csv(path.format(event_name+1), index_label='simulation_number')
else:
idx = pd.Series([i+1 for i in range(len(df))])
df.set_index(idx, inplace=True)
df.to_csv(path, index_label='simulation_number')
return df
def run_per_event():
rain_events = pd.read_csv(EXP['rain_events'], parse_dates=True)
total_rain_events = rain_events.shape[0]
event_path = "./data/output/events/event_CAL_{}_RE_{}.csv"
df = run()
for i,row in tqdm(df.iterrows(), total = df.shape[0]):
for j in range(total_rain_events):
# Initialize metrics object
col_names = list(PARAM_GRID.keys())
all_combinations = list(itertools.product(*[PARAM_GRID[k] for k in PARAM_GRID.keys()]))
metrics_df = pd.DataFrame.from_records(all_combinations, columns=col_names)
for metric in METRICS:
metrics_df[metric] = 0
start= pd.to_datetime(rain_events.loc[j,'Start'], format="%m/%d/%Y %H:%M")
end = | pd.to_datetime(rain_events.loc[j,'End'], format='%m/%d/%Y %H:%M') | pandas.to_datetime |
import unittest
import os
import shutil
import subprocess
import pandas as pd
from q2_mlab import orchestrate_hyperparameter_search
from pandas.testing import assert_frame_equal
class OrchestratorTests(unittest.TestCase):
def setUp(self):
self.dataset = "dset_test"
self.TEST_DIR = os.path.split(__file__)[0]
self.dataset_file = os.path.join(self.TEST_DIR, "data/table.qza")
# The metadata doesn't matter here, as mlab will only accept
# SampleData[Target] artifacts from preprocessing.
self.metadata_file = os.path.join(
self.TEST_DIR, "data/sample-metadata.tsv"
)
self.target = "reported-antibiotic-usage"
self.prep = "16S"
self.alg = "LinearSVR"
(
self.script_fp,
self.params_fp,
self.run_info_fp,
) = orchestrate_hyperparameter_search(
dataset=self.dataset,
preparation=self.prep,
target=self.target,
algorithm=self.alg,
base_dir=self.TEST_DIR,
table_path=self.dataset_file,
metadata_path=self.metadata_file,
chunk_size=20,
dry=False,
)
# Make a runnable bash script for testing:
# We remove the first 43 lines from the job script as they contain
# only #PBS directives and unused environment variables such as
# $PBS_O_WORKDIR and $PBS_NODEFILE.
self.test_script = os.path.splitext(self.script_fp)[0] + "_test.sh"
with open(self.script_fp) as f:
keeplines = f.readlines()[43:]
with open(self.test_script, "w") as out:
for line in keeplines:
out.write(line)
out.write("\n")
subprocess.run(["chmod", "755", self.test_script])
def tearDown(self):
# Remove files we generated
files_generated = [
self.script_fp,
self.params_fp,
self.run_info_fp,
self.test_script,
]
for file in files_generated:
if file and os.path.exists(file):
os.remove(file)
# Remove parameter subset lists
results_dir = os.path.join(
self.TEST_DIR, self.dataset, self.prep, self.target, self.alg
)
for subset_file in os.listdir(results_dir):
os.remove(os.path.join(results_dir, subset_file))
os.removedirs(results_dir)
# Remove the barnacle output directory
error_dir = os.path.join(
self.TEST_DIR, self.dataset, "barnacle_output/"
)
os.rmdir(error_dir)
def testDryRun(self):
target = "reported-antibiotic-usage"
dataset = self.dataset
prep = "16S"
alg = "LinearSVR"
script_fp, params_fp, run_info_fp = orchestrate_hyperparameter_search(
dataset=dataset,
preparation=prep,
target=target,
algorithm=alg,
base_dir=self.TEST_DIR,
table_path=self.dataset_file,
metadata_path=self.metadata_file,
dry=True,
)
self.assertEqual(
script_fp,
os.path.join(
self.TEST_DIR, f"{dataset}/{prep}_{target}_{alg}.sh"
),
)
expected = f"{dataset}/{prep}/{target}/{alg}/{alg}_parameters.txt"
self.assertEqual(params_fp, os.path.join(self.TEST_DIR, expected))
expected = f"{dataset}/{prep}_{target}_{alg}_info.txt"
self.assertEqual(run_info_fp, os.path.join(self.TEST_DIR, expected))
def testRun(self):
self.assertTrue(os.path.exists(self.script_fp))
self.assertTrue(os.path.exists(self.params_fp))
self.assertTrue(os.path.exists(self.run_info_fp))
expected_run_info = [
{
"parameters_fp": self.params_fp,
"parameter_space_size": 112,
"chunk_size": 20,
"remainder": 12,
"n_chunks": 6,
}
]
expected_run_info_df = | pd.DataFrame.from_records(expected_run_info) | pandas.DataFrame.from_records |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 10:24:07 2020
@author: TeSolva
"""
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import os
import re
#%%
def my_int(my_string):
try:
out = int(my_string)
except:
out = -9999
return out
#%%
def func_nummer(x):
temp = re.findall(r"[\w']+", x)
my_list=list(map(my_int, temp))
temp = np.array(my_list)[np.array(my_list)>-9999]
if len(temp) > 1:
out = temp[0]
else:
out = False
return out
#%%
def func_id(x):
temp = re.findall(r"[\w']+", x)
my_list=list(map(my_int, temp))
temp = np.array(my_list)[np.array(my_list)>-9999]
if len(temp) > 1:
out = temp[1]
else:
out = False
return out
#%%
def outlier_1d_mad_based(sample, thresh=3.5):
"""
outlier_1d_mad_based(sample, thresh=3.5)
routine to analyse a given 1d data sample to check for outliers.
see reference for more details on the background of the used algorithm.
the function returns a boolean array with True if a value in the sample
is an outliers and False otherwise.
Parameters:
-----------
sample : array_like
An numobservations by numdimensions array of observations
thresh : float
The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
A numobservations-length boolean array.
Examples
--------
# Generate some data
sample = np.random.normal(0, 0.5, 50)
# Add three outliers...
sample = np.r_[sample, -3, -10, 12]
# call function and check for outliers
out = outlier_1d_mad_based(sample)
References:
----------
<NAME> and <NAME> (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, <NAME>, Ph.D., Editor.
"""
if len(sample.shape) == 1:
sample = sample[:, None]
median = np.median(sample, axis=0)
diff = np.sum((sample - median) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
#%%
# #path = "PATH\"
# # define all filenames in current directory
# path = os.getcwd() # path to this file here
# list_dir = os.listdir(os.getcwd()) # all filenames in the directory
# file_set = []
# for i in list_dir:
# if i.endswith(".csv"):
# file_set.append(i)
#%%
# define file name
file1 = "chronicreplay-audiothek-appapi.tesolva.dev_2020-08-08_07-29-08.csv"
file2 = "chronicreplay-audiothek-appapi.solr.tesolva.dev_2020-08-11_08-47-32.csv"
#
file_set = [file1, file2]
###############################################################################
#%% pre-process
X_file = file_set[1]
filename = os.path.splitext(X_file)[0]
path = os.getcwd()
outputPath = path + '/' + filename + '/' + filename
os.makedirs(filename, exist_ok=True)
df = | pd.read_csv(X_file,sep='\t') | pandas.read_csv |
import scipy
import pickle
import re
import pandas as pd
import numpy as np
import json
import seaborn as sns
import metrics
from pathlib import Path
def is_singletask(cfg):
return cfg['games'][0] == cfg['games'][1]
def get_config(exp):
log_filename = str(exp['dir'] / "log.txt")
with open(log_filename, "r") as f:
data = f.read()
config_data = data.find(" Logging to: ")
config_data = data[0:config_data]
config_data = config_data[23:]
config_data = config_data[0:len(config_data)-23]
config = json.loads(config_data)
config['iterations'] = get_iterations(exp['dir'])
config['last_iteration'] = config['iterations'][-1]
return config
def get_iterations(logdir):
iterations = []
for i in logdir.iterdir():
f = i.name
if not f.endswith('-game0_elite.pkl'):
continue
itr = re.sub('^0*', '', f.split('-')[0])
if itr == '':
itr = 0
else:
itr = int(itr)
iterations.append(itr)
iterations.sort()
return iterations
def plot_rewards(exp):
rewards = exp['rewards']
cfg = exp['cfg']
sns.set(rc={'figure.figsize':(20, 10)})
df = rewards.copy()
df.set_index('iteration')
myplot = df.set_index('iteration').plot()
myplot.set(title="Game score comparison")
myplot.set(ylabel='Game score')
return myplot.get_figure()
def plot_pareto(exp):
rewards = exp['rewards']
cfg = exp['cfg']
if is_singletask(cfg):
return None
sns.set(rc={'figure.figsize': (20, 10)})
df = rewards.copy()
cols = [exp['type'] + '-' + cfg['games'][0] + '_rewards', exp['type'] + '-' + cfg['games'][1] + '_rewards']
df = df.loc[:, cols]
myplot = sns.scatterplot(x=df[cols[0]], y=df[cols[1]], data=df)
myplot.set(title="Pareto")
return myplot.get_figure()
def compute_pareto(rewards_game0, rewards_game1):
costs = np.transpose(np.array([rewards_game0, rewards_game1]))
points_PF_x, points_PF_y = metrics.f_true_PF_points(costs)
return points_PF_x, points_PF_y
def compute_igd_value(rewards_game0, rewards_game1):
points_PF_x, points_PF_y = compute_pareto(rewards_game0, rewards_game1)
return np.random.random(200)
def compute_hv_value(rewards_game0, rewards_game1):
points_PF_x, points_PF_y = compute_pareto(rewards_game0, rewards_game1)
HV_value = metrics.f_computeHypervolume(np.array([points_PF_x, points_PF_y]))
return HV_value
def get_all_rewards_from_experiments(experiments):
ST_z = experiments['ST-zaxxon']['rewards']
ST_r = experiments['ST-riverraid']['rewards']
MT = experiments['MT']['rewards']
return ST_z.join(ST_r).join(MT)
def get_rewards(exp):
logdir = exp['dir']
last_iteration = exp['cfg']['iterations']
iteration_limit = 201
if str(exp['dir']).endswith('MT-zaxxon-riverraid-50000'):
iteration_limit = 801
if str(exp['dir']).endswith("evaluate_riverraid_using_zaxxon_model"):
last_iteration = 200
rewards_df = | pd.DataFrame(columns=['game0_rewards', 'game1_rewards', 'game0_elite', 'game1_elite', 'iteration']) | pandas.DataFrame |
from ...config import config
try:
if config["RAS"].getboolean("synchronous", False):
import dask
dask.config.set(scheduler="single-threaded")
from dask_sql import Context
from dask_sql.mappings import sql_to_python_type
from dask.distributed import Client
import dask.dataframe as dd
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
"Unable to use the dask backend because dask"
" has not been installed. Make sure to install with"
" `pip install neurolang[dask]` to enable the dask backend."
) from e
import ast
import inspect
import logging
import threading
import time
import types
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import Type, Union
import numpy as np
from neurolang.type_system import (
Unknown,
get_args,
infer_type_builtins,
typing_callable_from_annotated_function,
)
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import functions
import pandas as pd
from pandas.api.types import pandas_dtype
LOG = logging.getLogger(__name__)
def timeit(func):
"""
This decorator logs the execution time for the decorated function.
Log times will not be acurate for Dask operations if using asynchronous
scheduler.
"""
def wrapper(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
elapsed = time.perf_counter() - start
LOG.debug("======================================")
LOG.debug(f"{func.__name__} took {elapsed:2.4f} s")
LOG.debug("======================================")
return result
return wrapper
def _id_generator():
lock = threading.RLock()
i = 0
while True:
with lock:
fresh = f"{i:08}"
i += 1
yield fresh
class DaskContextManager(ABC):
"""
Singleton class to manage Dask-related objects, mainly
Dask's Client and Dask-SQL's Context.
"""
_context = None
_client = None
_id_generator_ = _id_generator()
@abstractmethod
def _do_not_instantiate_singleton_class(self):
pass
@classmethod
def _create_client(cls):
if cls._client is None:
cls._client = Client(processes=False)
@classmethod
def get_context(cls, new=False):
if cls._context is None or new:
if not config["RAS"].getboolean("synchronous", False):
cls._create_client()
cls._context = Context()
# We register an aggregate function called len which applies to string columns
# Used for example in `test_probabilistic_frontend:test_postprob_conjunct_with_wlq_result`
cls._context.register_aggregation(
len, "len", [("x", pd.StringDtype())], np.int32
)
# We also register a sum which applies to objects (i.e `Symbol` or sets)
# since by default sum applies only to numbers in SQL and Calcite will
# try to cast objects to float before applying the default sum op.
cls._context.register_aggregation(
sum, "sum", [("x", np.object_)], np.object_
)
return cls._context
@classmethod
def sql(cls, query):
compiled_query = cls.compile_query(query)
LOG.info(f"Executing SQL query :\n{compiled_query}")
return cls.get_context().sql(compiled_query)
@staticmethod
def compile_query(query):
return str(
query.compile(
dialect=postgresql.dialect(),
compile_kwargs={"literal_binds": True},
)
)
@classmethod
def register_function(cls, f_, fname, params, return_type, wrapped):
func_to_register = f_
if wrapped:
func_to_register = cls.wrap_function_with_dataframe(
f_, params, return_type
)
cls.get_context().register_function(
func_to_register, fname, params, return_type
)
@classmethod
def register_aggregation(cls, f_, fname, params, return_type):
# FIXME: We should preferably try to use GroupBy-aggregations
# instead of GroupBy-apply when doing aggregations. I.e.
# create a dd.Aggregation from the given function and register it
# on the context. But this doesnt work in all cases, since dask
# applies GroupBy-aggregations first on each chunk, then again to
# the results of all the chunk aggregations.
# So transformative aggregation will not work properly, for
# instance sum(x) - 1 will result in sum(x) - 2 in the end.
# agg = dd.Aggregation(
# fname, lambda chunk: chunk.agg(f_), lambda total: total.agg(f_)
# )
func_to_register = f_
if len(params) > 1:
func_to_register = cls.wrap_function_with_param_names(f_, params)
cls.get_context().register_aggregation(
func_to_register, fname, params, return_type
)
@staticmethod
def wrap_function_with_param_names(f_, params):
try:
pnames = [name for (name, _) in params]
named_tuple_type = namedtuple("LambdaTuple", pnames)
except ValueError:
# Invalid column names, just use a tuple instead.
named_tuple_type = None
def wrapped_custom_function(*values):
if named_tuple_type:
return f_(named_tuple_type(*values))
else:
return f_(tuple(values))
return wrapped_custom_function
@staticmethod
def wrap_function_with_dataframe(f_, params, return_type):
"""
The way a function is called in dask_sql is by calling it with a list
of its parameters (dask series,see dask_sql.physical.rex.core.call.py)
Also, dask_sql is changing the names of the columns internally.
What we want to do is transform these Series into a dataframe with the
expected column names (params) and call apply on it.
This is what wrapped_custom_function does.
Concatenating Series into a Dask DataFrame is complicated because of
unknown partitions, so we turn the first Series into a DataFrame and
then assign the other Series as columns.
"""
pnames = [name for (name, _) in params]
def wrapped_custom_function(*values):
s0 = values[0]
if not isinstance(s0, dd.Series):
# Sometimes the Calcite optimizer will push a constant into
# the params of a call so we need to turn it into a Series.
s0 = dd.from_pandas(
| pd.Series([s0], name=pnames[0]) | pandas.Series |
"""
Collect worldbank
=================
Collect worldbank sociodemographic data by country.
"""
import requests
from retrying import retry
import json
from collections import defaultdict
import re
import math
import pandas as pd
WORLDBANK_ENDPOINT = "http://api.worldbank.org/v2/{}"
DEAD_RESPONSE = (None, None) # tuple to match the default python return type
def worldbank_request(suffix, page, per_page=10000, data_key_path=None):
"""Hit the worldbank API and extract metadata and data from the response.
Args:
suffix (str): Suffix to append to :obj:`WORLDBANK_ENDPOINT`.
page (int): Pagination number in API request.
per_page (int): Number of results to return per request.
data_key_path (list): List specifying json path to data object.
Returns:
metadata, data (dict, list): Metadata and data from API response.
"""
response = _worldbank_request(suffix=suffix, page=page, per_page=per_page)
metadata, data = data_from_response(response=response,
data_key_path=data_key_path)
return metadata, data
@retry(stop_max_attempt_number=3, wait_fixed=2000)
def _worldbank_request(suffix, page, per_page):
"""Hit the worldbank API and return the response.
Args:
suffix (str): Suffix to append to :obj:`WORLDBANK_ENDPOINT`.
page (int): Pagination number in API request.
per_page (int): Number of results to return per request.
Returns:
response (:obj:`requests.Response`)
"""
# Hit the API
r = requests.get(WORLDBANK_ENDPOINT.format(suffix),
params=dict(per_page=per_page, format="json", page=page))
# There are some non-404 status codes which indicate invalid API request
if r.status_code == 400:
return DEAD_RESPONSE
r.raise_for_status()
# There are even some 200 status codes which indicate invalid API request
# purely by returning non-json data
response = DEAD_RESPONSE
try:
response = r.json()
except json.JSONDecodeError:
pass
finally:
return response
def data_from_response(response, data_key_path=None):
"""Split up the response from the worldbank API.
Args:
response (tuple): Response from worldbank API, expected to be a tuple of two json items.
data_key_path (list): List specifying json path to data object.
Returns:
metadata, data (dict, list): Metadata and data from API response.
"""
# If the data is stored ({metadata}, [datarows])
if data_key_path is None or response == DEAD_RESPONSE:
metadata, datarows = response
# Otherwise if the data is stored as {metadata, path:{[to:data]}}
# (or similar)
else:
metadata = response
datarows = response.copy()
for key in data_key_path:
datarows = datarows[key]
if key != data_key_path[-1] and type(datarows) is list:
datarows = datarows[0]
return metadata, datarows
def calculate_number_of_api_pages(suffix, per_page=10000, data_key_path=None):
"""Calculate the number of API scrolls required to paginate through this
request.
Args:
suffix (str): Suffix to append to :obj:`WORLDBANK_ENDPOINT`.
per_page (int): Number of results to return per request.
data_key_path (list): List specifying json path to data object.
Returns:
n_pages (int): Number of API scrolls required.
"""
# Discover the shape of the data by inspecting the metadata with
# a tiny request (1 result, 1 page)
metadata, _ = worldbank_request(suffix=suffix, page=1,
per_page=1,
data_key_path=data_key_path)
# If the request was invalid, there are no pages
if metadata is None:
return 0
# Calculate the number of pages required
total = int(metadata["total"])
n_pages = math.floor(total / per_page) + int(total % per_page > 0)
return n_pages
def worldbank_data_interval(suffix, first_page, last_page,
per_page=10000, data_key_path=None):
"""Yield a row of data from worldbank API in a page interval.
Args:
suffix (str): Suffix to append to :obj:`WORLDBANK_ENDPOINT`.
{first, last}_page (int): First (last) page number of the API request.
per_page (int): Number of results to return per request.
data_key_path (list): List specifying json path to data object.
Yields:
row (dict): A row of data from the worldbank API.
"""
for page in range(first_page, last_page+1):
_, datarows = worldbank_request(suffix=suffix, page=page,
per_page=per_page,
data_key_path=data_key_path)
if datarows is None:
continue
for row in datarows:
yield row
def worldbank_data(suffix, per_page=10000, data_key_path=None):
"""Yield a row of data from worldbank API in a page interval.
Args:
suffix (str): Suffix to append to :obj:`WORLDBANK_ENDPOINT`.
per_page (int): Number of results to return per request.
data_key_path (list): List specifying json path to data object.
Yields:
row (dict): A row of data from the worldbank API.
"""
n_pages = calculate_number_of_api_pages(suffix=suffix,
per_page=per_page,
data_key_path=data_key_path)
return worldbank_data_interval(suffix, first_page=1,
last_page=n_pages,
per_page=per_page,
data_key_path=data_key_path)
def get_worldbank_resource(resource):
"""Extract and flatten all data for one worldbank resource.
Args:
resource (str): One of "countries", "series" or "source"
Returns:
collection (list): A list of resource data.
"""
collection = []
for row in worldbank_data(resource):
# Flatten out any data stored by a key named "value"
data = {}
for k, v in row.items():
if type(v) is dict:
v = v["value"]
data[k] = v
collection.append(data)
return collection
def get_variables_by_code(codes):
"""Discover all dataset locations for each variable id, by variable code.
Note: one variable may exist in many datasets, which is handy in the case
of missing data.
Args:
codes (list): The codes of all variables to be discovered.
Returns:
variables (dict): Mapping of variable id --> dataset names.
"""
key_path = ["source", "concept", "variable"]
# Mapping variable id --> dataset names
variables = defaultdict(list)
sources = get_worldbank_resource("source")
for source in sources:
# Extract variables in this "source" (dataset)
suffix = f"sources/{source['id']}/series/data"
data = worldbank_data(suffix, data_key_path=key_path)
# Filter out variables that we don't want
filtered_data = filter(lambda row: (row['id'] in codes), data)
# Assign remaining datasets to this variable
for row in filtered_data:
variables[row['id']].append(source['id'])
return variables
def unpack_quantity(row, concept, value):
"""Unpack row like {"variable": [{"concept":<concept>, <value>:_i_want_this_}]}
Args:
row (dict): Row of Worldbank API data.
concept (str): The name of the dataset containing the variable.
value (str): The name of the variable to unpack.
Returns:
A value.
"""
for quantity in row['variable']:
if quantity['concept'] == concept:
return quantity[value]
raise NameError(f"No item found in {row['variable']} with "
f"concept = {concept}")
def unpack_data(row):
"""Unpack an entire row of Worldbank API data.
Args:
row (dict): Row of Worldbank API data.
Returns:
country, variable, time, value
"""
country = unpack_quantity(row, 'Country', 'id')
#variable = unpack_quantity(row, 'Series', 'value')
time = unpack_quantity(row, 'Time', 'value')
value = row['value']
return country, time, value
def get_country_data(variables, aliases, time="all"):
"""Extract data for specified variables for all available
countries, in a specified year.
Args:
variables (dict): Mapping of variable --> dataset ids.
aliases (dict): Mapping of dirty -> clean variable name.
time (str): String to identify time period to request.
Returns:
country_data (dict): Mapping of country --> variable name --> value
"""
# Iterate through datasets
country_data = defaultdict(dict) # lambda: defaultdict(list))
kwargs_list = get_country_data_kwargs(variables=variables,
aliases=aliases,
time=time)
for kwargs in kwargs_list:
_country_data = country_data_single_request(**kwargs)
for country, data in _country_data.items():
for var_name, data_row in data.items():
country_data[country][var_name] = data_row
return country_data
def get_country_data_kwargs(variables, aliases, time="all", per_page=10000, max_pages=None):
"""Generate every set of kwargs required to make single requests.
Designed to be used with :obj:`country_data_single_request` in order
to be batched.
Args:
variables (dict): Mapping of variable --> dataset ids.
aliases (dict): Mapping of variable name aliases, to ensure
consistent variable naming between data collections.
per_page (int): Number of results to return per request.
Returns:
kwargs_list (list): kwargs list for :obj:`country_data_single_request`.
"""
# Iterate through datasets
kwargs_list = []
key_path = ["source", "data"]
for series, sources in variables.items():
# The name of a given variable varies subtlely across multiple
# datasets, so we extract the variable name the first time for
# consistency across datasets.
alias = aliases[series]
for source in sources:
suffix = (f"sources/{source}/country/all/"
f"series/{series}/time/{time}/data")
n_pages = calculate_number_of_api_pages(suffix=suffix,
per_page=per_page,
data_key_path=key_path)
for page in range(1, n_pages+1):
if max_pages is not None and page > max_pages:
break
parameters = dict(alias=alias,
suffix=suffix, first_page=page,
last_page=page, per_page=per_page,
data_key_path=key_path)
kwargs_list.append(parameters)
return kwargs_list
def country_data_single_request(alias, **kwargs):
"""Extract data for all countries using kwargs generated by
:obj:`get_country_data_kwargs`.
Args:
kwargs (dict): An item returned by :obj:`get_country_data_kwargs`.
Returns:
country_data (dict): Mapping of country --> variable name --> value
"""
country_data = defaultdict(lambda: defaultdict(list))
done_pkeys = set()
data = worldbank_data_interval(**kwargs)
for country, time, value in map(unpack_data, data):
if value is None: # Missing data for this country
continue
pkey = (country, time)
if pkey in done_pkeys: # Already done this country
continue
done_pkeys.add(pkey)
new_row = {"value": value, "time": time}
country_data[country][alias].append(new_row)
return country_data
# def flatten_country_data(country_data, country_metadata):
# """Merge and flatten country data and metadata together.
# Args:
# country_data (dict): Mapping of country --> variable name --> value
# country_metadata (list): List of country metadata.
# Returns:
# flat_country_data (list): Flattened country data and metadata.
# """
# flat_country_data = [dict(**country_data[metadata['id']], **metadata)
# for metadata in country_metadata
# if metadata['id'] in country_data]
# return flat_country_data
def discover_variable_name(series):
"""Discover variable names from each series [short hand code].
Args:
series (str): The short hand code for the variable name,
according to Worldbank API.
Returns:
alias (str): The variable name for the given series.
"""
_, data = worldbank_request(f"en/indicator/{series}", page=1)
alias = data[0]["name"]
return alias
def clean_variable_name(var_name):
"""Clean a single variable name ready for DB storage.
Args:
var_name (str): Variable name to be cleaned
Returns:
new_var_name (str): A MySQL compliant variable name.
"""
# Lower, replace '%', remove non-alphanums and use '_'
new_var_name = var_name.lower().replace("%", "pc")
new_var_name = re.sub('[^0-9a-zA-Z]+', ' ', new_var_name)
new_var_name = new_var_name.lstrip().rstrip().replace(" ", "_")
# Recursively shorten from middle character until less than 64 chars long
# (this is the default MySQL limit)
# Middle character has been chosen to allow some readability.
while len(new_var_name) > 64:
# Find the longest term
longest_term = ""
for term in new_var_name.split("_"):
if len(term) <= len(longest_term):
continue
longest_term = term
# Remove the middle character from the longest term
middle = len(longest_term) - 1
new_term = longest_term[:middle] + longest_term[middle+1:]
new_var_name = new_var_name.replace(longest_term, new_term)
return new_var_name
def clean_variable_names(flat_country_data):
"""Clean variable names ready for DB storage.
Args:
flat_country_data (list): Flattened country data.
Returns:
out_data (list): Same as input data, with MySQL compliant field names.
"""
out_data = []
for row in flat_country_data:
new_row = {}
for key, v in row.items():
# Only clean names containing spaces
if " " not in key:
new_row[key] = v
continue
new_key = clean_variable_name(key)
new_row[new_key] = v
out_data.append(new_row)
return out_data
def is_bad_quarter(x, bad_quarters):
return any(q in x for q in bad_quarters)
def flatten_country_data(country_data, country_metadata,
bad_quarters=("Q1", "Q3", "Q4")):
# Discover the year from the time variable.
# Expected to be in the formats "XXXX" and "XXXX QX"
country_metadata = {metadata["id"]: metadata
for metadata in country_metadata}
fairly_flat_data = []
for iso3, _ in country_metadata.items():
for variable, data in country_data[iso3].items():
for row in data:
year = re.findall(r'(\d{4})', row["time"])[0]
flatter_row = dict(country=iso3, variable=variable,
year=year, **row)
fairly_flat_data.append(flatter_row)
# Group by country and year and remove quarters we don't want.
very_flat_data = []
df = | pd.DataFrame(fairly_flat_data) | pandas.DataFrame |
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVEFR actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
values=[]
#BIG BUG, this was origionally -length-1, which was always truncating the array and producing nans.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
m=pd.Series([1,1,1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if | pd.isnull(s) | pandas.isnull |
from flask import Flask,render_template,url_for,session,request,make_response,send_file
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pandas import DataFrame,read_csv
import random
from flask import Response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import io
from fbprophet import Prophet
festivals=pd.DataFrame({
'holiday':(['New Year Day', 'Guru Govind Singh Birthday',
'<NAME>',
'Pongal',
'Republic Day',
'<NAME>',
'Guru Ravidas Birthday',
'<NAME>',
'<NAME>',
'<NAME>/Shivaratri',
'<NAME> Birthday',
'<NAME>',
'Holi',
'<NAME>/Gudi Padava/Ugadi/Cheti Chand',
'<NAME>',
'<NAME>',
'Good Friday',
'Easter Day',
'Mesadi',
'Vaisakhi/Vishu',
'Vaisakhadi(Bengal)/Bahag Bihu (Assam)',
'Guru Rabindranath birthday',
'<NAME>',
'Jamat Ul-Vida',
'<NAME>',
'<NAME>',
'Id-ul-Zuha(Bakrid)',
'Independence Day',
'Rak<NAME>han (Rakhi)',
'Parsi New Year day',
'Janmashtarni',
'Onam',
'Muharram',
'<NAME>',
'<NAME>',
'Dussehra',
'Maharishi Valmiki Birthday',
'<NAME> (Karva Chouth)',
'<NAME>',
'Diwali (Deepavali)',
'Govardhan Puja',
'<NAME>',
'Chhat Puja',
'Milad-un-Nabi or Id-e- Milad',
'Guru Nanaks Birthday',
'Guru Teg Bahadur Martyrdom Day',
'Christmas Eve',
'Christmas Day',
'New Year Day',
'Guru Gobind Singh Jayanti',
'Lohri',
'Pongal, Uttarayan, Makar Sankranti',
'Republic Day',
'<NAME>',
'Guru Ravidas Jayanti',
'<NAME> <NAME>',
'Mahashivratri',
'<NAME>',
'Holi',
'Ugadi, Gudi Padwa',
'Bank Holiday',
'<NAME>',
'<NAME>',
'Good Friday',
'Easter',
'Baisakhi',
'<NAME>, <NAME>',
'Eid-al-Fitr / Ramadan',
'<NAME>',
'Guru Purnima',
'<NAME>',
'Bakrid',
'<NAME>',
'Janmashtami',
'Independence Day',
'<NAME>',
'Muharram',
'Onam',
'<NAME>',
'Dussehra',
'Id-e-Milad',
'<NAME>',
'<NAME>',
'Dhanteras',
"Diwali, Narak Chaturdashi, Children's day",
'Govardhan Puja',
'<NAME>',
'<NAME>',
'Guru Nanak Birthday',
'Christmas'
]),
'ds':([
'Jan-1-2019',
'Jan-13-2019',
'Jan-14-2019',
'Jan-15-2019',
'Jan-26-2019',
'Feb-10-2019',
'Feb-19-2019',
'Feb-19-2019',
'Mar-1-2019',
'Mar-4-2019',
'Mar-19-2019',
'Mar-20-2019',
'Mar-21-2019',
'Apr-6-2019',
'Apr-13-2019',
'Apr-17-2019',
'Apr-19-2019',
'Apr-21-2019',
'Apr-13-2019',
'Apr-14-2019',
'Apr-14-2019',
'May-9-2019',
'May-18-2019',
'May-31-2019',
'Jun-5-2019',
'Jul-4-2019',
'Aug-12-2019',
'Aug-15-2019',
'Aug-15-2019',
'Aug-17-2019',
'Aug-24-2019',
'Sep-11-2019',
'Sep-10-2019',
'Sep-2-2019',
'Oct-2-2019',
'Oct-7-2019',
'Oct-13-2019',
'Oct-17-2019',
'Oct-27-2019',
'Oct-27-2019',
'Oct-28-2019',
'Oct-29-2019',
'Nov-2-2019',
'Nov-10-2019',
'Nov-12-2019',
'Nov-24-2019',
'Dec-24-2019',
'Dec-25-2019',
'Jan-1-2020',
'Jan-2-2020',
'Jan-14-2020',
'Jan-15-2020',
'Jan-26-2020',
'Jan-29-2020',
'Feb-9-2020',
'Feb-18-2020',
'Feb-21-2020',
'Mar-9-2020',
'Mar-10-2020',
'Mar-25-2020',
'Apr-1-2020',
'Apr-2-2020',
'Apr-6-2020',
'Apr-10-2020',
'Apr-12-2020',
'Apr-13-2020',
'May-7-2020',
'May-24-2020',
'Jun-23-2020',
'Jul-5-2020',
'Jul-23-2020',
'Jul-31-2020',
'Aug-3-2020',
'Aug-11-2020',
'Aug-15-2020',
'Aug-22-2020',
'Aug-23-2020',
'Aug-29-2020',
'Aug-31-2020',
'Oct-2-2020',
'Oct-25-2020',
'Oct-29-2020',
'Oct-31-2020',
'Nov-4-2020',
'Nov-12-2020',
'Nov-14-2020',
'Nov-15-2020',
'Nov-16-2020',
'Nov-30-2020',
'Dec-25-2020'])
})
holidays=festivals.filter(['holiday','ds'])
holidays['ds']=pd.to_datetime(festivals['ds'])
file=r'./forecasting-prj-sample-data-v0.1.csv'
df=pd.read_csv(file)
df.to_html(header="true",table_id="table")
app=Flask(__name__)
app.config['SECRET_KEY']='f2091f20a36545a4ffe6ef38439845f5'
frame1=df.filter(['Date','Occupancy Adults'])
frame1['Date']=pd.to_datetime(df['Date'])
frame1=frame1.set_index('Date')
f_frame1=frame1.reset_index().dropna()
f_frame1.to_html(header="true",table_id="table")
@app.route("/")
@app.route("/adult_dataframe",methods=["POST","GET"])
def html_table():
return render_template('adult_table.html',tables=[f_frame1.to_html(classes='data',header="true")])
frame2=df.filter(['Date','Occupancy Children'])
frame2['Date']=pd.to_datetime(df['Date'])
frame2=frame2.set_index('Date')
f_frame2=frame2.reset_index().dropna()
f_frame2.to_html(header="true",table_id="table")
@app.route("/children_dataframe",methods=["POST","GET"])
def child_table():
return render_template('children_table.html',tables=[f_frame2.to_html(classes='data',header="true")])
@app.route("/trends",methods=["GET"])
def plot_png():
fig=create_figure()
output=io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(),mimetype='image/png')
def create_figure():
fig=Figure(figsize=(25,20))
axis=fig.add_subplot(2,1,1)
xs=f_frame1['Date']
ys=f_frame1['Occupancy Adults']
axis.plot(xs,ys)
axis.set_title('Adult occupancy')
axis.set_xlabel('Date')
axis.set_ylabel('Adults')
axis=fig.add_subplot(2,1,2)
xs=f_frame2['Date']
ys=f_frame2['Occupancy Children']
axis.plot(xs,ys)
axis.set_title('Children occupancy')
axis.set_xlabel('Date')
axis.set_ylabel('Children')
return fig
def chartTest():
return render_template('plot.html',name=plt.show(),name2=model.plot_components(forecast))
prophetframe1=df.filter(['Date','Occupancy Adults'])
prophetframe1['Date']=pd.to_datetime(df['Date'])
prophetframe1=prophetframe1.set_index('Date')
prophet_frame1=prophetframe1.reset_index().dropna()
prophet_frame1.columns=['ds','y']
model=Prophet(yearly_seasonality=True,weekly_seasonality=True,holidays=holidays)
model.fit(prophet_frame1)
future1=model.make_future_dataframe(periods=36,freq='MS')
forecast=model.predict(future1)
@app.route("/adult_prediction",methods=["GET"])
def predict_adult():
fig=create_adults()
output=io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(),mimetype='image/png')
def create_adults():
fig=Figure(figsize=(25,20))
fig=model.plot_components(forecast)
return fig
def show_prophet():
return render_template('adult_prophetgraph.html',name=model.plot_components(forecast))
prophetframe2=df.filter(['Date','Occupancy Children'])
prophetframe2['Date']= | pd.to_datetime(df['Date']) | pandas.to_datetime |
import streamlit as st
import base64
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import glm
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
class LogReg:
"""Страница логистической регрессии."""
def __init__(self, df):
st.title('Логистическая регрессиия')
self.df = df
self.features_selection()
self.model_fit()
self.show_summary()
self.show_students_with_problems()
self.show_metrics()
def features_selection(self):
"""Выбор предикторов и целевой переменной."""
self.target_feature = st.sidebar.selectbox('Целевая переменная:', self.df.columns, len(self.df.columns) - 1)
st.sidebar.text('Предикторы:')
features = np.array([f for f in self.df.columns if f != self.target_feature])
self.selected_features = features[[st.sidebar.checkbox(f, f) for f in features]]
self.X = self.df[self.selected_features]
self.y = self.df[self.target_feature]
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=.33)
def model_fit(self):
"""Создание и обучение модели."""
self.log_reg = glm(
f'{self.target_feature} ~ {" + ".join(self.selected_features) if len(self.selected_features) else "1"}',
data= | pd.concat([self.X_train, self.y_train], axis=1) | pandas.concat |
from django.forms.models import model_to_dict
from rules.contrib.views import permission_required, objectgetter
import math, json, logging
from datetime import datetime, timedelta
from django.utils import timezone
import numpy as np
import pandas as pd
from django.conf import settings
from django.contrib import auth
from django.db import connection as conn
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect, render
from pinax.eventlog.models import log as eventlog
from dashboard.event_logs_types.event_logs_types import EventLogTypes
from dashboard.common.db_util import canvas_id_to_incremented_id
from dashboard.common import utils
from django.core.exceptions import ObjectDoesNotExist
from collections import namedtuple
from dashboard.models import Course, CourseViewOption, Resource, UserDefaultSelection
from dashboard.settings import RESOURCE_VALUES, COURSES_ENABLED
logger = logging.getLogger(__name__)
# strings for construct resource download url
RESOURCE_URLS = settings.RESOURCE_URLS
CANVAS_FILE_ID_NAME_SEPARATOR = "|"
# string for no grade
GRADE_A="90-100"
GRADE_B="80-89"
GRADE_C="70-79"
GRADE_LOW="low_grade"
NO_GRADE_STRING = "NO_GRADE"
# string for resource type
RESOURCE_TYPE_STRING = "resource_type"
RESOURCE_VALUES = settings.RESOURCE_VALUES
# Is courses_enabled api enabled/disabled?
COURSES_ENABLED = settings.COURSES_ENABLED
# how many decimal digits to keep
DECIMAL_ROUND_DIGIT = 1
def gpa_map(grade):
if grade is None:
return NO_GRADE_STRING
# convert to float
grade_float = float(grade)
if grade_float >= 90:
return GRADE_A
elif grade_float >=80:
return GRADE_B
elif grade_float >=70:
return GRADE_C
else:
return GRADE_LOW
def get_home_template(request):
return render(request, 'frontend/index.html')
@permission_required('dashboard.get_course_template',
fn=objectgetter(Course, 'course_id', 'canvas_id'), raise_exception=True)
def get_course_template(request, course_id=0):
return render(request, 'frontend/index.html', {'course_id': course_id})
@permission_required('dashboard.get_course_info',
fn=objectgetter(Course, 'course_id', 'canvas_id'), raise_exception=True)
def get_course_info(request, course_id=0):
"""Returns JSON data about a course
:param request: HTTP Request
:type request: Request
:param course_id: Unizin Course ID, defaults to 0
:param course_id: int, optional
:return: JSON to be used
:rtype: str
"""
course_id = canvas_id_to_incremented_id(course_id)
today = timezone.now()
try:
course = Course.objects.get(id=course_id)
except ObjectDoesNotExist:
return HttpResponse("{}")
course_resource_list = []
try:
resource_list = Resource.objects.get_course_resource_type(course_id)
if resource_list is not None:
logger.info(f"Course {course_id} resources data type are: {resource_list}")
resource_defaults = settings.RESOURCE_VALUES
for item in resource_list:
result = utils.look_up_key_for_value(resource_defaults, item)
if result is not None:
course_resource_list.append(result.capitalize())
logger.info(f"Mapped generic resource types in a course {course_id}: {course_resource_list}")
except(ObjectDoesNotExist,Exception) as e:
logger.info(f"getting the course {course_id} resources types has errors due to:{e}")
course_resource_list.sort()
resp = model_to_dict(course)
course_start, course_end = course.get_course_date_range()
current_week_number = math.ceil((today - course_start).days/7)
total_weeks = math.ceil((course_end - course_start).days/7)
if course.term is not None:
resp['term'] = model_to_dict(course.term)
else:
resp['term'] = None
# Have a fixed maximum number of weeks
if total_weeks > settings.MAX_DEFAULT_WEEKS:
logger.debug(f'{total_weeks} is greater than {settings.MAX_DEFAULT_WEEKS} setting total weeks to default.')
total_weeks = settings.MAX_DEFAULT_WEEKS
resp['current_week_number'] = current_week_number
resp['total_weeks'] = total_weeks
resp['course_view_options'] = CourseViewOption.objects.get(course=course).json(include_id=False)
resp['resource_types'] = course_resource_list
return HttpResponse(json.dumps(resp, default=str))
# show percentage of users who read the resource within prior n weeks
@permission_required('dashboard.resource_access_within_week',
fn=objectgetter(Course, 'course_id','canvas_id'), raise_exception=True)
def resource_access_within_week(request, course_id=0):
course_id = canvas_id_to_incremented_id(course_id)
current_user = request.user.get_username()
logger.debug("current_user=" + current_user)
# environment settings:
df_default_display_settings()
# read quefrom request param
week_num_start = int(request.GET.get('week_num_start','1'))
week_num_end = int(request.GET.get('week_num_end','0'))
grade = request.GET.get('grade','all')
filter_values = request.GET.get(RESOURCE_TYPE_STRING, ['files', 'videos'])
filter_values = filter_values.split(",")
filter_list = []
for filter_value in filter_values:
if filter_value != '':
filter_list.extend(RESOURCE_VALUES[filter_value.lower()])
# json for eventlog
data = {
"week_num_start": week_num_start,
"week_num_end": week_num_end,
"grade": grade,
"course_id": course_id,
"resource_type": filter_values
}
eventlog(request.user, EventLogTypes.EVENT_VIEW_RESOURCE_ACCESS.value, extra=data)
# get total number of student within the course_id
total_number_student_sql = "select count(*) from user where course_id = %(course_id)s and enrollment_type='StudentEnrollment'"
if (grade == GRADE_A):
total_number_student_sql += " and current_grade >= 90"
elif (grade == GRADE_B):
total_number_student_sql += " and current_grade >= 80 and current_grade < 90"
elif (grade == GRADE_C):
total_number_student_sql += " and current_grade >= 70 and current_grade < 80"
total_number_student_df = | pd.read_sql(total_number_student_sql, conn, params={"course_id": course_id}) | pandas.read_sql |
# RHR Online Anomaly Detection & Alert Monitoring
######################################################
# Author: <NAME> #
# Email: <EMAIL> #
# Location: Dept.of Genetics, Stanford University #
# Date: Oct 29 2020 #
######################################################
# uses raw heart rate and steps data (this stpes data doesn't have zeroes and need to innfer from hr datetime stamp)
## simple command
# python rhrad_online_alerts.py --heart_rate hr.csv --steps steps.csv
## full command
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv --steps pbb_fitbit_oldProtocol_steps.csv --myphd_id pbb_RHR_online --figure1 pbb_RHR_online_anomalies.pdf --anomalies pbb_RHR_online_anomalies.csv --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 --outliers_fraction 0.1 --random_seed 10 --baseline_window 744 --sliding_window 1 --alerts pbb_RHR_online_alerts.csv --figure2 pbb_RHR_online_alerts.pdf
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv \
# --steps pbb_fitbit_oldProtocol_steps.csv \
# --myphd_id pbb_RHR_online \
# --figure1 pbb_RHR_online_anomalies.pdf \
# --anomalies pbb_RHR_online_anomalies.csv \
# --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 \
# --outliers_fraction 0.1 \
# --random_seed 10 \
# --baseline_window 744 --sliding_window 1
# --alerts pbb_RHR_online_alerts.csv \
# --figure2 pbb_RHR_online_alerts.pdf
import warnings
warnings.filterwarnings('ignore')
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%matplotlib inline
import seaborn as sns
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
####################################
parser = argparse.ArgumentParser(description='Find anomalies in wearables time-series data.')
parser.add_argument('--heart_rate', metavar='', help ='raw heart rate count with a header = heartrate')
parser.add_argument('--steps',metavar='', help ='raw steps count with a header = steps')
parser.add_argument('--myphd_id',metavar='', default = 'myphd_id', help ='user myphd_id')
parser.add_argument('--anomalies', metavar='', default = 'myphd_id_anomalies.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure1', metavar='', default = 'myphd_id_anomalies.pdf', help='save predicted anomalies as a PDF file')
parser.add_argument('--symptom_date', metavar='', default = 'NaN', help = 'symptom date with y-m-d format')
parser.add_argument('--diagnosis_date', metavar='', default = 'NaN', help='diagnosis date with y-m-d format')
parser.add_argument('--outliers_fraction', metavar='', type=float, default=0.1, help='fraction of outliers or anomalies')
parser.add_argument('--random_seed', metavar='', type=int, default=10, help='random seed')
parser.add_argument('--baseline_window', metavar='',type=int, default=744, help='baseline window is used for training (in hours)')
parser.add_argument('--sliding_window', metavar='',type=int, default=1, help='sliding window is used to slide the testing process each hour')
parser.add_argument('--alerts', metavar='', default = 'myphd_id_alerts.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure2', metavar='', default = 'myphd_id_alerts.pdf', help='save predicted anomalies as a PDF file')
args = parser.parse_args()
# as arguments
fitbit_oldProtocol_hr = args.heart_rate
fitbit_oldProtocol_steps = args.steps
myphd_id = args.myphd_id
myphd_id_anomalies = args.anomalies
myphd_id_figure1 = args.figure1
symptom_date = args.symptom_date
diagnosis_date = args.diagnosis_date
RANDOM_SEED = args.random_seed
outliers_fraction = args.outliers_fraction
baseline_window = args.baseline_window
sliding_window = args.sliding_window
myphd_id_alerts = args.alerts
myphd_id_figure2 = args.figure2
####################################
class RHRAD_online:
# Infer resting heart rate ------------------------------------------------------
def resting_heart_rate(self, heartrate, steps):
"""
This function uses heart rate and steps data to infer resting heart rate.
It filters the heart rate with steps that are zero and also 12 minutes ahead.
"""
# heart rate data
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
# steps data
df_steps = pd.read_csv(fitbit_oldProtocol_steps)
df_steps = df_steps.set_index('datetime')
df_steps.index.name = None
df_steps.index = pd.to_datetime(df_steps.index)
# merge dataframes
#df_hr = df_hr.resample('1min').mean()
#df_steps = df_steps.resample('1min').mean()
# added "outer" paramter for merge function to adjust the script to the new steps format
#df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True)
df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True, how="outer")
df1 = df1[pd.isnull(df1).any(axis=1)].fillna(0)
df1 = df1.rename(columns={"value_x": "heartrate", "value_y": "steps"})
df1 = df1.resample('1min').mean()
print(myphd_id)
print("Data size (in miutes) before removing missing data")
print(df1.shape)
ax = df1.plot(figsize=(20,4), title=myphd_id)
ax.figure.savefig(myphd_id+'_data.png')
#print(df1)
df1 = df1.dropna(how='any')
df1 = df1.loc[df1['heartrate']!=0]
print("Data size (in miutes) after removing missing data")
print(df1.shape)
#print(df1)
# define RHR as the HR measurements recorded when there were less than two steps taken during a rolling time window of the preceding 12 minutes (including the current minute)
df1['steps'] = df1['steps'].apply(np.int64)
df1['steps_window_12'] = df1['steps'].rolling(12).sum()
df1 = df1.loc[(df1['steps_window_12'] == 0 )]
print(df1['heartrate'].describe())
print(df1['steps_window_12'].describe())
# impute missing data
#df1 = df1.resample('1min').mean()
#df1 = df1.ffill()
print("No.of timesteps for RHR (in minutes)")
print(df1.shape)
return df1
# Pre-processing ------------------------------------------------------
def pre_processing(self, resting_heart_rate):
"""
This function takes resting heart rate data and applies moving averages to smooth the data and
downsamples to one hour by taking the avegare values
"""
# smooth data
df_nonas = df1.dropna()
df1_rom = df_nonas.rolling(400).mean()
# resample
df1_resmp = df1_rom.resample('1H').mean()
df2 = df1_resmp.drop(['steps'], axis=1)
df2 = df2.dropna()
print("No.of timesteps for RHR (in hours)")
print(df2.shape)
return df2
# Seasonality correction ------------------------------------------------------
def seasonality_correction(self, resting_heart_rate, steps):
"""
This function takes output pre-processing and applies seasonality correction
"""
sdHR_decomposition = seasonal_decompose(sdHR, model='additive', freq=1)
sdSteps_decomposition = seasonal_decompose(sdSteps, model='additive', freq=1)
sdHR_decomp = pd.DataFrame(sdHR_decomposition.resid + sdHR_decomposition.trend)
sdHR_decomp.rename(columns={sdHR_decomp.columns[0]:'heartrate'}, inplace=True)
sdSteps_decomp = pd.DataFrame(sdSteps_decomposition.resid + sdSteps_decomposition.trend)
sdSteps_decomp.rename(columns={sdSteps_decomp.columns[0]:'steps_window_12'}, inplace=True)
frames = [sdHR_decomp, sdSteps_decomp]
data = pd.concat(frames, axis=1)
#print(data)
#print(data.shape)
return data
# Train model and predict anomalies ------------------------------------------------------
def online_anomaly_detection(self, data_seasnCorec, baseline_window, sliding_window):
"""
# split the data, standardize the data inside a sliding window
# parameters - 1 month baseline window and 1 hour sliding window
# fit the model and predict the test set
"""
for i in range(baseline_window, len(data_seasnCorec)):
data_train_w = data_seasnCorec[i-baseline_window:i]
# train data normalization ------------------------------------------------------
data_train_w += 0.1
standardizer = StandardScaler().fit(data_train_w.values)
data_train_scaled = standardizer.transform(data_train_w.values)
data_train_scaled_features = pd.DataFrame(data_train_scaled, index=data_train_w.index, columns=data_train_w.columns)
data = pd.DataFrame(data_train_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_train_w = data_1
data_train.append(data_train_w)
data_test_w = data_seasnCorec[i:i+sliding_window]
# test data normalization ------------------------------------------------------
data_test_w += 0.1
data_test_scaled = standardizer.transform(data_test_w.values)
data_scaled_features = pd.DataFrame(data_test_scaled, index=data_test_w.index, columns=data_test_w.columns)
data = pd.DataFrame(data_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_test_w = data_1
data_test.append(data_test_w)
# fit the model ------------------------------------------------------
model = EllipticEnvelope(random_state=RANDOM_SEED,
support_fraction=0.7,
contamination=outliers_fraction).fit(data_train_w)
# predict the test set
preds = model.predict(data_test_w)
#preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
dfs.append(preds)
# Merge predictions ------------------------------------------------------
def merge_test_results(self, data_test):
"""
Merge predictions
"""
# concat all test data (from sliding window) with their datetime index and others
data_test = pd.concat(data_test)
# merge predicted anomalies from test data with their corresponding index and other features
preds = pd.DataFrame(dfs)
preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
data_test_df = pd.DataFrame(data_test)
data_test_df = data_test_df.reset_index()
data_test_preds = data_test_df.join(preds)
return data_test_preds
# Positive Anomalies -----------------------------------------------------------------
"""
Selects anomalies in positive direction and saves in a CSV file
"""
def positive_anomalies(self, data):
a = data.loc[data['anomaly'] == -1, ('index', 'heartrate')]
positive_anomalies = a[(a['heartrate']> 0)]
# Anomaly results
positive_anomalies['Anomalies'] = myphd_id
positive_anomalies.columns = ['datetime', 'std.rhr', 'name']
positive_anomalies.to_csv(myphd_id_anomalies, header=True)
return positive_anomalies
# Alerts ------------------------------------------------------
def create_alerts(self, anomalies, data, fitbit_oldProtocol_hr):
"""
# creates alerts at every 24 hours and send at 9PM.
# visualise alerts
"""
# function to assign different alert names
# summarize hourly alerts
def alert_types(alert):
if alert['alerts'] >=6:
return 'RED'
elif alert['alerts'] >=1:
return 'YELLOW'
else:
return 'GREEN'
# summarize hourly alerts
#anomalies.columns = ['datetime', 'std.rhr', 'name']
anomalies = anomalies[['datetime']]
anomalies['datetime'] = pd.to_datetime(anomalies['datetime'], errors='coerce')
anomalies['alerts'] = 1
anomalies = anomalies.set_index('datetime')
anomalies = anomalies[~anomalies.index.duplicated(keep='first')]
anomalies = anomalies.sort_index()
alerts = anomalies.groupby(pd.Grouper(freq = '24H', base=21)).cumsum()
# apply alert_types function
alerts['alert_type'] = alerts.apply(alert_types, axis=1)
alerts_reset = alerts.reset_index()
#print(alerts_reset)
# save alerts
#alerts.to_csv(myphd_id_alerts, mode='a', header=True)
# summarize hourly alerts to daily alerts
daily_alerts = alerts_reset.resample('24H', on='datetime', base=21, label='right').count()
daily_alerts = daily_alerts.drop(['datetime'], axis=1)
#print(daily_alerts)
# function to assign different alert names
def alert_types(alert):
if alert['alert_type'] >=6:
return 'RED'
elif alert['alert_type'] >=1:
return 'YELLOW'
else:
return 'GREEN'
# apply alert_types function
daily_alerts['alert_type'] = daily_alerts.apply(alert_types, axis=1)
# merge missing 'datetime' with 'alerts' as zero aka GREEN
data1 = data[['index']]
data1['alert_type'] = 0
data1 = data1.rename(columns={"index": "datetime"})
data1['datetime'] = pd.to_datetime(data1['datetime'], errors='coerce')
data1 = data1.resample('24H', on='datetime', base=21, label='right').count()
data1 = data1.drop(data1.columns[[0,1]], axis=1)
data1 = data1.reset_index()
data1['alert_type'] = 0
data3 = pd.merge(data1, daily_alerts, on='datetime', how='outer')
data4 = data3[['datetime', 'alert_type_y']]
data4 = data4.rename(columns={ "alert_type_y": "alert_type"})
daily_alerts = data4.fillna("GREEN")
daily_alerts = daily_alerts.set_index('datetime')
daily_alerts = daily_alerts.sort_index()
# merge alerts with main data and pass 'NA' when there is a missing day instead of 'GREEN'
df_hr = | pd.read_csv(fitbit_oldProtocol_hr) | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from pytz import timezone, utc
from scipy import stats
from time import gmtime, strftime, mktime
def data_sampler_renamer_parser(path='weather-data.txt'):
# Take columns that are useful, rename them, parse the timestamp string
data = pd.read_csv(path, delimiter=r"\s+")
data_useful = data[
['YR--MODAHRMN', 'DIR', 'SPD', 'CLG', 'SKC', 'VSB', 'MW', 'AW', 'AW.1', 'TEMP', 'DEWP', 'SLP', 'ALT', 'MAX',
'MIN', 'PCP01', 'PCP06', 'PCP24', 'PCPXX', 'SD']]
data_useful.rename(
columns={'YR--MODAHRMN': 'timestamp', 'DIR': 'wind_direction', 'SPD': 'wind_speed', 'CLG': 'cloud_ceiling',
'SKC': 'sky_cover', 'VSB': 'visibility_miles', 'MW': 'manual_weather', 'AW': 'auto_weather',
'AW.1': 'auto_weather1', 'TEMP': 'temprature', 'DEWP': 'dew_point', 'SLP': 'sea_level',
'ALT': 'altimeter', 'MAX': 'max_temp', 'MIN': 'min_temp', 'PCP01': '1hour_precip',
'PCP06': '6hour_precip', 'PCP24': '24hour_precip', 'PCPXX': '3hour_precip', 'SD': 'snow_depth'},
inplace=True)
data_useful.timestamp = data_useful.timestamp.astype(str)
data_useful['year'] = data_useful.timestamp.str[0:4]
data_useful['month'] = data_useful.timestamp.str[4:6]
data_useful['day'] = data_useful.timestamp.str[6:8]
data_useful['hour'] = data_useful.timestamp.str[8:10]
data_useful['minutes'] = data_useful.timestamp.str[10:12]
data_useful.minutes = data_useful.minutes.astype(int)
data_useful.year = data_useful.year.astype(int)
data_useful.month = data_useful.month.astype(int)
data_useful.day = data_useful.day.astype(int)
data_useful.hour = data_useful.hour.astype(int)
return data_useful
def days_fixer(dataframe):
# Unify times to have observations at every hour. Fix all the dates/times based on this criteria
df = dataframe
df.loc[(df['minutes'].values < 31) & (df['minutes'].values != 0), 'minutes'] = 0
df.loc[(df['minutes'].values > 30) & (df['minutes'].values != 0), 'hour'] = df[(df.minutes != 0) & (
df.minutes > 30)].hour + 1
df.loc[(df['minutes'].values > 30) & (df['minutes'].values != 0), 'minutes'] = 0
df.loc[(df['hour'].values == 24), 'day'] = df[df.hour == 24].day + 1
df.loc[(df['hour'].values == 24), 'hour'] = 0
df.loc[(df['day'].values == 32), 'month'] = df[df.day == 32].month + 1
df.loc[(df['day'].values == 32), 'day'] = 1
df.loc[(df['day'].values == 29) & (df['month'].values == 2), ['month', 'day']] = 3, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 4), ['month', 'day']] = 5, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 6), ['month', 'day']] = 7, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 9), ['month', 'day']] = 10, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 11), ['month', 'day']] = 12, 1
df.loc[(df['day'].values == 1) & (df['month'].values == 13), ['month', 'day', 'year']] = 1, 1, 2016
df.hour = df.hour.map("{:02}".format)
df['datetime'] = pd.to_datetime(
df.year.astype(str) + ' ' + df.month.astype(str) + ' ' + df.day.astype(str) + ' ' + df.hour.astype(str),
format='%Y %m %d %H')
return df
def grouper(dataframe):
# Take a subset of colums and group them by time stamp. Afterwards take the mean/mode of the values depending on dataype
sub_df = dataframe[
['wind_direction', 'wind_speed', 'cloud_ceiling', 'sky_cover', 'visibility_miles', 'temprature', 'dew_point',
'sea_level', 'altimeter', '1hour_precip', 'datetime']]
sub_df = sub_df.convert_objects(convert_numeric=True)
f = {'wind_direction': ['mean'], 'wind_speed': ['mean'], 'cloud_ceiling': ['mean'], 'visibility_miles': ['mean'],
'temprature': ['mean'], 'dew_point': ['mean'], 'sea_level': ['mean'], 'altimeter': ['mean'],
'1hour_precip': ['mean']}
grouped = sub_df.groupby('datetime').agg(f)
grouped.columns = grouped.columns.droplevel(-1)
grouped2 = sub_df[['sky_cover', 'datetime']]
grouped2.loc[(grouped2['sky_cover'].values == '***'), 'sky_cover'] = np.nan
grouped3 = grouped2.groupby(['datetime']).agg(lambda x: stats.mode(x)[0][0])
grouped3.loc[(grouped3['sky_cover'].values == 0), 'sky_cover'] = np.nan
data_full = grouped.merge(grouped3, how='left', on=None, left_on=None, right_on=None, left_index=True,
right_index=True)
data_full.reset_index(inplace=True)
data_full['1hour_precip'].fillna(0, inplace=True)
data_full.loc[data_full[data_full['1hour_precip'] > 0.049].index, 'precip'] = 'high'
data_full.loc[data_full[data_full['1hour_precip'] <= 0.049].index, 'precip'] = 'low'
data_full.loc[data_full[data_full['1hour_precip'] == 0].index, 'precip'] = 'no'
data_full['precip_shift'] = data_full.precip.shift(-1)
data_full = pd.get_dummies(data_full, prefix=None, columns=['precip_shift'], sparse=False, drop_first=False)
data_full = data_full.fillna(method='bfill', axis=0, inplace=False, limit=None, downcast=None)
return data_full
def convert_gmt_to_easttime(string_date):
"""
:param string_date: GMT date
:return: Date converted to eastern time
"""
# Converts the string to datetime object
string_date = str(string_date)
try:
gtm = timezone('GMT')
eastern_tz = timezone('US/Eastern')
date_obj = datetime.strptime(string_date, '%Y-%m-%d %H:%M:%S')
date_obj = date_obj.replace(tzinfo=gtm)
date_eastern = date_obj.astimezone(eastern_tz)
date_str = date_eastern.strftime('%Y-%m-%d %H:%M:%S')
return date_str
except IndexError:
return ''
def add_easterntime_column(dataframe):
"""
:param dataframe: Weather dataframe
:return: dataframe with easter time column
"""
dataframe['est_datetime'] = dataframe['datetime'].apply(convert_gmt_to_easttime)
dataframe['est_datetime'] = pd.to_datetime(dataframe['est_datetime'])
return dataframe
#Set
#Interpolation
### 5 functions:
### ToTimestamp(d) from date to number
### toStringDate(d) from number to Date
### RepeatLast() interpolate by the last number
### toMinute() from hours to minutes
### Inter() Interpolate the dataset of weather
def toTimestamp(d):
return mktime(d.timetuple())
def repeatLast(left,right, values):
right= pd.concat((pd.DataFrame(right),pd.DataFrame(values)),axis=1)
right.columns=['first','second']
left.columns=['first']
inter = left.merge(right, how='left', on='first')
return inter.fillna(method='ffill')
def toMinute(datatime):
date_aux = datatime[0]
minute_dates = []
minute_dates_str=[]
while (date_aux <= datatime[len(datatime)-1]):
minute_dates.append(toTimestamp(date_aux))
minute_dates_str.append(date_aux)
date_aux +=timedelta(minutes=1) # days, seconds, then other fields.
return minute_dates, minute_dates_str
def inter(weather):
datatime = pd.to_datetime(weather['datetime'])
minute_dates, minute_dates_str=toMinute(weather['datetime'])
datatime = datatime.apply(toTimestamp)
wind = np.interp(minute_dates, datatime, weather['wind_speed'])
dew = np.interp(minute_dates, datatime, weather['dew_point'])
visibility= np.interp(minute_dates, datatime, weather['visibility_miles'])
wind_dir= np.interp(minute_dates, datatime, weather['wind_direction'])
sea_level= np.interp(minute_dates, datatime, weather['sea_level'])
altimeter = np.interp(minute_dates, datatime, weather['altimeter'])
temprature = np.interp(minute_dates, datatime, weather['temprature'])
precip=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip'])
precip_shift_high=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip_shift_high'])
precip_shift_low=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip_shift_low'])
precip_shift_no=repeatLast(pd.DataFrame(minute_dates_str),weather['datetime'], weather[ 'precip_shift_no'])
interDf = pd.concat((pd.DataFrame(minute_dates_str),pd.DataFrame(wind),
pd.DataFrame(dew),pd.DataFrame(visibility),
| pd.DataFrame(wind_dir) | pandas.DataFrame |
import pandas as pd
from matching.preprocess import (
add_epc_features,
add_ppd_features,
casefold_epc_addresses,
casefold_ppd_addresses,
extract_building_name,
extract_building_number,
extract_flat_number,
)
def test_extract_building_name() -> None:
assert extract_building_name(None) is None
assert extract_building_name("wayfarer") == "wayfarer"
assert extract_building_name("grey house") == "grey house"
assert extract_building_name("flat a") is None
assert extract_building_name("apartment 32") is None
assert extract_building_name("32 rochdale road") is None
assert extract_building_name("32, rochdale road") is None
def test_extract_building_number() -> None:
assert extract_building_number(None) is None
assert extract_building_number("8 south road") == "8"
assert extract_building_number("23, halifax road") == "23"
assert extract_building_number("8c, letchmore road") == "8c"
assert extract_building_number("18c meldon road") == "18c"
assert extract_building_number("flat a") is None
assert extract_building_number("flat 3") is None
def test_extract_flat_number() -> None:
assert extract_flat_number(None) is None
assert extract_flat_number("flat 15") == "15"
assert extract_flat_number("flat 7") == "7"
assert extract_flat_number("flat 1 queens court") == "1"
assert extract_flat_number("flat a") == "a"
assert extract_flat_number("basement flat") == "basement"
assert extract_flat_number("first floor flat") == "first floor"
assert extract_flat_number("fancy building name") is None
assert extract_flat_number("apartment 123") == "123"
def test_casefold_epc_addresses() -> None:
epc_addresses = pd.DataFrame(
[
{
"address_line_1": "First Floor Flat",
"address_line_2": "1, London Road",
"address_line_3": "<NAME>",
}
]
)
got = epc_addresses.pipe(casefold_epc_addresses)
want = pd.DataFrame(
[
{
"address_line_1": "first floor flat",
"address_line_2": "1, london road",
"address_line_3": "<NAME>",
}
]
)
pd.testing.assert_frame_equal(got, want)
def test_add_epc_features() -> None:
tests = [
(
{
"address_line_1": "flat c",
"address_line_2": "97, grove road",
},
{"building_number": "97", "flat_number": "c"},
),
(
{
"address_line_1": "32, pilkington road",
"address_line_2": None,
},
{"building_number": "32"},
),
(
{
"address_line_1": "the mansion",
"address_line_2": "1, garden road",
},
{"building_name": "the mansion", "building_number": "1"},
),
(
{"address_line_1": "flat 100", "address_line_2": "nice mansions"},
{"building_name": "nice mansions", "flat_number": "100"},
),
]
addresses, features = zip(*tests)
got = pd.DataFrame(addresses).pipe(add_epc_features)[
["building_number", "flat_number", "building_name"]
]
want = pd.DataFrame(features)
pd.testing.assert_frame_equal(got, want)
def test_casefold_ppd_addresses() -> None:
ppd_addresses = pd.DataFrame(
[
{
"primary_addressable_object_name": "6a",
"secondary_addressable_object_name": "LOWER MAISONETTE",
"street": "SURREY AVENUE",
}
]
)
got = ppd_addresses.pipe(casefold_ppd_addresses)
want = pd.DataFrame(
[
{
"primary_addressable_object_name": "6a",
"secondary_addressable_object_name": "lower maisonette",
"street": "surrey avenue",
}
]
)
pd.testing.assert_frame_equal(got, want)
def test_add_ppd_features() -> None:
tests = [
(
{
"primary_addressable_object_name": "21",
"secondary_addressable_object_name": "flat 4",
},
{"flat_number": "4"},
),
]
addresses, features = zip(*tests)
got = pd.DataFrame(addresses).pipe(add_ppd_features)[["flat_number"]]
want = | pd.DataFrame(features) | pandas.DataFrame |
#!/usr/bin/env python3
import os
import sys
import re
import pandas as pd, geopandas as gpd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
import rasterio
from rasterio import features as riofeatures
from rasterio import plot as rioplot
from shapely.geometry import Polygon
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
string of columns to group eval metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
None.
'''
file = Path(file)
if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
branches_folder = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
alt_plot = args[9]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': object, 'feature_id':object,'HydroID':object, 'levpa_id':object})
elev_table.dropna(subset=['location_id'], inplace=True)
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': object, 'feature_id':object})
# Aggregate FIM4 hydroTables
hydrotable = pd.DataFrame()
for branch in elev_table.levpa_id.unique():
branch_elev_table = elev_table.loc[elev_table.levpa_id == branch].copy()
branch_hydrotable = pd.read_csv(join(branches_folder, str(branch), f'hydroTable_{branch}.csv'),dtype={'HydroID':object,'feature_id':object})
# Only pull SRC for hydroids that are in this branch
branch_hydrotable = branch_hydrotable.loc[branch_hydrotable.HydroID.isin(branch_elev_table.HydroID)]
branch_hydrotable.drop(columns=['order_'], inplace=True)
# Join SRC with elevation data
branch_elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
branch_hydrotable = branch_hydrotable.merge(branch_elev_table, on="HydroID")
# Append to full rating curve dataframe
if hydrotable.empty:
hydrotable = branch_hydrotable
else:
hydrotable = hydrotable.append(branch_hydrotable)
# Join rating curves with elevation data
#elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
#hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source', 'HydroID', 'levpa_id', 'dem_adj_elevation'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
if 'default_discharge_cms' in hydrotable.columns: # check if both "FIM" and "FIM_default" SRCs are available
hydrotable['default_discharge_cfs'] = hydrotable.default_discharge_cms * 35.3147
limited_hydrotable_default = hydrotable.filter(items=['location_id','elevation_ft', 'default_discharge_cfs'])
limited_hydrotable_default['discharge_cfs'] = limited_hydrotable_default.default_discharge_cfs
limited_hydrotable_default['source'] = "FIM_default"
rating_curves = limited_hydrotable.append(select_usgs_gages)
rating_curves = rating_curves.append(limited_hydrotable_default)
else:
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','order_']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['order_'] = rating_curves['order_'].astype('int')
# NWM recurr intervals
recurr_intervals = ("2","5","10","25","50","100")
recurr_dfs = []
for interval in recurr_intervals:
recurr_file = join(nwm_flow_dir, 'nwm21_17C_recurr_{}_0_cms.csv'.format(interval))
df = pd.read_csv(recurr_file, dtype={'feature_id': str})
# Update column names
df = df.rename(columns={"discharge": interval})
recurr_dfs.append(df)
# Merge NWM recurr intervals into a single layer
nwm_recurr_intervals_all = reduce(lambda x,y: pd.merge(x,y, on='feature_id', how='outer'), recurr_dfs)
nwm_recurr_intervals_all = pd.melt(nwm_recurr_intervals_all, id_vars=['feature_id'], value_vars=recurr_intervals, var_name='recurr_interval', value_name='discharge_cms')
# Append catfim data (already set up in format similar to nwm_recurr_intervals_all)
cat_fim = pd.read_csv(catfim_flows_filename, dtype={'feature_id':str})
nwm_recurr_intervals_all = nwm_recurr_intervals_all.append(cat_fim)
# Convert discharge to cfs and filter
nwm_recurr_intervals_all['discharge_cfs'] = nwm_recurr_intervals_all.discharge_cms * 35.3147
nwm_recurr_intervals_all = nwm_recurr_intervals_all.filter(items=['discharge_cfs', 'recurr_interval','feature_id']).drop_duplicates()
# Identify unique gages
usgs_crosswalk = hydrotable.filter(items=['location_id', 'feature_id']).drop_duplicates()
usgs_crosswalk.dropna(subset=['location_id'], inplace=True)
nwm_recurr_data_table = pd.DataFrame()
usgs_recurr_data = pd.DataFrame()
# Interpolate USGS/FIM elevation at each gage
for index, gage in usgs_crosswalk.iterrows():
# Interpolate USGS elevation at NWM recurrence intervals
usgs_rc = rating_curves.loc[(rating_curves.location_id==gage.location_id) & (rating_curves.source=="USGS")]
if len(usgs_rc) <1:
print(f"missing USGS rating curve data for usgs station {gage.location_id} in huc {huc}")
continue
str_order = np.unique(usgs_rc.order_).item()
feature_id = str(gage.feature_id)
usgs_pred_elev = get_reccur_intervals(usgs_rc, usgs_crosswalk,nwm_recurr_intervals_all)
# Handle sites missing data
if len(usgs_pred_elev) <1:
print(f"missing USGS elevation data for usgs station {gage.location_id} in huc {huc}")
continue
# Clean up data
usgs_pred_elev['location_id'] = gage.location_id
usgs_pred_elev = usgs_pred_elev.filter(items=['location_id','recurr_interval', 'discharge_cfs','pred_elev'])
usgs_pred_elev = usgs_pred_elev.rename(columns={"pred_elev": "USGS"})
# Interpolate FIM elevation at NWM recurrence intervals
fim_rc = rating_curves.loc[(rating_curves.location_id==gage.location_id) & (rating_curves.source=="FIM")]
if len(fim_rc) <1:
print(f"missing FIM rating curve data for usgs station {gage.location_id} in huc {huc}")
continue
fim_pred_elev = get_reccur_intervals(fim_rc, usgs_crosswalk,nwm_recurr_intervals_all)
# Handle sites missing data
if len(fim_pred_elev) <1:
print(f"missing FIM elevation data for usgs station {gage.location_id} in huc {huc}")
continue
# Clean up data
fim_pred_elev = fim_pred_elev.rename(columns={"pred_elev": "FIM"})
fim_pred_elev = fim_pred_elev.filter(items=['recurr_interval', 'discharge_cfs','FIM'])
usgs_pred_elev = usgs_pred_elev.merge(fim_pred_elev, on=['recurr_interval','discharge_cfs'])
# Add attributes
usgs_pred_elev['HUC'] = huc
usgs_pred_elev['HUC4'] = huc[0:4]
usgs_pred_elev['str_order'] = str_order
usgs_pred_elev['feature_id'] = feature_id
# Melt dataframe
usgs_pred_elev = | pd.melt(usgs_pred_elev, id_vars=['location_id','feature_id','recurr_interval','discharge_cfs','HUC','HUC4','str_order'], value_vars=['USGS','FIM'], var_name="source", value_name='elevation_ft') | pandas.melt |
import pandas as pd
from time import time,localtime,strftime
from os.path import join
import sys
starttime = time()
print(strftime("%x %X", localtime(starttime)) + " Started")
id_map = {'4':'5','5':'6','6':'7','7':'8','8':'9','9':'10','10':'11',
'11':'13','12':'14','13':'16','14':'18','15':'20','16':'21',
'17':'22','18':'23','19':'24','20':'25','21':'27','22':'28','23':'30',
'24':'31','25':'32','26':'34','27':'35','28':'36','29':'37'}
month_dict = {'APRIL':'4', 'MAY':'5'}
BASE_DIR = 'Q:\CMP\LOS Monitoring 2021\Counts\ADT\SFCTA 2021 ADT Results'
time_dict = {'t1':['00','15'],'t2':['15','30'],'t3':['30','45'],'t4':['45','00']}
def fmt_time(x):
t_nxt = str(int(x['Time'])+1).zfill(2)
if x['variable']=='t4':
x['Time'] = x['Time'] + time_dict[x['variable']][0] + '-' + t_nxt + time_dict[x['variable']][1]
else:
x['Time'] = x['Time'] + time_dict[x['variable']][0] + '-' + x['Time'] + time_dict[x['variable']][1]
return x
def proc_data(temp_df, direction):
temp_df.columns = ['Time','t1','t2','t3','t4']
temp_df['Time'] = pd.to_datetime(temp_df['Time'],format= '%H:%M:%S' ).dt.hour.astype(str)
temp_df['Time'] = temp_df['Time'].apply(lambda x: x.zfill(2))
temp_df = pd.melt(temp_df, id_vars=['Time'], value_name='Vol')
temp_df = temp_df.apply(fmt_time, axis=1)
temp_df['Direction'] = direction
return temp_df
out_df = pd.DataFrame()
for i in range(1,30):
infile = join(BASE_DIR, 'LOC #{}.xls'.format(i))
for dayno in range(1,7):
sheet_name = 'Day' + str(dayno)
try:
street = pd.read_excel(infile, sheet_name, header=None, skiprows=5, usecols="D:D", nrows=1).values[0][0]
date = pd.read_excel(infile, sheet_name, header=None, skiprows=7, usecols="D:D", nrows=1).values[0][0].split()
date = '%s.%s.%s' %(date[3], month_dict[date[1]],date[2][:-3])
if i in [1,11,14]:
dir1 = | pd.read_excel(infile, sheet_name, header=None, skiprows=9, usecols="C:C", nrows=1) | pandas.read_excel |
'''
This file is part of the PSL software.
Copyright 2011-2015 University of Maryland
Copyright 2013-2019 The Regents of the University of California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import pandas
from pslpython.partition import Partition
from pslpython.predicate import Predicate
from pslpython.predicate import PredicateError
from tests.base_test import PSLTest
class TestPredicate(PSLTest):
def test_name_normalization(self):
# [(input, expected), ...]
names = [
('a', 'A'),
('foo', 'FOO'),
('Bar', 'BAR'),
('BAZ', 'BAZ'),
('123', '123'),
]
for (input_name, expected_name) in names:
predicate = Predicate(input_name, closed = True, size = 2)
self.assertEqual(predicate.name(), expected_name)
def test_init_args(self):
failing_configs = [
({'raw_name': 'Foo', 'closed': False}, 'No size supplied.'),
({'raw_name': 'Foo', 'closed': False, 'size': -1}, 'Negative size.'),
({'raw_name': 'Foo', 'closed': False, 'size': 0}, 'Zero size.'),
({'raw_name': 'Foo', 'closed': False, 'size': 2, 'arg_types': [Predicate.ArgType.UNIQUE_INT_ID]}, 'Type size mismatch.'),
({'raw_name': 'Foo', 'closed': False, 'size': 1, 'arg_types': ['UniqueIntID']}, 'Non-enum arg type.'),
]
for (args, reason) in failing_configs:
try:
predicate = Predicate(**args)
self.fail('Failed to raise exception on: ' + reason)
except PredicateError as ex:
# Expected
pass
def test_add_record(self):
predicate = Predicate('Foo', closed = True, size = 2)
predicate.add_data_row(Partition.OBSERVATIONS, ['A', 'B'])
predicate.add_data_row(Partition.OBSERVATIONS, ['C', 'D'], 0.5)
predicate.add_data_row(Partition.OBSERVATIONS, [1, 2])
expected = pandas.DataFrame([
['A', 'B', 1.0],
['C', 'D', 0.5],
[1, 2, 1.0],
])
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
def test_add_frame(self):
predicate = Predicate('Foo', closed = True, size = 2)
input_data = pandas.DataFrame([
['A', 'B'],
['C', 'D'],
[1, 2],
])
predicate.add_data(Partition.OBSERVATIONS, input_data)
expected = pandas.DataFrame([
['A', 'B', 1.0],
['C', 'D', 1.0],
[1, 2, 1.0],
])
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
def test_add_list(self):
predicate = Predicate('Foo', closed = True, size = 2)
input_data = [
['A', 'B', 0.0],
['C', 'D', 0.5],
[1, 2, 1.0],
]
predicate.add_data(Partition.OBSERVATIONS, input_data)
expected = pandas.DataFrame([
['A', 'B', 0.0],
['C', 'D', 0.5],
[1, 2, 1.0],
])
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
def test_add_file(self):
predicate = Predicate('1', closed = True, size = 2)
path = os.path.join(PSLTest.TEST_DATA_DIR, 'misc', 'binary_small.txt')
predicate.add_data_file(Partition.OBSERVATIONS, path)
expected = pandas.DataFrame([
['A', 'B', 1.0],
['C', 'D', 1.0],
['1', '2', 1.0],
])
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
predicate = Predicate('2', closed = True, size = 2)
path = os.path.join(PSLTest.TEST_DATA_DIR, 'misc', 'binary_small.txt')
predicate.add_data_file(Partition.OBSERVATIONS, path, has_header = True)
expected = pandas.DataFrame([
['C', 'D', 1.0],
['1', '2', 1.0],
])
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
predicate = Predicate('3', closed = True, size = 2)
path = os.path.join(PSLTest.TEST_DATA_DIR, 'misc', 'binary_small.csv')
predicate.add_data_file(Partition.OBSERVATIONS, path, delim = ',')
expected = pandas.DataFrame([
['A', 'B', 1.0],
['C', 'D', 1.0],
['1', '2', 1.0],
])
| pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected) | pandas.testing.assert_frame_equal |
import datetime
import math
import warnings
from geopy.distance import great_circle as distance
import googlemaps
import numpy as np
import pandas
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
class Elevation(object):
"""Class to get elevations from lat-lon coordinates.
Construction of an Elevation object reads in the latlon coords and
calculates the cumulative distance to each point from the start
of the latlon sequence.
TODO: Come up with a more descriptive class name.
"""
def __init__(self, latlon_list, user_gmaps_key=None, img_dir=None):
"""Creates an Elevation from a list of latlon coords.
Args:
latlon_list: An array-like object of [lon, lat] pairs.
user_gmaps_key: String representing Google Maps API key. Required
for 'google' method.
img_dir: String representing the directory containing .img files
to be searched. Required for 'img' method.
"""
self.user_gmaps_key = user_gmaps_key
self.img_dir = img_dir
if type(latlon_list) == pandas.DataFrame:
latlon_list = latlon_list.values.squeeze()
self.data = pandas.DataFrame(data=latlon_list,
columns=['lon', 'lat'])
self._clean_up_coordinates()
# Build a new column for the DataFrame representing cumulative
# distance to each point, with an initial zero value because
# no movement has occurred at first.
distances_cum = [0.0]
for i in range(1, len(self.data)):
# Calculate cumulative distance up to this point by adding
# the distance between the previous and current point
# to the cumulative distance to the previous point.
row_prev = self.data.iloc[i-1]
row_curr = self.data.iloc[i]
distance_cum = distances_cum[i-1] + \
distance((row_prev['lat'], row_prev['lon']),
(row_curr['lat'], row_curr['lon'])).meters
distances_cum.append(distance_cum)
self.data['distance'] = distances_cum
def _clean_up_coordinates(self):
"""Infers missing lat/lon coordinates in simple cases."""
self.data.fillna(method='bfill', inplace=True)
@property
def latlons(self):
return self.data[['lon', 'lat']]
@property
def distance(self):
return np.array(self.data['distance']).squeeze()
def google(self, units='meters'):
"""Queries google maps' Elevation API at each point.
Args:
units: String representing desired units. 'meters' or 'feet',
case-insensitive.
Returns:
Google elevation values as a ndarray of floats, in meters.
"""
if self.user_gmaps_key is None:
return None
if 'google' not in self.data.columns:
gmaps = googlemaps.Client(key=self.user_gmaps_key)
# Google maps elevation api allows 500 elevation values
# per request. Break latlon coordinates into 500-piece chunks
# and pass to the api, then assemble returned elevations into one
# consolidated list, and add to dataframe as a new column.
unit_factor = 5280.0/1609 if units.lower() == 'feet' else 1.0
elevs = []
for _, chunk in self.latlons.groupby(np.arange(len(self.latlons)) // 500):
# Format coordinates for google maps api request
locations = [(float(row['lat']), float(row['lon']))
for _, row in chunk.iterrows()]
elevs.extend([round(elevobj['elevation'] * unit_factor, 1)
for elevobj in gmaps.elevation(locations)])
self.data['google'] = elevs
return np.array(self.data['google']).squeeze()
def img(self, units='meters'):
"""Accesses elevation data from user-owned .img files.
In my case, I have downloaded high-resolution elevation data for
the Boulder area based on the National Map's 1-meter DEM.
Args:
units: String representing desired units. 'meters' or 'feet',
case-insensitive.
Returns:
Elevations from files in .img directory as a ndarray of floats.
NaN values are returned for coordinates outside the bounds of
all the .img files.
TODO: Properly handle case where utm/gdal not installed, or
the .img directory is not specified.
"""
if self.img_dir is None:
return None
import glob
import struct
from osgeo import gdal
import utm
# TODO: Employ os.join so slashes aren't so important.
file_objs = {}
fnames = glob.glob(self.img_dir + '*.img')
fnames.extend(glob.glob(self.img_dir + '*.IMG'))
# Compile geospatial information about each IMG file, then close the
# file to avoid blowing up the server's memory.
for fname in fnames:
img = gdal.Open(fname)
xoffset, px_w, rot1, yoffset, rot2, px_h = img.GetGeoTransform()
file_obj = {
'xoffset': xoffset,
'yoffset': yoffset,
'px_w': px_w,
'px_h': px_h,
'points_to_try': []
}
file_objs[fname] = file_obj
img = None
# For each (lon, lat) pair, search through all img files for a
# valid elevation coordinate, and save every one.
for index, row in self.latlons.iterrows():
posX, posY, zone_num, zone_letter = utm.from_latlon(row['lat'], row['lon'])
# These values are not used in current implementation. They
# facilitate finding the correct .IMG file by fname. Now however,
# I search through every file.
x_index = math.floor(posX / 10000)
y_index = math.ceil(posY / 10000)
for fname in fnames:
file_obj = file_objs[fname]
#px = int((posX - file_obj['xoffset']) / file_obj['px_w']) # x pixel
#py = int((posY - file_obj['yoffset'])/ file_obj['px_h']) # y pixel
# Find decimal pixel for x and y, then check if a valid index
# exists by rounding each pixel index down or up. This means
# there could be up to 4 different combinations of px/py.
px_dec = (posX - file_obj['xoffset']) / file_obj['px_w']
py_dec = (posY - file_obj['yoffset'])/ file_obj['px_h']
pxs = [math.floor(px_dec), math.ceil(px_dec)]
pys = [math.floor(py_dec), math.ceil(py_dec)]
for px in pxs:
for py in pys:
if px > 0 and py > 0:
file_obj['points_to_try'].append({'px': px,
'py': py,
'index': index})
# For each IMG file, iterate through the pre-assembled list of
# points which should have a valid elevation coordinate.
# Catch exceptions resulting from each raster not having
# the full spatial extent.
unit_factor = 5280.0/1609 if units.lower() == 'feet' else 1.0
elevs = np.full(len(self.latlons), np.nan)
for fname in fnames:
img = gdal.Open(fname)
rb = img.GetRasterBand(1)
# I think prevents printing to screen when point is outside grid.
gdal.UseExceptions()
for point_to_try in file_objs[fname]['points_to_try']:
try:
#rb = img.GetRasterBand(1)
# Assumes 32 bit int aka 'float'.
structval = rb.ReadRaster(point_to_try['px'],
point_to_try['py'],
1, 1, buf_type=gdal.GDT_Float32)
intval = struct.unpack('f', structval)
# Check if point within bounds, but elevation undefined (this
# would likely be due to an incomplete image). The value used
# for missing elevation data is a large negative number.
if intval[0] > -9999:
ix = int(point_to_try['index']);
if elevs[ix] != intval[0]*unit_factor:
# There isn't a value yet.
elevs[ix] = intval[0]*unit_factor
#rb = None
except:
pass
img = None
rb = None
# Clean up null elevation values in the array by forward-filling.
# Should be very few, right at IMG edges. This method should work
# except in the case of leading nan values in the array.
#ixs_missing = np.argwhere(np.isnan(elevs))
#for ix_missing in ixs_missing:
# elevs[ix_missing] = elevs[ix_missing-1]
# Round to the nearest tenth of a unit. Higher precision elevation
# differences could not be differentiated from measurement noise.
elevs_round = np.around(elevs, decimals=1)
return elevs_round.squeeze()
def _retry(exceptions, tries=4, delay=3, backoff=2, logger=None):
"""A deco to keep the National Map's elevation query working."""
from functools import wraps
import time
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except exceptions as e:
msg = ('{}, Retrying in {} seconds...').format(e, mdelay)
if logger:
logger.warning(msg)
else:
print(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry
return deco_retry
@_retry(ValueError, tries=4, delay=3, backoff=2)
def _query_natmap(lat, lon, units='Meters'):
"""Returns elevation data with 1/3 arc-second resolution (~30m).
Data is accessed via based the National Map's Elevation Point Query
Service. The 1/3 arc-second digital elevation model covers the
entire US, so this should work anywhere.
Because the Elevation Point Query Service only permits one point at
a time, this function is not suitable for querying large sets of
latlon coordinates. It goes incredibly slow.
TODO: Find an approach that goes faster or lets this function work
in the background.
"""
import json
import ssl
from urllib.request import urlopen
ssl.match_hostname = lambda cert, hostname: True
url = 'https://nationalmap.gov/epqs/pqs.php?x=' + str(lon) \
+ '&y=' + str(lat) + '&units=' + units.capitalize() + '&output=json'
request = urllib.request.urlopen(url)
json_request = json.load(request)
return json_request['USGS_Elevation_Point_Query_Service'][
'Elevation_Query']['Elevation']
def _query(latlon):
import json
import ssl
from urllib.request import urlopen
#import requests
ssl.match_hostname = lambda cert, hostname: True
url = 'https://nationalmap.gov/epqs/pqs.php?x=' + str(latlon[0]) \
+ '&y=' + str(latlon[1]) + '&units=Meters&output=json'
#request = urllib.request.urlopen(url)
request = urlopen(url)
#request = requests.get(url)
#return request.json()['USGS_Elevation_Point_Query_Service'][
# 'Elevation_Query']['Elevation']
return json.load(request)['USGS_Elevation_Point_Query_Service'][
'Elevation_Query']['Elevation']
def _elevations_natmap(latlons, units='Meters'):
from multiprocessing import Pool
with Pool() as p:
print(p.map(_query, latlons))
def elevation_gain(elevations):
"""Naive elevation gain calculation.
TODO: Make this algorithm smarter so noise doesn't affect it as much.
"""
return sum([max(elevations[i+1] - elevations[i], 0.0)
for i in range(len(elevations) - 1)])
def elevation_smooth_time(elevations, sample_len=1,
window_len=21, polyorder=2):
"""Smooths noisy elevation time series.
Because of GPS and DEM inaccuracy, elevation data is not smooth.
Calculations involving terrain slope (the derivative of elevation
with respect to distance, d_elevation/d_x) will not yield reasonable
values unless the data is smoothed.
This method's approach follows the overview outlined in the
NREL paper found in the Resources section and cited in README.
However, unlike the algorithm in the paper, which samples regularly
over distance, this algorithm samples regularly over time (well, it
presumes the elevation values are sampled at even 1-second intervals.
The body only cares about energy use over time, not over distance.
The noisy elevation data is downsampled and passed through a
Savitzky-Golay (SG) filter. Parameters for the filters were not
described in the paper, so they must be tuned to yield intended
results when applied to a particular type of data. Because the
assumptions about user behavior depend on the activiy being performed,
the parameters will likely differ for a road run, a trail run, or a
trail hike.
Args:
elevations: Array-like object of elevations above sea level
corresponding to each time.
sample_len: A float describing the time (in seconds) between between
desired resampled data. Default is 1.
window_len: An integer describing the length of the window used
in the SG filter. Must be positive odd integer.
polyorder: An integer describing the order of the polynomial used
in the SG filter, must be less than window_len.
TODO(aschroeder) ? Combine a binomial filter with existing SG filter
and test effects on algorithm performance.
"""
#times = np.arange(0, len(elevations))
#
#if isinstance(times[0], datetime.timedelta):
# times = [time.total_seconds() for time in times]
#else:
# times = list(time)
# Pass downsampled data through a Savitzky-Golay filter (attenuating
# high-frequency noise).
# TODO (aschroeder): Add a second, binomial filter?
# TODO (aschroeder): Fix the scipy/signal/arraytools warning!
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
elevs_smooth = savgol_filter(list(elevations), window_len, polyorder)
return elevs_smooth
def elevation_smooth(distances, elevations, sample_len=5.0,
window_len=21, polyorder=2):
"""Smooths noisy elevation data for use in grade calculations.
Because of GPS and DEM inaccuracy, elevation data is not smooth.
Calculations involving terrain slope (the derivative of elevation
with respect to distance, d_elevation/d_x) will not yield reasonable
values unless the data is smoothed.
This method's approach follows the overview outlined in the
NREL paper found in the Resources section and cited in README.
The noisy elevation data is downsampled and passed through a
Savitzy-Golay (SG) filter. Parameters for the filters were not
described in the paper, so they must be tuned to yield intended
results when applied to the data.
Args:
distances: Array-like object of cumulative distances along a path.
elevations: Array-like object of elevations above sea level
corresponding to the same path.
sample_len: A float describing the distance (in meters) between
data samples. Data will be resampled at this interval.
window_len: An integer describing the length of the window used
in the SG filter. Must be positive odd integer.
polyorder: An integer describing the order of the polynomial used
in the SG filter, must be less than window_len.
TODO(aschroeder) ? Combine a binomial filter with existing SG filter
and test effects on algorithm performance.
"""
distances = pandas.Series(distances, name='distance')
elevations = pandas.Series(elevations, name='elevation')
# Subsample elevation data in evenly-spaced intervals, with each
# point representing elevation value at the interval midpoint.
n_sample = math.ceil((distances.iloc[-1] - distances.iloc[0]) / sample_len)
xvals = np.linspace(distances.iloc[0], distances.iloc[-1], n_sample + 1)
interp_fn = interp1d(distances, elevations, kind='linear')
elevations_ds = interp_fn(xvals)
# Create a DataFrame to handle calculations.
data_ds = pandas.DataFrame(data=elevations_ds, columns=['elevation'])
data_ds['distance'] = xvals
#idx = pandas.cut(distances, n_sample)
#with warnings.catch_warnings():
# warnings.simplefilter('ignore', category=RuntimeWarning)
# data_ds = elevations.groupby(idx).apply(np.median).interpolate(
# limit_direction='both').to_frame()
#data_ds['distance'] = pandas.IntervalIndex(
# data_ds.index.get_level_values('distance')).mid
# Pass downsampled data through a Savitzky-Golay filter (attenuating
# high-frequency noise). Calculate elevations at the original distance
# values via interpolation.
# TODO (aschroeder): Add a second, binomial filter?
# TODO (aschroeder): Fix the scipy/signal/arraytools warning!
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
data_ds['sg'] = savgol_filter(data_ds['elevation'], window_len, polyorder)
# Backfill the elevation values at the original distances by
# interpolation between the downsampled, smoothed points.
interp_function = interp1d(
data_ds['distance'], data_ds['sg'],
#fill_value='extrapolate', kind='linear')
fill_value='extrapolate', kind='quadratic')
smooth = interp_function(distances)
# TODO (aschroeder): Switch this back when done.
#return data_ds
return smooth
def grade_smooth_time(distances, elevations):
"""Calculates smoothed point-to-point grades based on time.
This method assumes elevation values are evenly sampled with respect
to time.
Args:
distances: Array-like object of cumulative distances sampled at each
second along a path.
elevations: Array-like object of elevations above sea level
corresponding to the same path.
"""
distances = pandas.Series(distances).reset_index(drop=True)
elevations = pandas.Series(elevations).reset_index(drop=True)
elevations_smooth = pandas.Series(elevation_smooth_time(elevations))
grade = elevations_smooth.diff() / distances.diff()
# Clean up spots with NaNs from dividing by zero distance.
# This assumes the distances and elevations arrays have no NaNs.
grade.fillna(0, inplace=True)
return np.array(grade)
def grade_smooth(distances, elevations):
"""Calculates smoothed point-to-point grades based on distance.
TODO(aschroeder): Check if distances and elevations are same length.
Args:
distances: Array-like object of cumulative distances along a path.
elevations: Array-like object of elevations above sea level
corresponding to the same path.
"""
distances = pandas.Series(distances).reset_index(drop=True)
elevations = pandas.Series(elevations).reset_index(drop=True)
elevations_smooth = pandas.Series(elevation_smooth(distances, elevations))
grade = elevations_smooth.diff() / distances.diff()
# Clean up spots with NaNs from dividing by zero distance.
# This assumes the distances and elevations arrays have no NaNs.
grade.fillna(0, inplace=True)
return np.array(grade)
def grade_raw(distances, elevations):
"""Calculates unsmoothed point-to-point grades.
TODO(aschroeder): check if distances and elevations are same length.
Args:
distances: Array-like object of cumulative distances along a path.
elevations: Array-like object of elevations above sea level
corresponding to the same path.
"""
distances = | pandas.Series(distances) | pandas.Series |
"""
Function to estimate the FDR and q-values for a set of PSMs
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from . import methods
def estimate(metric, target, desc=True):
"""
Estimate q-values using target decoy competition.
Deprecated. See `methods.tdc()` instead
"""
return methods.tdc(metric, target, desc)
def plot(qvalues, target=None, threshold=0.1, ax=None, **kwargs):
"""
Plot the number of accepted PSMs at each q-value.
Parameters
----------
qvalues : numpy.ndarray
The estimated q-values for a set of PSMs.
target : numpy.ndarray, optional
A 1D array indicating if the entry is from a target or decoy
hit. This should be boolean, where `True` indicates a target
and `False` indicates a decoy. `target[i]` is the label for
`qvalues[i]`; thus `target` and `metric` should be of equal
length. If `None` (the default), all elements of `qvalues` are
assumed to be targets.
threshold : float, optional
Indicates the maximum q-value to plot, since often we are
uninterested in performance at a high FDR. The default in 0.1.
ax : matplotlib.pyplot.Axes, optional
The matplotlib Axes on which to plot. If `None` the current Axes
instance is used.
**kwargs : dict
Arguments passed to matplotlib.pyplot.plot()
Returns
-------
matplotlib.pyplot.Axes
A plot of the cumulative number of accepted targets.
"""
# Change pd.Series to np.ndarray
if isinstance(qvalues, pd.Series):
qvalues = qvalues.values
if isinstance(target, pd.Series):
target = target.values
elif target is None:
target = np.ones(len(qvalues))
# Check arguments
msg = "'{}' must be a 1D numpy.ndarray or pandas.Series"
if not isinstance(qvalues, np.ndarray):
raise ValueError(msg.format("qvalues"))
elif len(qvalues.shape) != 1:
raise ValueError(msg.format("qvalues"))
if not isinstance(target, np.ndarray):
raise ValueError(msg.format("target"))
elif len(target.shape) != 1:
raise ValueError(msg.format("target"))
if qvalues.shape[0] != target.shape[0]:
raise ValueError("'qvalues' and 'target' must be the same length.")
try:
target = np.array(target, dtype=bool)
except ValueError:
raise ValueError("'target' should be boolean.")
try:
threshold = float(threshold)
except ValueError:
raise ValueError("'threshold' should be numeric.")
if ax is None:
ax = plt.gca()
elif not isinstance(ax, plt.Axes):
raise ValueError("'ax' must be a matplotlib Axes instance.")
# Calculate cumulative targets at each q-value
qvalues = pd.Series(qvalues, name="qvalues")
target = | pd.Series(target, name="target") | pandas.Series |
# Intro from here
# https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?utm_source=mlcc&utm_campaign=colab-external&utm_medium=referral&utm_content=pandas-colab&hl=en#scrollTo=U5ouUp1cU6pC
from __future__ import print_function
import pandas as pd
import numpy as np
pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
city_names = | pd.Series(['San Francisco', 'San Jose', 'Sacramento']) | pandas.Series |
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import constraint
class TestConstraint(unittest.TestCase):
def test_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
condition1 = {"column": "c1", "operator": "lt", "value": 1000}
condition2 = {"column": "c1", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0], [1], conditions, df)[0]
self.assertEqual(r, 100.)
def test_allnull(self):
df = pd.DataFrame()
df["c1"] = [None for _ in range(100)]
df["c2"] = [np.NaN for _ in range(100)]
df["c3"] = [None for _ in range(100)]
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 100.0)
def test_allnull_with_conditions(self):
df = pd.DataFrame()
df["c1"] = [None for _ in range(100)]
df["c2"] = [None for _ in range(100)]
df["c3"] = [np.NaN for _ in range(100)]
condition1 = {"column": "c1", "operator": "lt", "value": 1000}
condition2 = {"column": "c1", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0, 1], [2], conditions, df)[0]
self.assertEqual(r, 100.0)
def test_halfnull_halfequal_respected(self):
df = pd.DataFrame()
c1 = [chr(1) for _ in range(50)]
c2 = [2 for _ in range(50)]
c3 = [2 / 0.6 for _ in range(50)]
c1.extend([None for _ in range(50)])
c2.extend([np.NaN for _ in range(50)])
c3.extend([None for _ in range(50)])
df["c1"] = c1
df["c2"] = c2
df["c3"] = c3
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 100.0)
condition1 = {"column": "c2", "operator": "lt", "value": 2}
condition2 = {"column": "c2", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0, 1], [2], conditions, df)[0]
self.assertEqual(r, 100.0)
r = constraint([0, 2], [1], conditions, df)[0]
self.assertEqual(r, 100.0)
def test_halfnull_halfequal_notrespected1(self):
df = pd.DataFrame()
c1 = [chr(1) for _ in range(50)]
c2 = [2 for _ in range(50)]
c3 = [2 / 0.6 for _ in range(50)]
c1.extend([None for _ in range(50)])
c2.extend([None for _ in range(50)])
c3.extend([np.NaN for _ in range(40)])
c3.extend([10. for _ in range(10)])
df["c1"] = c1
df["c2"] = c2
df["c3"] = c3
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 50.0)
condition1 = {"column": "c2", "operator": "lt", "value": 3}
condition2 = {"column": "c2", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0, 1], [2], conditions, df)[0]
self.assertEqual(r, 100.0)
r = constraint([0, 2], [1], conditions, df)[0]
self.assertEqual(r, 100.0)
def test_halfnull_halfequal_notrespected2(self):
df = pd.DataFrame()
c1 = [chr(1) for _ in range(50)]
c2 = [2 for _ in range(50)]
c3 = [2 / 0.6 for _ in range(40)]
c3.extend(6. for _ in range(10))
c1.extend([None for _ in range(50)])
c2.extend([None for _ in range(50)])
c3.extend([None for _ in range(50)])
df["c1"] = c1
df["c2"] = c2
df["c3"] = c3
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 50.0)
condition1 = {"column": "c2", "operator": "lt", "value": 3}
condition2 = {"column": "c2", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([2, 1], [0], conditions, df)[0]
self.assertEqual(r, 100.0)
r = constraint([2], [0], conditions, df)[0]
self.assertEqual(r, 100.0)
r = constraint([1], [0], conditions, df)[0]
self.assertEqual(r, 100.0)
def test_halfnull_halfequal_notrespected3(self):
df = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unbale to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 18:37:28 2020
@author: <NAME>
Airway Atlas: https://www.genomique.eu/cellbrowser/HCA/
"""
#%%
import pandas as pd
from matplotlib import pyplot as plt
import scanpy as sc
import matplotlib as mpl
import numpy as np
import os
#%% Set scanpy and matplotlib settings
sc.settings.figdir = 'data/protease-expression/cluster-plots/airway-atlas/'
sc.settings.verbosity = 4
mpl.rcParams.update(mpl.rcParamsDefault)
mpl.rcParams.update({
'font.sans-serif': 'Arial',
'font.family': 'sans-serif',
'axes.titlesize': 18,
'axes.labelsize': 18,
'font.size': 18,
})
#%%
df = pd.read_csv('data/raw-data/airway-atlas/exprMatrix.tsv', sep='\t', index_col=0,)
#%%
adata = sc.AnnData(df.T)
sc.pp.filter_cells(adata, min_genes=500)
sc.pp.highly_variable_genes(adata)
sc.tl.pca(adata)
sc.pp.neighbors(adata)
#%%
LEARNING_RATE = 1000
EARLY_EXAGGERATION = 12
RESOLUTION = 1.5
PERPLEXITY=130
sc.tl.tsne(adata, learning_rate=LEARNING_RATE, n_jobs=8, early_exaggeration=EARLY_EXAGGERATION, perplexity=PERPLEXITY)
sc.tl.leiden(adata, resolution=RESOLUTION)
#%%
params = {'learning_rate': LEARNING_RATE,
'early_exaggeration':EARLY_EXAGGERATION,
'resolution': RESOLUTION,
'perplexity': PERPLEXITY,
'genes': 'all'}
pd.Series(params).to_csv(os.path.join(sc.settings.figdir, 'params.txt'))
adata.write(os.path.join(sc.settings.figdir, 'adata.h5ad'))
#%%
markers = pd.read_csv('data/highlighted_genes.csv', header=None, names=['gene', 'cluster'])
markers['gene'] = markers['gene'].str.upper()
markers = markers[markers['gene'].isin(adata.var.index)]
markers['title'] = markers['gene'] + '+: ' + markers['cluster']
markers = markers.set_index('gene')
markers.loc['PTPRC']['title'] = 'PTPRC (CD45)+: Immune Cells'
markers.loc['leiden'] = ['Leiden', 'Clusters']
#%%
addl_genes = pd.read_csv(os.path.join(sc.settings.figdir, 'additional_genes.csv'), header=None)
addl_genes['title'] = addl_genes[0]
addl_genes = addl_genes.set_index(0)
markers = markers.append(addl_genes)
#%%
sc.pl.tsne(adata, color='leiden',
title='Leiden',
color_map='plasma',
size=25,
save='_' + 'leiden' + '_all.pdf',
show=False)
#%%
for i, g in markers.iterrows():
sc.pl.tsne(adata, color=i,
title=g['title'],
color_map='plasma',
size=25,
save='_' + i + '_all.pdf',
show=False)
#%%
adata.obs['TMPRS11F'] = 0
proteases = ['TMPRSS2', 'TMPRSS13', 'TMPRSS11D', 'TMPRSS11E']
for i in proteases:
sc.pl.tsne(adata, color=i,
title=i,
color_map='plasma',
size=25,
save='_' + i + '_all.pdf',
show=False)
sc.pl.tsne(adata, color='TMPRS11F',
title='TMPRS11F',
color_map='plasma',
size=25,
save='_' + 'TMPRS11F' + '_all.pdf',
vmin=0,
vmax=1,
show=False)
#%%
any_tmprss_meta = ((adata.to_df()[proteases] > 0).sum(axis=1) > 0)
any_tmprss_meta = any_tmprss_meta.map({True: 'Present', False: 'Absent'}).astype('category').cat.reorder_categories(['Present', 'Absent'])
adata.obs['any_tmprss_metagene'] = any_tmprss_meta
fig = sc.pl.tsne(adata, color='any_tmprss_metagene',
title='TTSP-expressing cells', sort_order=True,
palette=['#1fb5f0', '#DCDCDC'],
size=25,
groups=['Present'],
save='_' + 'tmprss_metagene' + '_all.png',
return_fig = True,
show=False)
legend_elements = [plt.Line2D([0], [0], marker='o', color='#1fb5f0', label='TTSP-positive', linestyle='None'),
plt.Line2D([0],[0], marker='o', color='#DCDCDC', label='TTSP-negative', markerfacecolor='#DCDCDC', linestyle='None')]
fig.axes[0].legend(handles=legend_elements, frameon=False,
loc='center left',
bbox_to_anchor=(1, 0.5),)
plt.show()
fig.savefig(os.path.join(sc.settings.figdir, 'tsne_tmprss_metagene_all-ttsp.pdf'), bbox_inches='tight')
#%%
sc.tl.rank_genes_groups(adata, 'leiden', method='t-test', n_genes=20)
| pd.DataFrame(adata.uns['rank_genes_groups']['names']) | pandas.DataFrame |
#!/usr/bin/python3
import math
from statistics import mean
import dpkt, datetime, glob, os, csv
import socket
from pathlib import Path
import matplotlib
from PIL import Image
from matplotlib.colors import Normalize
from matplotlib.pyplot import cm
from collections import deque
from sklearn import metrics
import numpy as np
import pandas as pd
import joblib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn.manifold import TSNE
import seaborn as sns
import hdbscan
import time
from graphviz import render
from util.numba_cosine import cosine_similarity_numba
from util.odtw import _dtw_distance
class MalpacaMeImprovedWindowNetflow():
expname = 'exp'
window_size = 20
RPY2 = False
totalconn = 0
def __init__(self, path_to_folder, path_to_results, path_to_detailed_label_folder, expname, window_size, RPY2):
self.path_to_folder = path_to_folder
self.path_to_detailed_label_folder = path_to_detailed_label_folder
self.expname = expname
self.window_size = window_size
self.RPY2 = RPY2
path_to_results = path_to_results
os.mkdir(path_to_results + "/" + expname)
self.path_to_store = str(Path.joinpath(Path(path_to_results), expname)) + "/"
self.readfolde_window()
if RPY2 == True:
pass
def difference(self, str1, str2):
return sum([str1[x] != str2[x] for x in range(len(str1))])
# @profile
def connlevel_sequence(self, metadata, mapping):
inv_mapping = {v: k for k, v in mapping.items()}
data = metadata
timing = {}
values = list(data.values())
keys = list(data.keys())
distm = []
labels = []
ipmapping = []
# save intermediate results
path_to_intermediate_results = self.path_to_store + "/intermediate_results/"
os.mkdir(path_to_intermediate_results)
path_to_features = path_to_intermediate_results +"/features/"
os.mkdir(path_to_features)
path_to_distances = path_to_intermediate_results +"/distances/"
os.mkdir(path_to_distances)
addition = '_' + self.expname + '_' + str(self.window_size)
# ----- start porting -------
utils, r = None, None
for n, feat in [(1, 'bytes'), (0, 'gaps'), (3, 'sport'), (4, 'dport')]:
f = open(path_to_features + feat + '-features' + addition + '.txt', 'w')
for val in values:
vi = [str(x[n]) for x in val]
f.write(','.join(vi))
f.write("\n")
f.close()
startb = time.time()
start_time = time.time()
filename = path_to_distances + 'bytesDist' + addition + '.txt'
print("Starting bytes dist")
distm = [-1] * len(data.values())
distm = [[-1] * len(data.values()) for i in distm]
for a in range(len(data.values())): # range(10):
labels.append(mapping[keys[a]])
ipmapping.append((mapping[keys[a]], inv_mapping[mapping[keys[a]]]))
for b in range(a + 1):
i = [x[1] for x in values[a]][:self.window_size]
j = [x[1] for x in values[b]][:self.window_size]
if len(i) == 0 or len(j) == 0: continue
if a == b:
distm[a][b] = 0.0
else:
first_array = np.array(i)
second_array = np.array(j)
dist = _dtw_distance(first_array, second_array)
distm[a][b] = dist
distm[b][a] = dist
with open(filename, 'w') as outfile:
for a in range(len(distm)): # len(data.values())): #range(10):
outfile.write(' '.join([str(e) for e in distm[a]]) + "\n")
outfile.close()
with open(path_to_intermediate_results + 'labels' + addition + '.txt', 'w') as outfile:
outfile.write(' '.join([str(l) for l in labels]) + '\n')
outfile.close()
with open(path_to_intermediate_results + 'mapping' + addition + '.txt', 'w') as outfile:
outfile.write(' '.join([str(l) for l in ipmapping]) + '\n')
outfile.close()
endb = time.time()
print('Time bytes: ' + str(round((endb - startb), 3)))
ndistmB = []
mini = min(min(distm))
maxi = max(max(distm))
for a in range(len(distm)):
ndistmB.append([])
for b in range(len(distm)):
normed = (distm[a][b] - mini) / (maxi - mini)
ndistmB[a].append(normed)
startg = time.time()
distm = []
filename = path_to_distances + 'gapsDist' + addition + '.txt'
print("Starting gaps dist")
distm = [-1] * len(data.values())
distm = [[-1] * len(data.values()) for i in distm]
for a in range(len(data.values())): # range(10):
for b in range(a + 1):
i = [x[0] for x in values[a]][:self.window_size]
j = [x[0] for x in values[b]][:self.window_size]
if len(i) == 0 or len(j) == 0: continue
if a == b:
distm[a][b] = 0.0
else:
first_array = np.array(i)
second_array = np.array(j)
dist = _dtw_distance(first_array, second_array)
distm[a][b] = dist
distm[b][a] = dist
with open(filename, 'w') as outfile:
for a in range(len(distm)): # len(data.values())): #range(10):
# print distm[a]
outfile.write(' '.join([str(e) for e in distm[a]]) + "\n")
endg = time.time()
print('Time gaps: ' + str(round((endg - startg), 3)))
ndistmG = []
mini = min(min(distm))
maxi = max(max(distm))
for a in range(len(distm)): # len(data.values())): #range(10):
ndistmG.append([])
for b in range(len(distm)):
normed = (distm[a][b] - mini) / (maxi - mini)
ndistmG[a].append(normed)
# source port
ndistmS = []
distm = []
starts = time.time()
filename = path_to_distances + 'sportDist' + addition + '.txt'
same, diff = set(), set()
print("Starting sport dist")
distm = [-1] * len(data.values())
distm = [[-1] * len(data.values()) for i in distm]
ngrams = []
for a in range(len(values)):
profile = dict()
dat = [x[3] for x in values[a]][:self.window_size]
li = zip(dat, dat[1:], dat[2:])
for b in li:
if b not in profile.keys():
profile[b] = 0
profile[b] += 1
ngrams.append(profile)
profiles = []
# update for arrays
assert len(ngrams) == len(values)
for a in range(len(ngrams)):
for b in range(a + 1):
if a == b:
distm[a][b] = 0.0
else:
i = ngrams[a]
j = ngrams[b]
ngram_all = list(set(i.keys()) | set(j.keys()))
i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]
j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]
#dist = cosine(i_vec, j_vec)
first_array = np.array(i_vec)
second_array = np.array(j_vec)
dist = round(cosine_similarity_numba(first_array, second_array), 8)
distm[a][b] = dist
distm[b][a] = dist
with open(filename, 'w') as outfile:
for a in range(len(distm)):
outfile.write(' '.join([str(e) for e in distm[a]]) + "\n")
ends = time.time()
print('Sport time: ' + str(round((ends - starts), 3)))
for a in range(len(distm)):
ndistmS.append([])
for b in range(len(distm)):
ndistmS[a].append(distm[a][b])
# dest port
ndistmD = []
distm = []
startd = time.time()
filename = path_to_distances + 'dportDist' + addition + '.txt'
print("Starting dport dist")
distm = [-1] * len(data.values())
distm = [[-1] * len(data.values()) for i in distm]
ngrams = []
for a in range(len(values)):
profile = dict()
dat = [x[4] for x in values[a]][:self.window_size]
li = zip(dat, dat[1:], dat[2:])
for b in li:
if b not in profile.keys():
profile[b] = 0
profile[b] += 1
ngrams.append(profile)
assert len(ngrams) == len(values)
for a in range(len(ngrams)):
for b in range(a + 1):
if a == b:
distm[a][b] = 0.0
else:
i = ngrams[a]
j = ngrams[b]
ngram_all = list(set(i.keys()) | set(j.keys()))
i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]
j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]
#dist = round(cosine(i_vec, j_vec), 8)
first_array = np.array(i_vec)
second_array = np.array(j_vec)
dist = round(cosine_similarity_numba(first_array, second_array), 8)
distm[a][b] = dist
distm[b][a] = dist
with open(filename, 'w') as outfile:
for a in range(len(distm)):
outfile.write(' '.join([str(e) for e in distm[a]]) + "\n")
endd = time.time()
print('Time dport: ' + str(round((endd - startd), 3)))
mini = min(min(distm))
maxi = max(max(distm))
for a in range(len(distm)):
ndistmD.append([])
for b in range(len(distm)):
ndistmD[a].append(distm[a][b])
ndistm = []
for a in range(len(ndistmS)):
ndistm.append([])
for b in range(len(ndistmS)):
ndistm[a].append((ndistmB[a][b] + ndistmG[a][b] + ndistmD[a][b] + ndistmS[a][b]) / 4.0)
print("Done with distance measurement")
print("----------------")
###################
# Data Clustering #
###################
print("TSNE Projection 1")
graphs_folder = self.path_to_store + "/graphs_folder"
os.mkdir(graphs_folder)
path_clustering_results = graphs_folder + "/clustering_results/"
os.mkdir(path_clustering_results)
plot_kwds = {'alpha': 0.5, 's': 80, 'linewidths': 0}
RS = 3072018
projection = TSNE(random_state=RS).fit_transform(ndistm)
plt.scatter(*projection.T)
plt.savefig(path_clustering_results + "tsne-result" + addition)
plt.close()
plt.clf()
#########
# Model #
#########
path_to_model = path_to_intermediate_results +"/model/"
os.mkdir(path_to_model)
size = 7
sample = 7
model = hdbscan.HDBSCAN(min_cluster_size=size, min_samples=sample, cluster_selection_method='leaf',
metric='precomputed')
clu = model.fit(np.array([np.array(x) for x in ndistm])) # final for citadel and dridex
input_array = np.array([np.array(x) for x in ndistm])
validity_index = hdbscan.validity_index(X=input_array, labels=clu.labels_, metric='precomputed', d=4)
unique_labels = np.unique(np.array(clu.labels_))
if (len(unique_labels) >= 2):
silhouette_score = round(metrics.silhouette_score(X=input_array, labels=np.array(clu.labels_), metric='precomputed'), 3)
else:
silhouette_score = "nan"
joblib.dump(clu, path_to_model + 'model' + addition + '.pkl')
print("Num clusters: " + str(len(set(clu.labels_)) - 1))
end_time = time.time()
avg = 0.0
for l in list(set(clu.labels_)):
if l != -1:
avg += sum([(1 if x == l else 0) for x in clu.labels_])
#print("average size of cluster:" + str(float(avg) / float(len(set(clu.labels_)) - 1)))
print("Samples in noise: " + str(sum([(1 if x == -1 else 0) for x in clu.labels_])))
########################
# Creating Projections #
########################
print("Creating projections")
cols = ['royalblue', 'red', 'darksalmon', 'sienna', 'mediumpurple', 'palevioletred', 'plum', 'darkgreen',
'lightseagreen', 'mediumvioletred', 'gold', 'navy', 'sandybrown', 'darkorchid', 'olivedrab', 'rosybrown',
'maroon', 'deepskyblue', 'silver']
pal = sns.color_palette(cols) #
extra_cols = len(set(clu.labels_)) - 18
pal_extra = sns.color_palette('Paired', extra_cols)
pal.extend(pal_extra)
col = [pal[x] for x in clu.labels_]
assert len(clu.labels_) == len(ndistm)
mem_col = [sns.desaturate(x, p) for x, p in zip(col, clu.probabilities_)]
plt.scatter(*projection.T, s=50, linewidth=0, c=col, alpha=0.2)
for i, txt in enumerate(clu.labels_):
realind = labels[i]
name = inv_mapping[realind]
plt.scatter(projection.T[0][i], projection.T[1][i], color=col[i], alpha=0.6)
if txt == -1:
continue
plt.annotate(txt, (projection.T[0][i], projection.T[1][i]), color=col[i], alpha=0.6)
plt.savefig(path_clustering_results + "clustering-result" + addition)
plt.close()
plt.clf()
print("----------------")
#####################
# Creating CSV file #
#####################
print("Writing csv file")
path_to_summaries = self.path_to_store + "/summaries/"
os.mkdir(path_to_summaries)
summary_csv_file_path = path_to_summaries + 'summary' + addition + '.csv'
summary_list = []
final_clusters = {}
final_probs = {}
for lab in set(clu.labels_):
occ = [i for i, x in enumerate(clu.labels_) if x == lab]
final_probs[lab] = [x for i, x in zip(clu.labels_, clu.probabilities_) if i == lab]
print("cluster: " + str(lab) + " num items: " + str(len([labels[x] for x in occ])))
final_clusters[lab] = [labels[x] for x in occ]
outfile = open(summary_csv_file_path, 'w')
outfile.write("clusnum,connnum,probability,scenario,file,src_ip,dst_ip,ip_protocol,src_port,dst_port,window\n")
for n, clus in final_clusters.items():
for idx, el in enumerate([inv_mapping[x] for x in clus]):
ip = el.split('->')
name = ip[0]
scenario = name.split("_", maxsplit=1)[0]
filename = name.split("_", maxsplit=1)[1]
src_ip = ip[1]
dst_ip = ip[2]
protocol = ip[3]
src_port = ip[4]
dst_port = ip[5]
window = ip[6]
new_line = str(n) + "," + str(mapping[el]) + "," + str(final_probs[n][idx]) + "," + str(scenario) + "," + str(filename) + "," + src_ip + "," + dst_ip + "," + str(protocol) + "," + str(src_port) + "," + str(dst_port) + "," + window + "\n"
outfile.write(new_line)
new_line_summary = [n, mapping[el], final_probs[n][idx], scenario, filename, src_ip, dst_ip, protocol, src_port, dst_port, window, 0]
summary_list.append(new_line_summary)
outfile.close()
other_csv_files = glob.glob(self.path_to_folder + "/*.csv")
for index, csv_file_path in enumerate(other_csv_files):
temp_df = pd.read_csv(csv_file_path)
if index == 0:
combined_df = temp_df
else:
combined_df = combined_df.append(temp_df)
csv_df = pd.read_csv(summary_csv_file_path)
csv_df = csv_df.sort_values(by=['src_ip', 'dst_ip', "ip_protocol", "src_port", "dst_port"])
combined_df = combined_df.sort_values(by=['src_ip', 'dst_ip', "ip_protocol", "src_port", "dst_port"])
combined_df["src_ip"] = combined_df["src_ip"].apply(lambda x: str(x).strip())
combined_df["dst_ip"] = combined_df["dst_ip"].apply(lambda x: str(x).strip())
combined_df["src_port"] = combined_df["src_port"].apply(lambda x: str(x).strip())
combined_df["dst_port"] = combined_df["dst_port"].apply(lambda x: str(x).strip())
combined_df["ip_protocol"] = combined_df["ip_protocol"].apply(lambda x: str(x).strip())
combined_df["src_ip"] = combined_df["src_ip"].astype(str)
combined_df["dst_ip"] = combined_df["dst_ip"].astype(str)
combined_df["src_port"] = combined_df["src_port"].astype(str)
combined_df["dst_port"] = combined_df["dst_port"].astype(str)
combined_df["ip_protocol"] = combined_df["ip_protocol"].astype(str)
csv_df["src_ip"] = csv_df["src_ip"].apply(lambda x: str(x).strip())
csv_df["dst_ip"] = csv_df["dst_ip"].apply(lambda x: str(x).strip())
csv_df["src_port"] = csv_df["src_port"].apply(lambda x: str(x).strip())
csv_df["dst_port"] = csv_df["dst_port"].apply(lambda x: str(x).strip())
csv_df["ip_protocol"] = csv_df["ip_protocol"].apply(lambda x: str(x).strip())
csv_df["src_ip"] = csv_df["src_ip"].astype(str)
csv_df["dst_ip"] = csv_df["dst_ip"].astype(str)
csv_df["src_port"] = csv_df["src_port"].astype(str)
csv_df["dst_port"] = csv_df["dst_port"].astype(str)
csv_df["ip_protocol"] = csv_df["ip_protocol"].astype(str)
csv_df = csv_df.merge(right=combined_df, on=['src_ip', 'dst_ip', 'window', "ip_protocol", "src_port", "dst_port", 'scenario', 'file'], how="left")
csv_df = csv_df.sort_values(by="clusnum")
csv_df.to_csv(summary_csv_file_path, index=False)
###############
# Reliability #
###############
print("Determining Reliability")
path_to_reliability = self.path_to_store +"/reliability/"
os.mkdir(path_to_reliability)
path_to_reliability_summary = path_to_summaries + 'reliability_summary' + addition + '.csv'
reliability_info_csv_file = path_to_reliability + 'reliability_info' + addition + '.csv'
summary_list_columns = ["clusnum", "connnum", "probability", "scenario", "file", "src_ip", "dst_ip", "ip_protocol", "src_port", "dst_port", "window", "run"]
for run_index in range(1, 10):
size = 7
sample = 7
temp_model = hdbscan.HDBSCAN(min_cluster_size=size, min_samples=sample, cluster_selection_method='leaf',
metric='precomputed')
new_clu = temp_model.fit(np.array([np.array(x) for x in ndistm]))
final_clusters = {}
final_probs = {}
for lab in set(new_clu.labels_):
occ = [i for i, x in enumerate(new_clu.labels_) if x == lab]
final_probs[lab] = [x for i, x in zip(new_clu.labels_, new_clu.probabilities_) if i == lab]
final_clusters[lab] = [labels[x] for x in occ]
for n, clus in final_clusters.items():
for idx, el in enumerate([inv_mapping[x] for x in clus]):
ip = el.split('->')
name = ip[0]
scenario = name.split("_", maxsplit=1)[0]
filename = name.split("_", maxsplit=1)[1]
src_ip = ip[1]
dst_ip = ip[2]
protocol = ip[3]
src_port = ip[4]
dst_port = ip[5]
window = ip[6]
run = run_index
new_line_summary_list = [n, mapping[el], final_probs[n][idx], scenario, filename, src_ip, dst_ip, protocol, src_port, dst_port, window, run]
summary_list.append(new_line_summary_list)
reliability_df = pd.DataFrame.from_records(summary_list, columns=summary_list_columns)
reliability_df.to_csv(reliability_info_csv_file, index=False)
cluster_distribution_df = reliability_df.groupby("connnum")["clusnum"].value_counts().to_frame()
cluster_distribution_df = cluster_distribution_df.rename(columns={"clusnum": "#_occurrences_clusnum"})
cluster_distribution_df = cluster_distribution_df.reset_index()
less_ten_same_cluster_df = cluster_distribution_df[cluster_distribution_df["#_occurrences_clusnum"] < 10]
percentage_cluster_change = round((len(less_ten_same_cluster_df) / len(reliability_df[reliability_df["run"] == 0])) * 100, 3)
cluster_probability_df = reliability_df.groupby("connnum")["probability"].value_counts().to_frame()
cluster_probability_df = cluster_probability_df.rename(columns={"probability": "#_occurrences_probability"})
cluster_probability_df = cluster_probability_df.reset_index()
less_ten_same_probability_df = cluster_probability_df[cluster_probability_df["#_occurrences_probability"] < 10]
percentage_probability_change = round((len(less_ten_same_probability_df) / len(reliability_df[reliability_df["run"] == 0])) * 100, 3)
data = {"percentage_cluster_change": percentage_cluster_change, "percentage_probability_change": percentage_probability_change}
reliability_summary_df = pd.DataFrame(data, index=[0])
reliability_summary_df.to_csv(path_to_reliability_summary, index=False)
#################
# Producing DAG #
#################
print('Producing DAG with relationships between pcaps')
os.mkdir(Path.joinpath(Path(graphs_folder), "dag"))
path_to_dag_results = str(Path.joinpath(Path(graphs_folder), "dag")) + "/"
clusters = {}
numclus = len(set(clu.labels_))
with open(summary_csv_file_path, 'r') as f1:
reader = csv.reader(f1, delimiter=',')
for i, line in enumerate(reader): # f1.readlines()[1:]:
if i > 0:
if line[3] not in clusters.keys():
clusters[line[3]] = []
clusters[line[3]].append((line[6], line[0])) # classname, cluster#
# print(clusters)
f1.close()
array = [str(x) for x in range(numclus - 1)]
array.append("-1")
treeprep = dict()
for filename, val in clusters.items():
arr = [0] * numclus
for fam, clus in val:
ind = array.index(clus)
arr[ind] = 1
# print(filename, )
mas = ''.join([str(x) for x in arr[:-1]])
famname = fam
if mas not in treeprep.keys():
treeprep[mas] = dict()
if famname not in treeprep[mas].keys():
treeprep[mas][famname] = set()
treeprep[mas][famname].add(str(filename))
f2 = open(path_to_dag_results + 'mas-details' + addition + '.csv', 'w')
for k, v in treeprep.items():
for kv, vv in v.items():
f2.write(str(k) + ';' + str(kv) + ';' + str(len(vv)) + '\n')
f2.close()
with open(path_to_dag_results + 'mas-details' + addition + '.csv', 'rU') as f3:
csv_reader = csv.reader(f3, delimiter=';')
graph = {}
names = {}
for line in csv_reader:
graph[line[0]] = set()
if line[0] not in names.keys():
names[line[0]] = []
names[line[0]].append(line[1] + "(" + line[2] + ")")
zeros = ''.join(['0'] * (numclus - 1))
if zeros not in graph.keys():
graph[zeros] = set()
ulist = graph.keys()
covered = set()
next = deque()
specials = []
next.append(zeros)
while (len(next) > 0):
l1 = next.popleft()
covered.add(l1)
for l2 in ulist:
if l2 not in covered and self.difference(l1, l2) == 1:
graph[l1].add(l2)
if l2 not in next:
next.append(l2)
val = set()
for v in graph.values():
val.update(v)
notmain = [x for x in ulist if x not in val]
notmain.remove(zeros)
nums = [sum([int(y) for y in x]) for x in notmain]
notmain = [x for _, x in sorted(zip(nums, notmain))]
specials = notmain
extras = set()
for nm in notmain:
comp = set()
comp.update(val)
comp.update(extras)
mindist = 1000
minli1, minli2 = None, None
for l in comp:
if nm != l:
diff = self.difference(nm, l)
if diff < mindist:
mindist = diff
minli = l
diffbase = self.difference(nm, zeros)
if diffbase <= mindist:
mindist = diffbase
minli = zeros
num1 = sum([int(s) for s in nm])
num2 = sum([int(s) for s in minli])
if num1 < num2:
graph[nm].add(minli)
else:
graph[minli].add(nm)
extras.add(nm)
val = set()
for v in graph.values():
val.update(v)
f2 = open(path_to_dag_results + 'relation-tree' + addition + '.dot', 'w')
f2.write("digraph dag {\n")
f2.write("rankdir=LR;\n")
num = 0
for idx, li in names.items():
text = ''
name = str(idx) + '\n'
for l in li:
name += l + ',\n'
if idx not in specials:
text = str(idx) + " [label=\"" + name + "\" , shape=box;]"
else: # treat in a special way. For now, leaving intact
text = str(idx) + " [shape=box label=\"" + name + "\"]"
f2.write(text)
f2.write('\n')
for k, v in graph.items():
for vi in v:
f2.write(str(k) + "->" + str(vi))
f2.write('\n')
f2.write("}")
f2.close()
# Rendering DAG
try:
filename = path_to_dag_results + 'relation-tree' + addition + '.dot'
# src = Source(source=test)
# new_name = self.path_to_store + "DAG" + addition + '.png'
# src.render(new_name, view=True)
render('dot', 'png', filename)
except:
print('Rendering DAG')
# os.system('dot -Tpng relation-tree' + addition + '.dot -o DAG' + addition + '.png')
# print('Done')
#############################
# Original Dataset Analysis #
#############################
print("Analyzing Original Dataset")
original_dataset_analysis = self.path_to_store + "/original_dataset_analysis/"
os.mkdir(original_dataset_analysis)
combined_summary_path = original_dataset_analysis + "/combined_summary/"
os.mkdir(combined_summary_path)
length_summary_path = original_dataset_analysis + "/length_summary/"
os.mkdir(length_summary_path)
ratios_path = original_dataset_analysis + "/ratios/"
os.mkdir(ratios_path)
combined_csv_path = combined_summary_path + "/combined_summary.csv"
path_detailed_label_csv = length_summary_path + "/detailed_label_summary_" + addition + ".csv"
path_label_csv = length_summary_path + "/label_summary_" + addition + ".csv"
path_application_name_csv = length_summary_path + "/application_name_summary_" + addition + ".csv"
path_application_category_name_csv = length_summary_path + "/application_category_name_summary_" + addition + ".csv"
path_name_csv = length_summary_path + "/name_summary_" + addition + ".csv"
path_detailed_label_table = length_summary_path + "/detailed_label_info_" + addition + ".png"
path_label_table = length_summary_path + "/label_info_" + addition + ".png"
path_application_name_table = length_summary_path + "/application_name_info_" + addition + ".png"
path_application_category_name_table = length_summary_path + "/application_category_name_info_" + addition + ".png"
path_name_table = length_summary_path + "/name_info_" + addition + ".png"
total_ratio_path = ratios_path + "/" + addition + "_total_ratio.csv"
relative_ratio_path = ratios_path + "/" + addition + "_relative_ratio.csv"
# summary creation
csv_files = glob.glob(self.path_to_folder + "/*.csv")
df_list = []
for csv_file_path in csv_files:
temp_df = pd.read_csv(csv_file_path)
df_list.append(temp_df)
combined_summary_df = df_list.pop()
loop_length = len(df_list)
for to_add_df in range(loop_length):
combined_summary_df = combined_summary_df.append(df_list.pop())
combined_summary_df.to_csv(index=False, path_or_buf=combined_csv_path)
# length analysis
total_amount_connections = len(combined_summary_df.index)
dl_average_length_df = combined_summary_df.groupby("detailed_label")[
"connection_length"].mean().to_frame().reset_index()
dl_average_length_df = dl_average_length_df.rename(columns={"connection_length": "avg_connection_length"})
dl_average_length_df["avg_connection_length"] = dl_average_length_df["avg_connection_length"].apply(
lambda x: round(x, 2))
dl_con_count_df = combined_summary_df.groupby("detailed_label")["connection_length"].count().to_frame().reset_index()
dl_con_count_df = dl_con_count_df.rename(columns={"connection_length": "connection_count"})
detailed_label_info_df = dl_average_length_df.merge(right=dl_con_count_df, on="detailed_label")
detailed_label_info_df["ratio"] = round(
(detailed_label_info_df["connection_count"] / total_amount_connections) * 100, 4)
detailed_label_info_df = detailed_label_info_df.sort_values(by="connection_count", ascending=False)
detailed_label_info_df.to_csv(path_detailed_label_csv, index=False)
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=detailed_label_info_df.values, colLabels=detailed_label_info_df.columns, loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(detailed_label_info_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout(pad=3.0)
plt.savefig(path_detailed_label_table, dpi=1200, bbox_inches='tight')
plt.close()
plt.clf()
l_average_length_df = combined_summary_df.groupby("label")["connection_length"].mean().to_frame().reset_index()
l_average_length_df = l_average_length_df.rename(columns={"connection_length": "avg_connection_length"})
l_average_length_df["avg_connection_length"] = l_average_length_df["avg_connection_length"].apply(
lambda x: round(x, 2))
l_con_count_df = combined_summary_df.groupby("label")["connection_length"].count().to_frame().reset_index()
l_con_count_df = l_con_count_df.rename(columns={"connection_length": "connection_count"})
label_info_df = l_average_length_df.merge(right=l_con_count_df, on="label")
label_info_df["ratio"] = round((label_info_df["connection_count"] / total_amount_connections) * 100, 4)
label_info_df = label_info_df.sort_values(by="connection_count", ascending=False)
label_info_df.to_csv(path_label_csv, index=False)
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=label_info_df.values, colLabels=label_info_df.columns, loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(label_info_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout(pad=3.0)
plt.savefig(path_label_table, dpi=1200, bbox_inches='tight')
plt.close()
plt.clf()
name_average_length_df = combined_summary_df.groupby("name")["connection_length"].mean().to_frame().reset_index()
name_average_length_df = name_average_length_df.rename(columns={"connection_length": "avg_connection_length"})
name_average_length_df["avg_connection_length"] = name_average_length_df["avg_connection_length"].apply(
lambda x: round(x, 2))
name_con_count_df = combined_summary_df.groupby("name")["connection_length"].count().to_frame().reset_index()
name_con_count_df = name_con_count_df.rename(columns={"connection_length": "connection_count"})
name_info_df = name_average_length_df.merge(right=name_con_count_df, on="name")
name_info_df["ratio"] = round((name_info_df["connection_count"] / total_amount_connections) * 100, 4)
name_info_df = name_info_df.sort_values(by="connection_count", ascending=False)
name_info_df.to_csv(path_name_csv, index=False)
name_info_df["name"] = name_info_df["name"].apply(lambda x: x[0:30])
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=name_info_df.values, colLabels=name_info_df.columns, loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(name_info_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout(pad=3.0)
plt.savefig(path_name_table, dpi=1200, bbox_inches='tight')
plt.close()
plt.clf()
acn_average_length_df = combined_summary_df.groupby("application_category_name")[
"connection_length"].mean().to_frame().reset_index()
acn_average_length_df = acn_average_length_df.rename(columns={"connection_length": "avg_connection_length"})
acn_average_length_df["avg_connection_length"] = acn_average_length_df["avg_connection_length"].apply(
lambda x: round(x, 2))
acn_con_count_df = combined_summary_df.groupby("application_category_name")[
"connection_length"].count().to_frame().reset_index()
acn_con_count_df = acn_con_count_df.rename(columns={"connection_length": "connection_count"})
application_category_name_info_df = acn_average_length_df.merge(right=acn_con_count_df,
on="application_category_name")
application_category_name_info_df["ratio"] = round(
(application_category_name_info_df["connection_count"] / total_amount_connections) * 100, 4)
application_category_name_info_df = application_category_name_info_df.sort_values(by="connection_count", ascending=False)
application_category_name_info_df.to_csv(path_application_category_name_csv, index=False)
application_category_name_info_df["application_category_name"] = application_category_name_info_df[
"application_category_name"].apply(lambda x: x[0:30])
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=application_category_name_info_df.values,
colLabels=application_category_name_info_df.columns, loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(application_category_name_info_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
cell.set_height(0.15)
fig.tight_layout(pad=3.0)
plt.savefig(path_application_category_name_table, dpi=fig.dpi, bbox_inches='tight')
plt.close()
plt.clf()
an_average_length_df = combined_summary_df.groupby("application_name")[
"connection_length"].mean().to_frame().reset_index()
an_average_length_df = an_average_length_df.rename(columns={"connection_length": "avg_connection_length"})
an_average_length_df["avg_connection_length"] = an_average_length_df["avg_connection_length"].apply(
lambda x: round(x, 2))
an_con_count_df = combined_summary_df.groupby("application_name")[
"connection_length"].count().to_frame().reset_index()
an_con_count_df = an_con_count_df.rename(columns={"connection_length": "connection_count"})
application_name_info_df = an_average_length_df.merge(right=an_con_count_df, on="application_name")
application_name_info_df["ratio"] = round(
(application_name_info_df["connection_count"] / total_amount_connections) * 100, 4)
application_name_info_df = application_name_info_df.sort_values(by="connection_count", ascending=False)
application_name_info_df.to_csv(path_application_name_csv, index=False)
application_name_info_df["application_name"] = application_name_info_df["application_name"].apply(
lambda x: x[0:30])
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=application_name_info_df.values, colLabels=application_name_info_df.columns,
loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(application_name_info_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
cell.set_height(0.1)
fig.tight_layout(pad=3.0)
plt.savefig(path_application_name_table, dpi=fig.dpi, bbox_inches='tight')
plt.close()
plt.clf()
# ratio analysis
total_detailed_label_list = pd.read_csv(self.path_to_detailed_label_folder)["detailed_label"].tolist()
total_detailed_label_list.sort()
combined_summary_df["detailed_label"].str.lower()
combined_summary_df["detailed_label"] = combined_summary_df['detailed_label'].replace(["Unknown", "-"], 'Benign')
detailed_label_df = combined_summary_df.groupby("scenario")["detailed_label"].value_counts().to_frame()
detailed_label_df = detailed_label_df.rename(columns={"detailed_label" : "count"}).reset_index()
detailed_label_df = detailed_label_df.reindex(sorted(detailed_label_df.columns), axis=1)
detailed_label_pt = pd.pivot_table(data=detailed_label_df, values="count", index="scenario", columns="detailed_label", aggfunc=np.sum, fill_value=0)
detailed_label_pt.reset_index(drop=False, inplace=True)
if "Unknown" in detailed_label_pt.columns:
detailed_label_pt = detailed_label_pt.rename(columns={"Unknown" : "Benign"})
detailed_label_pt.columns = detailed_label_pt.columns.to_series().apply(lambda x: x.lower())
for detailed_label in total_detailed_label_list:
if detailed_label not in detailed_label_pt.columns:
detailed_label_pt[detailed_label] = 0
column_order_list = total_detailed_label_list.copy()
column_order_list.insert(0, "scenario")
total_ratio_df = detailed_label_pt.reindex(columns=column_order_list)
total_ratio_df = total_ratio_df.sort_values(by="scenario")
total_ratio_df.to_csv(total_ratio_path, index = False)
relative_ratio_df = detailed_label_pt
for detailed_label in total_detailed_label_list:
if relative_ratio_df[detailed_label].sum() != 0:
relative_ratio_df[detailed_label] = relative_ratio_df[detailed_label].apply(lambda x: (x / (relative_ratio_df[detailed_label].sum())))
relative_ratio_df = relative_ratio_df.reindex(columns=column_order_list)
relative_ratio_df = relative_ratio_df.sort_values(by="scenario")
relative_ratio_df.to_csv(relative_ratio_path, index=False)
###################
# Cluster Summary #
###################
print("Creating cluster summary file")
summary_csv_df = pd.read_csv(summary_csv_file_path)
cluster_summary_path = path_to_summaries + "cluster_summary" + addition + '.csv'
total_number_connections = len(summary_csv_df.index)
total_number_packets = total_number_connections * self.window_size
cluster_numbers = sorted(summary_csv_df["clusnum"].unique().tolist())
cluster_numbers = list(map(lambda x: str(x), cluster_numbers))
# clustering_error_list_df = []
# clustering_error_list = []
#
# for cluster_number in cluster_numbers:
#
# if cluster_number != '-1':
# if cluster_number in error_packets_per_cluster:
# error_packets = len(error_packets_per_cluster[cluster_number])
# correct_packets = len(correct_packets_per_cluster[cluster_number])
# per_cluster_error = error_packets / (correct_packets + error_packets)
#
# else:
# per_cluster_error = 0
#
# clustering_error_list.append(per_cluster_error)
# clustering_error_list_df.append(per_cluster_error)
#
# clustering_error_list_df.insert(0, "nan")
clustering_error_list_df = []
for cluster_number in cluster_numbers:
clustering_error_list_df.append("na")
packets_per_cluster_list = summary_csv_df.groupby("clusnum")["connection_length"].sum().tolist()
connections_per_cluster_list = summary_csv_df.groupby("clusnum")["connection_length"].count().tolist()
avg_cluster_probability_list = summary_csv_df.groupby("clusnum")["probability"].mean().tolist()
per_cluster_label_count = summary_csv_df.groupby("clusnum")["label"].value_counts(normalize=True)
max_label_per_cluster = per_cluster_label_count.groupby("clusnum").idxmax().to_frame().reset_index()
max_label_per_cluster["label"] = max_label_per_cluster["label"].apply(lambda x: x[1])
max_label_percentage_per_cluster = per_cluster_label_count.groupby("clusnum").max().to_frame().reset_index()
max_label_percentage_per_cluster = max_label_percentage_per_cluster.rename(columns={"label": "percentage"})
label_merged_df_1 = max_label_per_cluster.merge(right=max_label_percentage_per_cluster, on="clusnum")
avg_label_cluster_purity_list = label_merged_df_1["percentage"].tolist()
per_cluster_detailed_label_count = summary_csv_df.groupby("clusnum")["detailed_label"].value_counts(
normalize=True)
max_detailed_label_per_cluster = per_cluster_detailed_label_count.groupby(
"clusnum").idxmax().to_frame().reset_index()
max_detailed_label_per_cluster["detailed_label"] = max_detailed_label_per_cluster["detailed_label"].apply(
lambda x: x[1])
max_detailed_label_percentage_per_cluster = per_cluster_detailed_label_count.groupby(
"clusnum").max().to_frame().reset_index()
max_detailed_label_percentage_per_cluster = max_detailed_label_percentage_per_cluster.rename(
columns={"detailed_label": "percentage"})
detailed_label_merged_df_1 = max_detailed_label_per_cluster.merge(
right=max_detailed_label_percentage_per_cluster, on="clusnum")
avg_detailed_label_cluster_purity_list = detailed_label_merged_df_1["percentage"].tolist()
per_cluster_application_name_count = summary_csv_df.groupby("clusnum")["application_name"].value_counts(
normalize=True)
max_cluster_application_name_per_cluster = per_cluster_application_name_count.groupby(
"clusnum").idxmax().to_frame().reset_index()
max_cluster_application_name_per_cluster["application_name"] = max_cluster_application_name_per_cluster[
"application_name"].apply(lambda x: x[1])
max_cluster_application_name_percentage_per_cluster = per_cluster_application_name_count.groupby(
"clusnum").max().to_frame().reset_index()
max_cluster_application_name_percentage_per_cluster = max_cluster_application_name_percentage_per_cluster.rename(
columns={"application_name": "percentage"})
application_name_merged_df_1 = max_cluster_application_name_per_cluster.merge(
right=max_cluster_application_name_percentage_per_cluster, on="clusnum")
avg_application_name_cluster_purity_list = application_name_merged_df_1["percentage"].tolist()
per_cluster_application_category_name_count = summary_csv_df.groupby("clusnum")[
"application_category_name"].value_counts(normalize=True)
max_cluster_application_category_name_per_cluster = per_cluster_application_category_name_count.groupby(
"clusnum").idxmax().to_frame().reset_index()
max_cluster_application_category_name_per_cluster["application_category_name"] = \
max_cluster_application_category_name_per_cluster["application_category_name"].apply(lambda x: x[1])
max_cluster_application_category_name_percentage_per_cluster = per_cluster_application_category_name_count.groupby(
"clusnum").max().to_frame().reset_index()
max_cluster_application_category_name_percentage_per_cluster = max_cluster_application_category_name_percentage_per_cluster.rename(
columns={"application_category_name": "percentage"})
application_category_name_merged_df_1 = max_cluster_application_category_name_per_cluster.merge(
right=max_cluster_application_category_name_percentage_per_cluster, on="clusnum")
avg_application_category_name_cluster_purity_list = application_category_name_merged_df_1["percentage"].tolist()
# application_category_name_per_cluster = summary_csv_df.groupby("clusnum")["application_category_name"].count().to_frame().reset_index()
# application_category_name_per_cluster = application_category_name_per_cluster.rename(columns={"application_category_name": "packet_count"})
# application_category_name_merged_df_2 = application_category_name_merged_df_1.merge(right=application_category_name_per_cluster, on="clusnum")
# application_category_name_merged_df_2["av_application_category_name_cluster_purity"] = \
# application_category_name_merged_df_2["percentage"] * application_category_name_merged_df_2["packet_count"]
# avg_application_category_name_cluster_purity_list = application_category_name_merged_df_2["av_application_category_name_cluster_purity"].tolist()
#avg_cluster_error
per_cluster_name_count = summary_csv_df.groupby("clusnum")["name"].value_counts(normalize=True)
max_name_per_cluster = per_cluster_name_count.groupby("clusnum").idxmax().to_frame().reset_index()
max_name_per_cluster["label"] = max_name_per_cluster["name"].apply(lambda x: x[1])
max_name_percentage_per_cluster = per_cluster_name_count.groupby("clusnum").max().to_frame().reset_index()
max_name_percentage_per_cluster = max_name_percentage_per_cluster.rename(columns={"name": "percentage"})
name_merged_df_1 = max_name_per_cluster.merge(right=max_name_percentage_per_cluster, on="clusnum")
avg_name_purity_list = name_merged_df_1["percentage"].tolist()
data = {"cluster": cluster_numbers,
"clustering_error": clustering_error_list_df,
"num_packets": packets_per_cluster_list,
"num_connections": connections_per_cluster_list,
"avg_cluster_probability": avg_cluster_probability_list,
"avg_label_purity": avg_label_cluster_purity_list,
"avg_detailed_label_purity": avg_detailed_label_cluster_purity_list,
"avg_application_name_purity": avg_application_name_cluster_purity_list,
"avg_application_category_name_purity": avg_application_category_name_cluster_purity_list,
"avg_name_purity": avg_name_purity_list}
cluster_summary_df = pd.DataFrame(data)
cluster_summary_df.to_csv(cluster_summary_path, index=False)
###################
# Overall Summary #
###################
print("Creating overall summary file")
overall_summary_path = path_to_summaries + "overall_summary" + addition + '.csv'
time_for_processing = round(end_time - start_time, 2)
validity_index = round(validity_index, 3)
number_of_clusters = len(summary_csv_df["clusnum"].unique())
avg_size_of_cluster = int(summary_csv_df.groupby("clusnum")["label"].count().mean())
if number_of_clusters > 1:
std_size_of_cluster = round(summary_csv_df.groupby("clusnum")["label"].count().std(), 2)
else:
std_size_of_cluster = "nan"
number_of_connections_in_noise_cluster = summary_csv_df[summary_csv_df["clusnum"] == -1]["clusnum"].count()
noise_percentage = round((number_of_connections_in_noise_cluster / total_number_connections) * 100, 3)
percentage_detailed_labels_in_noise_cluster = round(((summary_csv_df[
(summary_csv_df["detailed_label"] != "-") & (
summary_csv_df["clusnum"] == -1)][
"clusnum"].count()) / (
summary_csv_df[
summary_csv_df["detailed_label"] != "-"][
"clusnum"].count())) * 100, 3)
avg_overall_label_purity = mean(avg_label_cluster_purity_list)
avg_overall_detailed_label_purity = mean(avg_detailed_label_cluster_purity_list)
avg_overall_application_name_purity = mean(avg_application_name_cluster_purity_list)
avg_overall_application_category_name_purity = mean(avg_application_category_name_cluster_purity_list)
avg_overall_name_purity = mean(avg_name_purity_list)
labels_present = summary_csv_df["label"].unique()
avg_label_separation_list = []
avg_label_separation_list_df = []
for label in labels_present:
label_count_per_cluster = \
summary_csv_df[summary_csv_df["label"] == label].groupby("clusnum")[
"label"].count().to_frame().reset_index()
label_count_per_cluster_as_tuple = list(
label_count_per_cluster.itertuples(index=False, name=None))
max_value = 0
total_count = 0
for clusname, count_labels in label_count_per_cluster_as_tuple:
if count_labels > max_value:
max_value = count_labels
total_count = total_count + count_labels
separation = max_value / total_count
avg_label_separation_list_df.append((separation, total_count, label))
avg_label_separation_list.append(separation)
avg_label_cohesion = round(mean(avg_label_separation_list), 3)
detailed_labels_present = summary_csv_df["detailed_label"].unique()
avg_detailed_label_separation_list = []
avg_detailed_label_separation_list_df = []
for detailed_label in detailed_labels_present:
detailled_label_count_per_cluster = \
summary_csv_df[summary_csv_df["detailed_label"] == detailed_label].groupby("clusnum")[
"detailed_label"].count().to_frame().reset_index()
detailled_label_count_per_cluster_as_tuple = list(
detailled_label_count_per_cluster.itertuples(index=False, name=None))
max_value = 0
total_count = 0
for clusname, count_detailed_labels in detailled_label_count_per_cluster_as_tuple:
if count_detailed_labels > max_value:
max_value = count_detailed_labels
total_count = total_count + count_detailed_labels
separation = max_value / total_count
avg_detailed_label_separation_list_df.append((separation, total_count, detailed_label))
avg_detailed_label_separation_list.append(separation)
avg_detailed_label_cohesion = round(mean(avg_detailed_label_separation_list), 3)
application_name_present = summary_csv_df["application_name"].unique()
avg_application_name_separation_list = []
avg_application_name_separation_list_df = []
for application_name in application_name_present:
application_name_count_per_cluster = \
summary_csv_df[summary_csv_df["application_name"] == application_name].groupby("clusnum")[
"application_name"].count().to_frame().reset_index()
application_name_count_per_cluster_as_tuple = list(
application_name_count_per_cluster.itertuples(index=False, name=None))
max_value = 0
total_count = 0
for clusname, count_application_name in application_name_count_per_cluster_as_tuple:
if count_application_name > max_value:
max_value = count_application_name
total_count = total_count + count_application_name
separation = max_value / total_count
avg_application_name_separation_list_df.append((separation, total_count, application_name))
avg_application_name_separation_list.append(separation)
avg_application_name_cohesion = round(mean(avg_application_name_separation_list), 3)
application_category_name_present = summary_csv_df["application_category_name"].unique()
avg_application_category_name_separation_list = []
avg_application_category_name_separation_list_df = []
for application_category_name in application_category_name_present:
application_category_name_count_per_cluster = \
summary_csv_df[summary_csv_df["application_category_name"] == application_category_name].groupby(
"clusnum")[
"application_category_name"].count().to_frame().reset_index()
application_category_name_count_per_cluster_as_tuple = list(
application_category_name_count_per_cluster.itertuples(index=False, name=None))
max_value = 0
total_count = 0
for clusname, count_application_category_name in application_category_name_count_per_cluster_as_tuple:
if count_application_category_name > max_value:
max_value = count_application_category_name
total_count = total_count + count_application_category_name
separation = max_value / total_count
avg_application_category_name_separation_list_df.append(
(separation, total_count, application_category_name))
avg_application_category_name_separation_list.append(separation)
avg_application_category_name_cohesion = round(mean(avg_application_category_name_separation_list), 3)
name_present = summary_csv_df["name"].unique()
avg_name_separation_list = []
avg_name_separation_list_df = []
for name in name_present:
name_count_per_cluster = \
summary_csv_df[summary_csv_df["name"] == name].groupby("clusnum")[
"name"].count().to_frame().reset_index()
name_count_per_cluster_as_tuple = list(
name_count_per_cluster.itertuples(index=False, name=None))
max_value = 0
total_count = 0
for clusname, count_name in name_count_per_cluster_as_tuple:
if count_name > max_value:
max_value = count_name
total_count = total_count + count_name
separation = max_value / total_count
avg_name_separation_list_df.append((separation, total_count, count_name))
avg_name_separation_list.append(separation)
avg_name_cohesion = round(mean(avg_name_separation_list), 3)
probablity_no_noise = summary_csv_df[summary_csv_df["clusnum"] != -1]
avg_cluster_probability = round(probablity_no_noise["probability"].mean(), 3)
# if len(clustering_error_list) > 1:
# avg_clustering_error = round(mean(clustering_error_list), 3)
# else:
# avg_clustering_error = "nan"
avg_clustering_error = "nan"
data_overall = {"total_time_processing": time_for_processing,
"validity_index": validity_index,
"shilouette_score": silhouette_score,
"total_number_connections": total_number_connections,
"total_number_packets": total_number_packets,
"total_number_clusters": number_of_clusters,
"avg_cluster_size": avg_size_of_cluster,
"std_cluster_size": std_size_of_cluster,
"noise_percentage": noise_percentage,
"avg_label_cohesion": avg_label_cohesion,
"avg_detailed_label_cohesion": avg_detailed_label_cohesion,
"avg_application_name_cohesion": avg_application_name_cohesion,
"avg_application_category_name_cohesion": avg_application_category_name_cohesion,
"avg_name_cohesion": avg_name_cohesion,
"avg_label_purity": avg_overall_label_purity,
"avg_detailed_label_purity": avg_overall_detailed_label_purity,
"avg_application_name_purity": avg_overall_application_name_purity,
"avg_application_category_name_purity": avg_overall_application_category_name_purity,
"avg_name_purity": avg_overall_name_purity,
"avg_cluster_probability": avg_cluster_probability,
"avg_clustering_error": avg_clustering_error}
summary_overall_df = pd.DataFrame(data_overall, index=[0])
summary_overall_df.to_csv(overall_summary_path, index=False)
#####################
# shortened summary #
#####################
print("Creating shortened summary")
shortened_summary_path = path_to_summaries + "shortened_summary" + addition + '.csv'
cohesion_score = 0.35 * avg_label_cohesion + 0.45 * avg_detailed_label_cohesion + 0.05 * avg_application_name_cohesion + 0.05 * avg_application_category_name_cohesion + 0.1 * avg_name_cohesion
purity_score = 0.35 * avg_overall_label_purity + 0.45 * avg_overall_detailed_label_purity + 0.05 * avg_overall_application_name_purity + 0.05 * avg_overall_application_category_name_purity + 0.1 * avg_overall_name_purity
data_shortened = {
"validity_index": validity_index,
"shilouette_score": silhouette_score,
"noise_percentage": noise_percentage,
"number_clusters": number_of_clusters,
"cohesion_score" : cohesion_score,
"purity_score" : purity_score,
"avg_cluster_probability": avg_cluster_probability,
"avg_clustering_error": avg_clustering_error}
shortened_summary = pd.DataFrame(data_shortened, index=[0])
shortened_summary.to_csv(shortened_summary_path, index=False)
###################
# Window Analysis #
###################
print("Analyzing window info")
window_info_path = path_to_summaries + "window_info" + addition + '.csv'
summary_csv_df = pd.read_csv(summary_csv_file_path)
summary_csv_df["combined_address"] = summary_csv_df["scenario"] + "_" + summary_csv_df["file"] + "->" + summary_csv_df["src_ip"] + "->" + summary_csv_df["dst_ip"]
per_connection_cluster_count = summary_csv_df.groupby("combined_address")["clusnum"].value_counts(
normalize=True)
max_cluster_per_connection = per_connection_cluster_count.groupby(
"combined_address").idxmax().to_frame().reset_index()
max_cluster_per_connection["clusnum"] = max_cluster_per_connection["clusnum"].apply(
lambda x: x[1])
max_cluster_percentage_per_connection = per_connection_cluster_count.groupby(
"combined_address").max().to_frame().reset_index()
max_cluster_percentage_per_connection = max_cluster_percentage_per_connection.rename(
columns={"clusnum": "percentage"})
connection_cluster_merged_df_1 = max_cluster_per_connection.merge(
right=max_cluster_percentage_per_connection, on="combined_address")
avg_window_cohesion_list = connection_cluster_merged_df_1["percentage"].tolist()
avg_window_cohesion = round(mean(avg_window_cohesion_list), 3)
data_window = {
"avg_window_cohesion" : avg_window_cohesion
}
window_summary = pd.DataFrame(data_window, index=[0])
window_summary.to_csv(window_info_path, index=False)
###############################
# Performance Matrix Creation #
###############################
print("Creating performance matrices")
performance_matrix_folder = graphs_folder + "/performance_matrices"
os.mkdir(performance_matrix_folder)
label_performance_matrix = performance_matrix_folder + "/label_performance_matrix" + addition + ".csv"
label_performance_matrix_table = performance_matrix_folder + "/label_performance_matrix" + addition + ".png"
detailed_label_performance_matrix = performance_matrix_folder + "/detailed_label_performance_matrix" + addition + ".csv"
detailed_label_performance_matrix_table = performance_matrix_folder + "/detailed_label_performance_matrix" + addition + ".png"
label_df = summary_csv_df.groupby("clusnum")["label"].value_counts().to_frame()
label_df = label_df.rename(columns={"label": "count"})
label_df = label_df.reset_index()
labels = label_df["label"].unique()
for label in labels:
lower_label = label.lower()
label_df[lower_label] = np.where(label_df["label"] == label, label_df["count"], 0)
label_df = label_df.drop(["count", "label"], axis=1)
label_df = label_df.rename(columns={"clusnum": "Cluster"})
columns = label_df.columns.tolist()
labels = label_df.columns.tolist()
labels.remove("Cluster")
clusters = label_df["Cluster"].unique().tolist()
data = []
for cluster in clusters:
cluster_column_data = []
cluster_column_data.append(cluster)
for label in labels:
count = int(label_df[(label_df["Cluster"] == cluster)][label].sum())
cluster_column_data.append(count)
data.append(cluster_column_data)
improved_label_df = pd.DataFrame(data, columns=columns)
detailed_label_df = summary_csv_df.groupby("clusnum")["detailed_label"].value_counts().to_frame()
detailed_label_df = detailed_label_df.rename(columns={"detailed_label": "count"})
detailed_label_df = detailed_label_df.reset_index()
detailed_labels = detailed_label_df["detailed_label"].unique()
for detail_label in detailed_labels:
lower_detail_label = detail_label.lower()
detailed_label_df[lower_detail_label] = np.where(detailed_label_df["detailed_label"] == detail_label,
detailed_label_df["count"], 0)
detailed_label_df = detailed_label_df.drop(["count", "detailed_label"], axis=1)
detailed_label_df = detailed_label_df.rename(columns={"clusnum": "Cluster"})
columns = detailed_label_df.columns.tolist()
labels = detailed_label_df.columns.tolist()
labels.remove("Cluster")
clusters = detailed_label_df["Cluster"].unique().tolist()
data = []
for cluster in clusters:
cluster_column_data = []
cluster_column_data.append(cluster)
for label in labels:
count = int(detailed_label_df[(detailed_label_df["Cluster"] == cluster)][label].sum())
cluster_column_data.append(count)
data.append(cluster_column_data)
improved_detail_label_df = pd.DataFrame(data, columns=columns)
improved_label_df.to_csv(label_performance_matrix, index=False)
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=improved_label_df.values, colLabels=improved_label_df.columns, loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(improved_label_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout()
plt.savefig(label_performance_matrix_table)
plt.close()
plt.clf()
improved_detail_label_df.to_csv(detailed_label_performance_matrix, index=False)
reduced_column_size_name = [x[0:10] for x in improved_detail_label_df.columns.tolist()]
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table2 = ax.table(cellText=improved_detail_label_df.values, colLabels=reduced_column_size_name, loc='center',
cellLoc='center')
table2.auto_set_column_width(col=list(range(len(reduced_column_size_name))))
for (row, col), cell in table2.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout()
plt.savefig(detailed_label_performance_matrix_table, dpi=1200, bbox_inches='tight')
plt.close()
plt.clf()
##################
# Graph Creation #
#################
print("Creating graphs")
cluster_graphs_path = graphs_folder + "/cluster_graphs/"
os.mkdir(cluster_graphs_path)
summary_csv_df = pd.read_csv(summary_csv_file_path)
application_name_graph = cluster_graphs_path + "/application_name_graph" + addition + ".png"
path_to_application_name_legend_storage = cluster_graphs_path + "/application_name_legend" + addition + ".png"
path_to_application_name_combined = cluster_graphs_path + "/application_name_combined" + addition + ".png"
application_category_name_graph = cluster_graphs_path + "/application_category_name_graph" + addition + ".png"
path_to_application_category_name_legend_storage = cluster_graphs_path + "/application_category_name_legend" + addition + ".png"
path_to_application_category_name_combined = cluster_graphs_path + "/application_category_name_combined" + addition + ".png"
label_distribution_graph = cluster_graphs_path + "/label_graph" + addition + ".png"
path_to_label_legend_storage = cluster_graphs_path + "/label_legend" + addition + ".png"
path_to_label_combined = cluster_graphs_path + "/label_combined" + addition + ".png"
detailed_label_distribution_graph = cluster_graphs_path + "/detailed_label_graph" + addition + ".png"
path_to_detailed_label_legend_storage = cluster_graphs_path + "/detailed_label_legend" + addition + ".png"
path_to_detailed_label_combined = cluster_graphs_path + "/detailed_label_combined" + addition + ".png"
name_distribution_graph = cluster_graphs_path + "/name_graph" + addition + ".png"
path_to_name_legend_storage = cluster_graphs_path + "/name_legend" + addition + ".png"
path_to_name_combined = cluster_graphs_path + "/name_combined" + addition + ".png"
####################
# application name #
####################
overall_detailed_label_df = summary_csv_df.groupby("clusnum")["application_name"].value_counts().to_frame()
overall_detailed_label_df = overall_detailed_label_df.rename(columns={"application_name": "count"})
overall_detailed_label_df = overall_detailed_label_df.reset_index()
clusters = overall_detailed_label_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
list_of_names_dfs = []
for cluster in clusters:
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_name", "count"]]
cluster_df["application_name"] = np.where(cluster_df["count"] <= 4, "Other", cluster_df.application_name)
cluster_df = cluster_df.groupby("application_name")["count"].aggregate(sum).reset_index().sort_values(
by=["count"], ascending=False)
list_of_names_dfs.append(cluster_df)
detailed_label_name_df = list_of_names_dfs.pop()
for name_df in list_of_names_dfs:
detailed_label_name_df = detailed_label_name_df.append(name_df)
detailed_label_name_df = detailed_label_name_df.groupby("application_name")["count"].aggregate(
sum).reset_index().sort_values(by=["count"])
unique_application_category_names = detailed_label_name_df["application_name"].tolist()
colors = {}
cmap = cm.tab20c(np.linspace(0, 1, len(unique_application_category_names)))
for index, color in enumerate(cmap):
application_name = unique_application_category_names.pop()
colors[application_name] = color
for index, cluster in enumerate(clusters):
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_name", "count"]]
cluster_df["application_name"] = np.where(cluster_df["count"] <= 4, "Other",
cluster_df.application_name)
cluster_df = cluster_df.groupby("application_name")["count"].aggregate(sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["application_name"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["application_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"],
labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df[
"application_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in
cluster_df["application_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title(
"Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
plt.suptitle("Application Name Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(application_name_graph, dpi=1200)
label_list = colors.keys()
label_list = [x[0:40] for x in label_list]
legend = plt.legend(handles=markers, labels=label_list, loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_application_name_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(application_name_graph)
legend_im = Image.open(path_to_application_name_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)
total_width = widths_graph + widths_legend
y_offset = int((heights_graph - heights_legend) / 2)
combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))
combined_im.paste(graph_img, (0, 0))
combined_im.paste(legend_im, (widths_graph, y_offset))
combined_im.save(path_to_application_name_combined)
#############################
# application category name #
#############################
overall_detailed_label_df = summary_csv_df.groupby("clusnum")[
"application_category_name"].value_counts().to_frame()
overall_detailed_label_df = overall_detailed_label_df.rename(columns={"application_category_name": "count"})
overall_detailed_label_df = overall_detailed_label_df.reset_index()
clusters = overall_detailed_label_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
list_of_names_dfs = []
for cluster in clusters:
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_category_name", "count"]]
cluster_df = cluster_df.groupby("application_category_name")["count"].aggregate(
sum).reset_index().sort_values(
by=["count"], ascending=False)
list_of_names_dfs.append(cluster_df)
detailed_label_name_df = list_of_names_dfs.pop()
for name_df in list_of_names_dfs:
detailed_label_name_df = detailed_label_name_df.append(name_df)
detailed_label_name_df = detailed_label_name_df.groupby("application_category_name")["count"].aggregate(
sum).reset_index().sort_values(by=["count"])
unique_application_category_names = detailed_label_name_df["application_category_name"].tolist()
colors = {}
cmap = cm.gist_rainbow(np.linspace(0, 1, len(unique_application_category_names)))
for index, color in enumerate(cmap):
application_name = unique_application_category_names.pop()
colors[application_name] = color
for index, cluster in enumerate(clusters):
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["application_category_name", "count"]]
cluster_df = cluster_df.groupby("application_category_name")["count"].aggregate(
sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["application_category_name"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_category_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["application_category_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["application_category_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"],
labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df[
"application_category_name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in
cluster_df["application_category_name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title(
"Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
fig.subplots_adjust(bottom=0.25)
plt.suptitle("Application Category Name Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(application_category_name_graph, dpi=1200)
label_list = colors.keys()
label_list = [x[0:40] for x in label_list]
legend = plt.legend(handles=markers, labels=label_list, loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_application_category_name_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(application_category_name_graph)
legend_im = Image.open(path_to_application_category_name_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)
total_width = widths_graph + widths_legend
y_offset = int((heights_graph - heights_legend) / 2)
combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))
combined_im.paste(graph_img, (0, 0))
combined_im.paste(legend_im, (widths_graph, y_offset))
combined_im.save(path_to_application_category_name_combined)
#########
# label #
#########
overall_detailed_label_df = summary_csv_df.groupby("clusnum")["label"].value_counts().to_frame()
overall_detailed_label_df = overall_detailed_label_df.rename(columns={"label": "count"})
overall_detailed_label_df = overall_detailed_label_df.reset_index()
clusters = overall_detailed_label_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
colors = {}
colors["Malicious"] = "r"
colors["Benign"] = "g"
colors["Unknown"] = "grey"
for index, cluster in enumerate(clusters):
cluster_df = \
overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["label", "count"]]
cluster_df = cluster_df.groupby("label")["count"].aggregate(
sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if (len(cluster_df.index) > 7):
cluster_df["relative_count"] = np.where(cluster_df["relative_count"] <= 5, "",
cluster_df["relative_count"])
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["label"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["label"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["label"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["label"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"],
labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df[
"label"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in
cluster_df["label"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title(
"Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
fig.subplots_adjust(bottom=0.25)
plt.suptitle("Label Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(label_distribution_graph, dpi=1200)
legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_label_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(label_distribution_graph)
legend_im = Image.open(path_to_label_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)
total_width = widths_graph + widths_legend
y_offset = int((heights_graph - heights_legend) / 2)
combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))
combined_im.paste(graph_img, (0, 0))
combined_im.paste(legend_im, (widths_graph, y_offset))
combined_im.save(path_to_label_combined)
##################
# detailed label #
##################
overall_detailed_label_df = summary_csv_df.groupby("clusnum")["detailed_label"].value_counts().to_frame()
overall_detailed_label_df = overall_detailed_label_df.rename(columns={"detailed_label": "count"})
overall_detailed_label_df = overall_detailed_label_df.reset_index()
clusters = overall_detailed_label_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
list_of_names_dfs = []
for cluster in clusters:
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["detailed_label", "count"]]
cluster_df["detailed_label"] = np.where(cluster_df["detailed_label"] == "-", "Unknown",
cluster_df.detailed_label)
cluster_df = cluster_df.groupby("detailed_label")["count"].aggregate(sum).reset_index().sort_values(
by=["count"], ascending=False)
list_of_names_dfs.append(cluster_df)
detailed_label_name_df = list_of_names_dfs.pop()
for name_df in list_of_names_dfs:
detailed_label_name_df = detailed_label_name_df.append(name_df)
detailed_label_name_df = detailed_label_name_df.groupby("detailed_label")["count"].aggregate(
sum).reset_index().sort_values(by=["count"])
unique_application_category_names = detailed_label_name_df["detailed_label"].tolist()
colors = {}
cmap = cm.terrain(np.linspace(0, 1, len(unique_application_category_names)))
for index, color in enumerate(cmap):
application_name = unique_application_category_names.pop()
colors[application_name] = color
for index, cluster in enumerate(clusters):
cluster_df = overall_detailed_label_df[overall_detailed_label_df["clusnum"] == cluster][
["detailed_label", "count"]]
cluster_df = cluster_df.groupby("detailed_label")["count"].aggregate(sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["detailed_label"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["detailed_label"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["detailed_label"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["detailed_label"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"],
labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df[
"detailed_label"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in
cluster_df["detailed_label"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title(
"Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
fig.subplots_adjust(bottom=0.25)
plt.suptitle("Detailed Label Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(detailed_label_distribution_graph, dpi=1200)
legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_detailed_label_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(detailed_label_distribution_graph)
legend_im = Image.open(path_to_detailed_label_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)
total_width = widths_graph + widths_legend
y_offset = int((heights_graph - heights_legend) / 2)
combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))
combined_im.paste(graph_img, (0, 0))
combined_im.paste(legend_im, (widths_graph, y_offset))
combined_im.save(path_to_detailed_label_combined)
########
# name #
########
overall_name_df = summary_csv_df.groupby("clusnum")["name"].value_counts().to_frame()
overall_name_df = overall_name_df.rename(columns={"name": "count"})
overall_name_df = overall_name_df.reset_index()
clusters = overall_name_df["clusnum"].unique().tolist()
if len(clusters) < 4:
ncols = len(clusters)
else:
ncols = 4
nrows = math.ceil(len(clusters) / 4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))
list_of_names_dfs = []
for cluster in clusters:
cluster_df = overall_name_df[overall_name_df["clusnum"] == cluster][
["name", "count"]]
cluster_df = cluster_df.groupby("name")["count"].aggregate(sum).reset_index().sort_values(
by=["count"], ascending=False)
list_of_names_dfs.append(cluster_df)
detailed_label_name_df = list_of_names_dfs.pop()
for name_df in list_of_names_dfs:
detailed_label_name_df = detailed_label_name_df.append(name_df)
detailed_label_name_df = detailed_label_name_df.groupby("name")["count"].aggregate(
sum).reset_index().sort_values(by=["count"])
unique_application_category_names = detailed_label_name_df["name"].tolist()
colors = {}
cmap = cm.ocean(np.linspace(0, 1, len(unique_application_category_names)))
for index, color in enumerate(cmap):
application_name = unique_application_category_names.pop()
colors[application_name] = color
for index, cluster in enumerate(clusters):
cluster_df = overall_name_df[overall_name_df["clusnum"] == cluster][
["name", "count"]]
cluster_df = cluster_df.groupby("name")["count"].aggregate(sum).reset_index().sort_values(
by=["count"])
cluster_df["relative_count"] = round((cluster_df["count"] / cluster_df["count"].sum()) * 100, 2)
if len(clusters) == 1:
patches, texts = ax.pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in cluster_df["name"]])
new_labels = self.clean_up_labels(texts)
ax.clear()
ax.pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax.set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
elif len(clusters) <= 4:
patches, texts = ax[index].pie(cluster_df["count"], labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df["name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[index].clear()
ax[index].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in cluster_df["name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[index].set_title("Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
else:
patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df["count"],
labels=cluster_df["relative_count"],
colors=[colors[key] for key in
cluster_df[
"name"]],
labeldistance=1.25)
new_labels = self.clean_up_labels(texts)
ax[math.floor(index / 4), index % 4].clear()
ax[math.floor(index / 4), index % 4].pie(cluster_df["count"], labels=new_labels,
colors=[colors[key] for key in
cluster_df["name"]],
labeldistance=1.15, textprops={'fontsize': 8})
ax[math.floor(index / 4), index % 4].set_title(
"Cluster " + str(cluster) + " (N=" + str(cluster_df["count"].sum()) + ")")
if len(clusters) % 4 != 0:
if len(clusters) > 4:
for missing_axis in range(4 - len(clusters) % 4, 4):
ax[nrows - 1, missing_axis].axis('off')
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]
fig.subplots_adjust(bottom=0.25)
plt.suptitle("Device / Malware Distribution per Cluster", y=0.985, x=0.5, fontweight='bold')
fig.tight_layout()
fig.canvas.draw()
fig.savefig(name_distribution_graph, dpi=1200)
legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,
bbox_to_anchor=(2, 0))
separate_legend = legend.figure
separate_legend.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(path_to_name_legend_storage, dpi=1200, bbox_inches=bbox)
legend.remove()
plt.close()
plt.clf()
graph_img = Image.open(name_distribution_graph)
legend_im = Image.open(path_to_name_legend_storage)
widths_graph = graph_img.width
heights_graph = graph_img.height
widths_legend = legend_im.width
heights_legend = legend_im.height
if heights_legend > heights_graph:
resize_percentage = heights_graph / heights_legend
new_width = int(resize_percentage * widths_legend)
legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)
total_width = widths_graph + widths_legend
y_offset = int((heights_graph - heights_legend) / 2)
combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))
combined_im.paste(graph_img, (0, 0))
combined_im.paste(legend_im, (widths_graph, y_offset))
combined_im.save(path_to_name_combined)
#####################
# temporal heatmaps #
#####################
print("Writing temporal heatmaps")
heatmap_path = graphs_folder + "/heatmaps/"
os.mkdir(heatmap_path)
overall_heatmap_path = heatmap_path + "/overall_heatmaps/"
os.mkdir(overall_heatmap_path)
error_heatmap_path = heatmap_path + "/error_heatmaps/"
os.mkdir(error_heatmap_path)
correct_heatmap_path = heatmap_path + "/correct_heatmaps/"
os.mkdir(correct_heatmap_path)
for sub_folder in [overall_heatmap_path, error_heatmap_path, correct_heatmap_path]:
bytes_heatmap_path = sub_folder + "/bytes/"
os.mkdir(bytes_heatmap_path)
gaps_heatmap_path = sub_folder + "/gaps/"
os.mkdir(gaps_heatmap_path)
sport_heatmap_path = sub_folder + "/sport/"
os.mkdir(sport_heatmap_path)
dport_heatmap_path = sub_folder + '/dport'
os.mkdir(dport_heatmap_path)
actlabels = []
for a in range(len(values)): # range(10):
actlabels.append(mapping[keys[a]])
clusterinfo = {}
cluster_info_dic = {}
seqclufile = summary_csv_file_path
lines = []
lines = open(seqclufile).readlines()[1:]
for line in lines:
li = line.split(",") # clusnum,connnum,probability,scenario,file,src_ip,dst_ip,window
clusnum = li[0]
has = int(li[1])
scenario_name = li[3]
file_name = li[4]
srcip = li[5]
dstip = li[6]
window = li[7]
name = scenario_name + "_" + file_name + "->" + str(srcip) + "->" + str(dstip) + "->" + str(window)
# name = str('%12s->%12s' % (srcip, dstip))
if li[0] not in clusterinfo.keys():
clusterinfo[li[0]] = []
clusterinfo[li[0]].append((has, name))
cluster_info_dic[name] = has
cluster_error_dic = {}
error_packets_per_cluster = {}
correct_packets_per_cluster = {}
sns.set(font_scale=0.9)
matplotlib.rcParams.update({'font.size': 10})
vmax_dic = {}
vmin_dic = {}
color_amount_dic = {}
connection_color_dic = {}
for names, sname, q in [("Packet sizes", "bytes", 1), ("Interval", "gaps", 0), ("Source Port", "sport", 3),
("Dest. Port", "dport", 4)]:
for clusnum, cluster in clusterinfo.items():
cluster.sort(key=lambda tuple: tuple[0])
items = [int(x[0]) for x in cluster]
labels = [x[1] for x in cluster]
acha = [actlabels.index(int(x[0])) for x in cluster]
blah = [values[a] for a in acha]
dataf = []
for b in blah:
dataf.append([x[q] for x in b][:self.window_size])
df = pd.DataFrame(dataf, index=labels)
df = df.sort_index()
g = sns.clustermap(df, xticklabels=False, col_cluster=False) # , vmin= minb, vmax=maxb)
ind = g.dendrogram_row.reordered_ind
fig = plt.figure(figsize=(10.0, 9.0))
plt.suptitle("Overall Exp: " + self.expname + " | Cluster: " + clusnum + " | Feature: " + names)
ax = fig.add_subplot(111)
datanew = []
labelsnew = []
labels_heatmap = []
lol = []
for it in sorted(ind):
labelsnew.append(labels[it])
labels_heatmap.append(labels[it].split("->", maxsplit=1)[1])
lol.append(cluster[[x[1] for x in cluster].index(labels[it])][0])
acha = [actlabels.index(int(x)) for x in lol]
blah = [values[a] for a in acha]
dataf = []
for b in blah:
dataf.append([x[q] for x in b][:self.window_size])
vmax = max(max(dataf))
vmin = min(min(dataf))
if sname not in vmax_dic:
vmax_dic[sname] = {}
vmax_dic[sname][clusnum] = vmax
if sname not in vmin_dic:
vmin_dic[sname] = {}
vmin_dic[sname][clusnum] = vmin
df = pd.DataFrame(dataf, index=labels_heatmap)
df = df.sort_index()
cmap = cm.get_cmap('rocket_r')
g = sns.heatmap(df, xticklabels=False, cmap=cmap, vmax=vmax, vmin=vmin)
norm = Normalize(vmin=vmin, vmax=vmax)
rgba_values = cmap(norm(dataf)).tolist()
color_df = | pd.DataFrame(rgba_values, index=labelsnew) | pandas.DataFrame |
# REF Python seleniumの基本 https://torina.top/detail/264/
import re
import sys
import pandas as pd
from time import sleep
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, TimeoutException
################################################################################
df_from_to = pd.read_csv("data/bus_routes/from_to_all.csv")
df_from_to = df_from_to[160:164]
# define empty dataframe
returned_dataset = | pd.DataFrame(columns=['ObjectID', "START", "END", "DEPARTURE", "Route1_detail" ,"Route1", "Route2", "Route3"], index=[]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import scipy.stats as stats
import itertools
#from waldo.conf import settings
#from waldo.wio.experiment import Experiment
from waldo.output.speed import SpeedWriter
from waldo.wio.worm_writer import WormWriter
class BinSelfTest(object):
def __init__(self, speed_df, bid=None, worm_writer=None,
bin_sweep_step=5):
"""
"""
# inputs
self.bid = bid # id of blob
self.ww = worm_writer # WormWriter object for reading/writing this stuff
self.df = speed_df # pandas DataFrame with 'speed' and 'time' columns (time should be in minutes)
self.bin_sweep_step_size = bin_sweep_step
# shorthand for some input features
self.t = np.array(self.df['time'])
self.s = np.array(self.df['speed'])
self.first_t = min(self.t)
self.last_t = max(self.t)
# when bins are assigned
self.bin_size = None
self.start_pos = None
self.bin_num = None
self.bin_starts = None
self.bin_ends = None
self.cropped_df = self.df
self.is_cropped = False
def test_bin_v_bin(self, min_data_fraction=0.9):
""" currently defunt. tests bins against other bins of same size
rather than against the final distribution.
"""
bin_tests = []
bins = range(self.bin_num)
#print bins
for bin_a, bin_b in itertools.combinations(bins, 2):
bin_name = '{size}-{a}-{b}'.format(size=self.bin_size, a=bin_a, b=bin_b)
df_a = self.df[self.df['bin'] == bin_a]
df_b = self.df[self.df['bin'] == bin_b]
#print bin_name
#print df_a.head()
#print df_b.head()
ta = np.array(df_a['time'])
tb = np.array(df_b['time'])
sa = np.array(df_a['speed'])
sb = np.array(df_b['speed'])
if df_a is None or df_b is None:
print('skipping', bin_name, 'because it contains no data')
continue
min_len = min([len(ta), len(tb)])
if min_data_fraction:
if min_len < min_data_fraction * self.bin_size:
#print 'skipping', bin_name, 'because it contains only', float(len(bin_df))/self.bin_size, '% bin data'
continue
if min_len < 30:
print('skipping', bin_name, 'because it contains only', min_len, 'data points')
print(bin_a, len(ta))
print(bin_b, len(tb))
continue
ks_stat, p_ks = stats.ks_2samp(sa, sb)
row = {'bin-name': bin_name,
'dur': self.bin_size,
'bin-start-a': self.bin_starts[bin_a],
'bin-end-a': self.bin_ends[bin_a],
't-min-a': min(ta),
't-max-a': max(ta),
'N-a': len(ta),
'bin-start-b': self.bin_starts[bin_b],
'bin-end-b': self.bin_ends[bin_b],
't-min-b': min(tb),
't-max-b': max(tb),
'N-b': len(tb),
'ks': ks_stat,
'p-ks': p_ks,
}
bin_tests.append(row)
bin_comparison = | pd.DataFrame(bin_tests) | pandas.DataFrame |
import numpy
import h5py
import pandas as pd
from os import listdir
from scipy.stats import kurtosis,moment,skew,entropy,gmean,median_absolute_deviation,gstd
from re import search
from warnings import filterwarnings
filterwarnings('ignore')
numpy.seterr(divide='ignore',invalid='ignore')
class feat_extract:
def compute(self,idxs,data,ldata,bounds,lbp_idx):
idxs=idxs-1
chk=numpy.where(idxs<1)
if(len(chk[0])>0):
idxs=numpy.delete(idxs,chk[0])
chk=numpy.where(idxs>=data.shape[0])
if(len(chk[0])>0):
idxs=numpy.delete(idxs,chk[0])
feat=numpy.zeros((1,135))
if(len(idxs)==1):
val=data[idxs[0]:idxs[0]+1]
lbpval=ldata[idxs[0]:idxs[0]+1]
else:
val=data[idxs]
lbpval=ldata[idxs]
if(len(val)==0):
return feat
feat[0,0:17]=self.stat_feats(val)
feat[0,17:75]=self.global_lbp(lbpval,lbp_idx)
feat[0,75:135]=numpy.histogram(val,range=bounds,bins=60)[0]
return feat
def stat_feats(self,val):
feat=numpy.zeros((1,17))
feat[0,0]=numpy.mean(val)
feat[0,1]=numpy.max(val)
feat[0,2]=numpy.var(val,ddof=1)
feat[0,3]=numpy.min(val)
feat[0,4]=numpy.median(val)
feat[0,5]=numpy.quantile(val,0.9)
feat[0,6]=numpy.quantile(val,0.7)
feat[0,7]=numpy.var(val,ddof=1)/numpy.mean(val)
feat[0,8]=skew(val)
feat[0,9]=kurtosis(val)
feat[0,10]=moment(val,moment=2)
feat[0,11]=moment(val,moment=3)
feat[0,12]=moment(val,moment=4)
feat[0,13]=entropy(val)
feat[0,14]=gmean(val)
feat[0,15]=median_absolute_deviation(val)
if(len(val)>1):
feat[0,16]=gstd(val)
else:
feat[0,16]=numpy.nan
return feat
def global_lbp(self,lbp_val,x_idx):
lbp_feat=numpy.histogram(lbp_val,range=(0,256),bins=256)[0]
lbp_feat=lbp_feat[x_idx]
return lbp_feat
def get_uni_idx(self):
ls=numpy.zeros(58).astype('int')
cont=0
for i in range(256):
x=numpy.array(list(format(i,"08b"))).astype('int')
xd=numpy.roll(x,-1)
S=sum(numpy.logical_xor(x,xd).astype('int'))
if(S<=2):
ls[cont]=i
cont=cont+1
return ls
class feat_ext:
def bins(self,dfx):
OR=0.50 #Overlapping ratio, 50%
RI=50 #Half bin size, 100/2=50
Over=OR*RI #Overlapped length
FRANG=[]
for y in dfx.index:
S=dfx.loc[y,'start']
E=dfx.loc[y,'end']
N1=int((S-1)/RI)+1
N2=int((E-1)/RI)+0
n1=(S-1)%RI
n2=(E-1)%RI
if(n1>=Over):
N1=N1+1
if(n2>=Over):
N2=N2+1
if(N2>=N1):
FRANG.extend(list(range(N1,N2+1)))
else:
FRANG.extend(list(range(0,1)))
return FRANG
def correct_order(self,sel_exon):
total_exons=sel_exon.shape[0]
str_idx=sel_exon.loc[0,'start']
end_idx=sel_exon.loc[total_exons-1,'start']
if(str_idx>end_idx):
sel_exon1= | pd.DataFrame(columns=sel_exon.columns) | pandas.DataFrame |
#!/usr/bin/env python2
import pandas as pd
import numpy as np
import datetime
import glob
from shutil import copy
import errno
import os
import random
random.seed(datetime.datetime.now())
import tokenf
TMPDIR=tokenf.TMPDIR
DATADIR=tokenf.DATADIR
def Mkdir(dirname):
try:
os.mkdir(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
pass
def emptyData():
if os.path.isdir(TMPDIR+'/msg.csv'):
return
if os.path.isdir(DATADIR+'/post.csv'):
return
m = pd.read_csv(TMPDIR+"/msg.csv",
names=[0, 1, 2, 3, 4, 5, 6],
dtype='str', quotechar='"',header=None)
m.drop_duplicates(inplace=True)
c = pd.read_csv(TMPDIR+"/comments.csv",
names=[0, 1, 2, 3, 4, 5, 6],
dtype='str', quotechar='"',header=None)
c.drop_duplicates(inplace=True)
l = pd.read_csv(TMPDIR+"/likes.csv",
names=[0, 1, 2, 3, 4],
dtype='str', quotechar='"',header=None)
l.drop_duplicates(inplace=True)
mem = pd.read_csv(TMPDIR+"/members.csv",
names=[0, 1, 2],
dtype='str', quotechar='"',header=None)
mem['gid']=tokenf.FACEBOOK_GROUP
mem.drop_duplicates(inplace=True)
ll=l[l[1]=='x']
like_count=ll[[0,1]].groupby([0]).count().reset_index()
like_count.columns=[0,'like']
like_count.head()
m=pd.merge(m, like_count, how='left',left_on=[0], right_on=[0])
# Column changes
m.columns = ['pid','id','name','timeStamp','shares','url','msg','likes']
l.columns = ['pid','cid','response','id','name']
c.columns = ['pid','cid','timeStamp','id','name','rid','msg']
mem.columns= ['id','name','url','gid']
m['gid']=m['pid'].apply(lambda x: x.split('_')[0])
l['gid']=l['pid'].apply(lambda x: x.split('_')[0])
c['gid']=c['pid'].apply(lambda x: x.split('_')[0])
m['timeStamp']=pd.DatetimeIndex(m.timeStamp)
c['timeStamp']=pd.DatetimeIndex(c.timeStamp)
m['timeStamp']= m.timeStamp.apply(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
c['timeStamp']= c.timeStamp.apply(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
mem.drop_duplicates(inplace=True)
m.drop_duplicates(subset=['gid','pid', 'id', 'name', 'timeStamp', 'shares', 'url', 'msg','likes'],inplace=True)
c.drop_duplicates(subset=['gid','pid', 'cid', 'timeStamp', 'id', 'name', 'rid', 'msg'],inplace=True)
l.drop_duplicates(subset=['gid','pid', 'cid', 'response', 'id', 'name'],inplace=True)
m.timeStamp=pd.DatetimeIndex(m.timeStamp)
m.sort_values(by='timeStamp',ascending=False)
m.drop_duplicates(subset=['gid','pid', 'id', 'name', 'timeStamp', 'url', 'msg'],inplace=True,keep='last')
m[['gid','pid', 'id', 'name', 'timeStamp', 'shares', 'url', 'msg','likes']].to_csv(TMPDIR+"/post.csv",index=False,header=True)
c[['gid','pid', 'cid', 'timeStamp', 'id', 'name', 'rid', 'msg']].to_csv(TMPDIR+"/comment.csv",index=False,header=True)
l[['gid','pid', 'cid', 'response', 'id', 'name']].to_csv(TMPDIR+"/like.csv",index=False,header=True)
mem[['gid', 'id', 'name', 'url']].to_csv(TMPDIR+"/member.csv",index=False,header=True)
# No header
m[['gid','pid', 'id', 'name', 'timeStamp', 'shares', 'url', 'msg','likes']].to_csv(TMPDIR+"/postNH.csv",index=False,header=False)
l[['gid','pid', 'cid', 'response', 'id', 'name']].to_csv(TMPDIR+"/likeNH.csv",index=False,header=False)
c[['gid','pid', 'cid', 'timeStamp', 'id', 'name', 'rid', 'msg']].to_csv(TMPDIR+"/commentNH.csv",index=False,header=False)
mem[['gid', 'id', 'name', 'url']].to_csv(TMPDIR+"/memberNH.csv",index=False,header=False)
# Copy everything
for file in glob.glob(TMPDIR+'/*.csv'):
copy(file,DATADIR)
def dataExists():
m = pd.read_csv(DATADIR+"/msg.csv",
names=[0, 1, 2, 3, 4, 5, 6],
dtype='str', quotechar='"',header=None)
m_delta = pd.read_csv(TMPDIR+"/msg.csv",
names=[0, 1, 2, 3, 4, 5, 6],
dtype='str', quotechar='"',header=None)
m=pd.concat([m,m_delta])
m.drop_duplicates(inplace=True)
c = pd.read_csv(DATADIR+"/comments.csv",
names=[0, 1, 2, 3, 4, 5, 6],
dtype='str', quotechar='"',header=None)
c_delta = pd.read_csv(TMPDIR+"/comments.csv",
names=[0, 1, 2, 3, 4, 5, 6],
dtype='str', quotechar='"',header=None)
c=pd.concat([c,c_delta])
c.drop_duplicates(inplace=True)
l = pd.read_csv(DATADIR+"/likes.csv",
names=[0, 1, 2, 3, 4],
dtype='str', quotechar='"',header=None)
l_delta = pd.read_csv(TMPDIR+"/likes.csv",
names=[0, 1, 2, 3, 4],
dtype='str', quotechar='"',header=None)
l=pd.concat([l,l_delta])
l.drop_duplicates(inplace=True)
mem = pd.read_csv(DATADIR+"/members.csv",
names=[0, 1, 2],
dtype='str', quotechar='"',header=None)
mem['gid']=tokenf.FACEBOOK_GROUP
mem_delta = pd.read_csv(TMPDIR+"/members.csv",
names=[0, 1, 2],
dtype='str', quotechar='"',header=None)
mem_delta['gid']=tokenf.FACEBOOK_GROUP
mem=pd.concat([mem,mem_delta])
mem.drop_duplicates(inplace=True)
ll=l[l[1]=='x']
like_count=ll[[0,1]].groupby([0]).count().reset_index()
like_count.columns=[0,'like']
like_count.head()
m=pd.merge(m, like_count, how='left',left_on=[0], right_on=[0])
m.columns = ['pid','id','name','timeStamp','shares','url','msg','likes']
l.columns = ['pid','cid','response','id','name']
c.columns = ['pid','cid','timeStamp','id','name','rid','msg']
mem.columns= ['id','name','url','gid']
m['gid']=m['pid'].apply(lambda x: x.split('_')[0])
l['gid']=l['pid'].apply(lambda x: x.split('_')[0])
c['gid']=c['pid'].apply(lambda x: x.split('_')[0])
m['timeStamp']=pd.DatetimeIndex(m.timeStamp)
c['timeStamp']=pd.DatetimeIndex(c.timeStamp)
m['timeStamp']= m.timeStamp.apply(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
c['timeStamp']= c.timeStamp.apply(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
#['gid','pid', 'id', 'name', 'timeStamp', 'shares', 'url', 'msg','likes']
m_delta = pd.read_csv(DATADIR+"/post.csv",
header=0,names=['gid','pid', 'id','name', 'timeStamp', 'shares', 'url', 'msg', 'likes'],
dtype={'gid':str,'pid':str,'id':str,'name':str,'timeStamp':datetime.datetime,'shares':str,
'url':str,'msg':str,'likes':str},
parse_dates=['timeStamp'],date_parser=dateparse)
m=pd.concat([m,m_delta])
m.drop_duplicates(inplace=True)
l_delta = pd.read_csv(TMPDIR+"/like.csv",
header=0,names=['gid','pid','cid','response', 'id','name'],
dtype={'gid':str,'pid':str,'cid':str,'response':str,'id':str,'name':str})
l=pd.concat([l,l_delta])
l.drop_duplicates(inplace=True)
c_delta = pd.read_csv(TMPDIR+"/comment.csv",
header=0,names=['gid','pid', 'cid', 'timeStamp', 'id', 'name', 'rid', 'msg'],
dtype={'gid':str,'pid':str,'cid':str,'timeStamp':datetime.datetime,'id':str,
'name':str,'rid':str,'msg':str},
parse_dates=['timeStamp'],date_parser=dateparse)
c=pd.concat([c,c_delta])
c.drop_duplicates(inplace=True)
mem_delta = pd.read_csv(TMPDIR+"/member.csv",
header=0,names=['gid','id', 'name', 'url'],
dtype={'gid':str,'id':str,'name':str,'url':str})
mem=pd.concat([mem,mem_delta])
mem.drop_duplicates(inplace=True)
# Final drop duplicate check, it's funny with timeStamps...spaces?
c['timeStamp']=pd.DatetimeIndex(c.timeStamp)
m['timeStamp']=pd.DatetimeIndex(m.timeStamp)
m.drop_duplicates(subset=['gid','pid', 'id', 'name', 'timeStamp', 'shares', 'url', 'msg','likes'],inplace=True)
c.drop_duplicates(subset=['gid','pid', 'cid', 'timeStamp', 'id', 'name', 'rid', 'msg'],inplace=True)
l.drop_duplicates(subset=['gid','pid', 'cid', 'response', 'id', 'name'],inplace=True)
m.timeStamp= | pd.DatetimeIndex(m.timeStamp) | pandas.DatetimeIndex |
"""
Functions specific to preprocess raw extract data from HMIS.
The raw data is provided in the following format:
(king data is divided by year;
for pierce & snohomish all years are in one folder)
data/*county*/*year*/Affiliation.csv
Client.csv
Disabilities.csv
EmploymentEducation.csv
Enrollment_families.csv
Enrollment.csv
EnrollmentCoC.csv
Exit.csv
Export.csv
Funder.csv
HealthAndDV.csv
IncomeBenefits.csv
Inventory.csv
Organization.csv
Project.csv
ProjectCoC.csv
Services.csv
Site.csv
"""
import pandas as pd
import datetime
import os.path as op
import numpy as np
import json
import puget.utils as pu
import warnings
from puget.data import DATA_PATH
# Paths of csvs
COUNTY_FOLDERS = {'king': [str(s) for s in range(2012, 2017)],
'pierce': ['2012_2016'], 'snohomish': ['2012_2016']}
# these values translate to unknown data for various reasons. Treat as NANs
CATEGORICAL_UNKNOWN = [8, 9, 99]
# entry/exit suffixes for columns
ENTRY_EXIT_SUFFIX = ['_entry', '_exit', '_update']
# Names that should be excluded:
NAME_EXCLUSION = ["consent", "refused", "anonymous", "client",
"refsued", "noname", "unknown"]
# dict of default metadata file names
METADATA_FILES = {'enrollment': 'enrollment.json',
'exit': 'exit.json',
'client': 'client.json',
'disabilities': 'disabilities.json',
'employment_education': 'employment_education.json',
'health_dv': 'health_dv.json',
'income': 'income.json',
'project': 'project.json'}
for k, v in METADATA_FILES.items():
METADATA_FILES[k] = op.join(DATA_PATH, 'metadata', v)
file_path_boilerplate = (
"""
file_spec : dict or string
if a dict, keys should be paths, values should be full path to files
if a string, should be the filename of the .csv table and data_dir &
paths parameters are required
county: string
name of county to get data for. Must be a key in COUNTY_FOLDERS and
have a folder of the same name in the data folder. Not required
if file_spec is a dictionary
data_dir : string
full path to general data folder (usually puget/data/*county*);
not required if file_spec is a dictionary
paths : list
list of directories inside data_dir to look for csv files in;
not required if file_spec is a dictionary
""")
metdata_boilerplate = (
"""
metadata_file : string
name of json metadata file with lists of columns to use for
deduplication, columns to drop, categorical and time-like columns
""")
def std_path_setup(filename, data_dir, paths):
"""
Setup filenames for read_table assuming standard data directory structure.
Parameters
----------
filename : string
This should be the filename of the .csv table
data_dir : string
full path to general data folder (usually puget/data/*county*)
paths : list
list of directories inside data_dir to look for csv files in
Returns
----------
dict with key of paths, value of filenames for all included folders
"""
file_list = []
for p in paths:
file_list.append(op.join(data_dir, p, filename))
file_spec = dict(zip(paths, file_list))
return file_spec
def read_table(file_spec, county=None, data_dir=None, paths=None,
columns_to_drop=None, categorical_var=None,
categorical_unknown=CATEGORICAL_UNKNOWN,
time_var=None, duplicate_check_columns=None, dedup=True,
encoding=None, name_columns=None):
"""
Read in any .csv table from multiple folders in the raw data.
Parameters
----------
%s
columns_to_drop : list
A list of of columns to drop. The default is None.
categorical_var : list
A list of categorical (including binary) variables where values
listed in categorical_unknown should be recorded as NaNs
categorical_unknown: list
values that should be recorded as NaNs for categorical variables
typically: 8, 9, 99
time_var : list
A list of time (variables) in yyyy-mm-dd format that are
reformatted into pandas timestamps. Default is None.
duplicate_check_columns : list
list of columns to conside in deduplication.
Generally, duplicate rows may happen when the same record is
registered across the .csv files for each folder.
dedup: boolean
flag to turn on/off deduplication. Defaults to True
Returns
----------
dataframe of a csv tables from all included folders
"""
if columns_to_drop is None:
columns_to_drop = []
if categorical_var is None:
categorical_var = []
if time_var is None:
time_var = []
if not isinstance(file_spec, dict):
if data_dir is None:
if county is None:
raise ValueError('If file_spec is a string, data_dir or ' +
'county must be passed')
else:
if not isinstance(county, str):
raise ValueError('county must be a string -- '
'one county at a time, please!')
data_dir = op.join(DATA_PATH, county)
if paths is None:
if county is None:
raise ValueError('If file_spec is a string, paths or county ' +
'must be passed')
else:
if not isinstance(county, str):
raise ValueError('county must be a string -- '
'one county at a time, please!')
paths = COUNTY_FOLDERS[county]
file_spec = std_path_setup(file_spec, data_dir, paths)
else:
if data_dir is not None or paths is not None:
raise ValueError(
'If file_spec is a dict, data_dir and paths cannot be passed')
file_spec_use = file_spec.copy()
# Start by reading the first file into a DataFrame
path, fname = file_spec_use.popitem()
df = pd.read_csv(fname, low_memory=False, encoding=encoding)
# Then, for the rest of the files, append to the DataFrame.
for path, fname in file_spec_use.items():
this_df = pd.read_csv(fname, low_memory=False, encoding=encoding)
df = df.append(this_df)
# Sometimes, column headers can have the unicode 'zero width no-break space
# character' (http://www.fileformat.info/info/unicode/char/FEFF/index.htm)
# appended to them (because, why not?). We eliminate that here:
for col in df.columns:
if col.startswith('\ufeff'):
df.rename(columns={col: col[1:]}, inplace=True)
# Drop unnecessary columns
cols_drop_use = list(set(columns_to_drop).intersection(set(df.columns)))
df = df.drop(cols_drop_use, axis=1)
# Drop duplicates
if dedup:
if duplicate_check_columns is None:
warnings.warn('dedup is True but duplicate_check_columns is ' +
'None, no deduplication')
else:
df = df.drop_duplicates(duplicate_check_columns, keep='last',
inplace=False)
# Turn values in categorical_unknown in any categorical_var into NaNs
for col in categorical_var:
df[col] = df[col].replace(categorical_unknown,
[np.NaN, np.NaN, np.NaN])
# Reformat yyyy-mm-dd variables to pandas timestamps
for col in time_var:
df[col] = pd.to_datetime(df[col], errors='coerce')
return df
read_table.__doc__ = read_table.__doc__ % file_path_boilerplate
def split_rows_to_columns(df, category_column, category_suffix, merge_columns):
"""
create separate entry and exit columns for dataframes that have that
information provided as a column giving the collection stage
(coded as numerical values) and other columns containing the measurements
at entry/exit
Parameters
----------
df: dataframe
input dataframe
category_column : string
name of column containing the categories to be remapped to columns
category_suffix : dict
keys are values in category_column, values are suffixes to attach to
the column for that category
merge_columns: list or string
name(s) of column(s) containing to merge on.
Returns
----------
new dataframe with response columns split into *_entry and *_exit columns
"""
columns_to_rename = list(df.columns.values)
if isinstance(merge_columns, list):
for col in merge_columns:
columns_to_rename.remove(col)
else:
columns_to_rename.remove(merge_columns)
if isinstance(category_column, (list, tuple)):
e_s = "The type column (e.g. 'CollectionStage') needs to be defined as"
e_s += "a single string in the relevant metadata file. Cannot be a "
e_s += "container!"
raise TypeError(e_s)
columns_to_rename.remove(category_column)
# group by each type in turn
gb = df.groupby(category_column)
for index, tpl in enumerate(gb):
name, group = tpl
rename_dict = dict(zip(
columns_to_rename,
[s + category_suffix[name] for s in columns_to_rename]))
this_df = group.rename(columns=rename_dict).drop(category_column,
axis=1)
if index == 0:
df_wide = this_df
else:
df_wide = pd.merge(df_wide, this_df, how='outer',
left_on=merge_columns, right_on=merge_columns)
return df_wide
def read_entry_exit_table(metadata, county=None, file_spec=None, data_dir=None,
paths=None, suffixes=ENTRY_EXIT_SUFFIX):
"""
Read in tables with entry & exit values, convert entry & exit rows to
columns
Parameters
----------
metadata : string or dict
if dict: metadata dict
if string: name of json metadata file
lists of columns to use for deduplication, columns to drop,
categorical and time-like columns
ALSO names of columns containing collection stage and
person_enrollment_IDs, and values indicating entry and exit
collection stage
%s
Returns
----------
dataframe with one row per person per enrollment -- rows containing
separate entry & exit values are combined with different columns for
entry & exit
"""
if not isinstance(metadata, dict):
metadata = get_metadata_dict(metadata)
extra_metadata = {'collection_stage_column': None,
'entry_stage_val': None,
'exit_stage_val': None,
'update_stage_val': None,
'annual_assessment_stage_val': None,
'post_exit_stage_val': None,
'person_enrollment_ID': None}
for k in extra_metadata:
if k in metadata:
extra_metadata[k] = metadata.pop(k)
else:
raise ValueError(k + ' entry must be present in metadata file')
df = read_table(file_spec, county=county, data_dir=data_dir, paths=paths,
**metadata)
# Don't use the update stage data:
df = df[(df[extra_metadata['collection_stage_column']] !=
extra_metadata['update_stage_val']) &
(df[extra_metadata['collection_stage_column']] != extra_metadata['annual_assessment_stage_val']) &
(df[extra_metadata['collection_stage_column']] != extra_metadata['post_exit_stage_val'])]
df_wide = split_rows_to_columns(df, extra_metadata['collection_stage_column'],
dict(zip([extra_metadata['entry_stage_val'],
extra_metadata['exit_stage_val']], suffixes)),
extra_metadata['person_enrollment_ID'])
return df_wide
read_entry_exit_table.__doc__ = read_entry_exit_table.__doc__ % (
file_path_boilerplate)
def get_metadata_dict(metadata_file):
"""Little function to read a JSON metadata file into a dict."""
metadata_handle = open(metadata_file)
metadata = json.loads(metadata_handle.read())
_ = metadata.pop('name')
return metadata
def get_enrollment(county=None, groups=True, file_spec=None, data_dir=None,
paths=None, metadata_file=METADATA_FILES['enrollment']):
"""
Read in the raw Enrollment tables.
Return rows with some minor clean-up that
includes dropping unusable columns de-deplication.
Parameters
----------
%s
%s
groups : boolean
If true, only return rows for groups (>1 person)
Returns
----------
dataframe with rows representing enrollment record of a person per
enrollment, optionally with people who are not in groups removed
"""
if file_spec is None:
file_spec = 'Enrollment.csv'
metadata = get_metadata_dict(metadata_file)
groupID_column = metadata.pop('groupID_column')
enid_column = metadata.pop('person_enrollment_ID')
pid_column = metadata.pop('person_ID')
prid_column = metadata.pop('program_ID')
entry_date_column = metadata.pop('entry_date')
df = read_table(file_spec, county=county, data_dir=data_dir, paths=paths,
**metadata)
# Now, group by HouseholdID, and only keep the groups where there are
# more than one ProjectEntryID.
# The new dataframe should represent families
# (as opposed to single people).
if groups:
gb = df.groupby(groupID_column)
def more_than_one(x): return (x.shape[0] > 1)
df = gb.filter(more_than_one)
df = df.sort_values(by=groupID_column)
return df
get_enrollment.__doc__ = get_enrollment.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def get_exit(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['exit']):
"""
Read in the raw Exit tables and map destinations.
Parameters
----------
%s
%s
Returns
----------
dataframe with rows representing exit record of a person per enrollment
"""
if file_spec is None:
file_spec = 'Exit.csv'
metadata = get_metadata_dict(metadata_file)
df_destination_column = metadata.pop('destination_column')
enid_column = metadata.pop('person_enrollment_ID')
df = read_table(file_spec, county=county, data_dir=data_dir, paths=paths,
**metadata)
df_merge = pu.merge_destination(
df, df_destination_column=df_destination_column)
return df_merge
get_exit.__doc__ = get_exit.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def get_client(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['client'],
name_exclusion=False):
"""
Read in the raw Client tables.
Parameters
----------
%s
%s
Returns
----------
dataframe with rows representing demographic information of a person
"""
if file_spec is None:
file_spec = 'Client.csv'
metadata = get_metadata_dict(metadata_file)
# Don't want to deduplicate before checking if DOB is sane because the last
# entry is taken in deduplication but the first entry indicates how early
# they entered the system
duplicate_check_columns = metadata.pop('duplicate_check_columns')
if 'boolean' in metadata:
boolean_cols = metadata.pop('boolean')
else:
boolean_cols = []
warnings.warn('boolean_cols is None')
if 'numeric_code' in metadata:
numeric_cols = metadata.pop('numeric_code')
else:
numeric_cols = []
warnings.warn('numeric_cols is None')
if 'person_ID' in metadata:
pid_column = metadata.pop('person_ID')
else:
raise ValueError('person_ID entry must be present in metadata file')
dob_column = metadata.pop("dob_column")
# for initial deduplication, don't deduplicate time_var, boolean or
# numeric columns until after resolving differences
mid_dedup_cols = list(set(list(duplicate_check_columns) +
list(metadata['time_var']) +
list(boolean_cols) + list(numeric_cols) +
[pid_column]))
df = read_table(file_spec, county=county, data_dir=data_dir, paths=paths,
duplicate_check_columns=mid_dedup_cols, **metadata)
df = df.set_index(np.arange(df.shape[0]))
# iterate through people with more than one entry and resolve differences.
# Set all rows to the same sensible value
gb = df.groupby(pid_column)
n_entries = gb.size()
for pid, group in gb:
# turn off SettingWithCopy warning for this object
group.is_copy = False
if n_entries.loc[pid] > 1:
# for differences in time columns, if the difference is less than
# a year then take the midpoint, otherwise set to NaN
for col in metadata['time_var']:
if col == dob_column:
continue
if len(np.unique(group[col])) > 1:
is_valid = ~pd.isnull(group[col])
n_valid = np.sum(is_valid)
if n_valid == 1:
group[col] = group[col][is_valid].values[0]
elif n_valid > 1:
t_diff = np.max(group[col]) - np.min(group[col])
if t_diff < datetime.timedelta(365):
t_diff_sec = t_diff.seconds + 86400 * t_diff.days
new_date = (np.min(group[col]) +
datetime.timedelta(
seconds=t_diff_sec / 2.)).date()
group[col] = pd.datetime(new_date.year,
new_date.month,
new_date.day)
else:
group[col] = pd.NaT
# for differences in boolean columns, if ever true then set to true
for col in boolean_cols:
if len(np.unique(group[col])) > 1:
is_valid = ~pd.isnull(group[col])
n_valid = np.sum(is_valid)
if n_valid == 1:
group[col] = group[col][is_valid].values[0]
elif n_valid > 1:
group[col] = np.max(group[col][is_valid])
# for differences in numeric type columns, if there are conflicting
# valid answers, set to NaN
for col in numeric_cols:
if len(np.unique(group[col])) > 1:
is_valid = ~pd.isnull(group[col])
n_valid = np.sum(is_valid)
if n_valid == 1:
group[col] = group[col][is_valid].values[0]
elif n_valid > 1:
group[col] = np.nan
# push these changes back into the dataframe
df.iloc[np.where(df[pid_column] == pid)[0]] = group
# Now all rows with the same pid_column have identical time_var,
# boolean & numeric_col values so we can perform full deduplication
# that was skipped in read_table.
# Note: we can still have multiple rows if DOBs are different,
# we leave that deduplication until the merge because we need enrollment
# info to determine if DOBs are sane
df = df.drop_duplicates(duplicate_check_columns, keep='last',
inplace=False)
if name_exclusion:
name_cols = metadata.pop('name_columns')
df['exclude_by_name'] = df.apply(_name_exclude,
args=[name_cols, NAME_EXCLUSION],
axis=1)
# The function returns True for keepers:
df = df[df["exclude_by_name"]]
df.drop(['exclude_by_name'], axis=1, inplace=True)
return df
get_client.__doc__ = get_client.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def get_disabilities(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['disabilities'],
disability_type_file=op.join(DATA_PATH, 'metadata',
'disability_type.json')):
"""
Read in the raw Disabilities tables, convert sets of disablity type
and response rows to columns to reduce to one row per
primaryID (ie ProjectEntryID) with a column per disability type
Parameters
----------
%s
%s
disability_type_file : string
name of json file with mapping between disability numeric codes and
string description
Returns
----------
dataframe with rows representing presence of disability types at entry &
exit of a person per enrollment
"""
if file_spec is None:
file_spec = 'Disabilities.csv'
metadata = get_metadata_dict(metadata_file)
extra_metadata = {'type_column': None,
'response_column': None}
for k in extra_metadata:
if k in metadata:
extra_metadata[k] = metadata.pop(k)
else:
raise ValueError(k + ' entry must be present in metadata file')
extra_metadata['person_enrollment_ID'] = metadata['person_enrollment_ID']
stage_suffixes = ENTRY_EXIT_SUFFIX
df_stage = read_entry_exit_table(metadata, county=county,
file_spec=file_spec, data_dir=data_dir,
paths=paths, suffixes=stage_suffixes)
mapping_dict = get_metadata_dict(disability_type_file)
# convert to integer keys
mapping_dict = {int(k): v for k, v in mapping_dict.items()}
type_suffixes = ['_' + s for s in mapping_dict.values()]
merge_columns = [extra_metadata['person_enrollment_ID'],
extra_metadata['type_column'] + stage_suffixes[1],
extra_metadata['response_column'] + stage_suffixes[1]]
df_type1 = split_rows_to_columns(df_stage, (extra_metadata['type_column'] +
stage_suffixes[0]),
dict(zip(list(mapping_dict.keys()),
type_suffixes)), merge_columns)
merge_columns = [extra_metadata['person_enrollment_ID']]
for ts in type_suffixes:
col = extra_metadata['response_column'] + stage_suffixes[0] + ts
if col in list(df_type1.columns.values):
merge_columns.append(col)
df_wide = split_rows_to_columns(df_type1, (extra_metadata['type_column'] +
stage_suffixes[1]),
dict(zip(list(mapping_dict.keys()),
type_suffixes)), merge_columns)
response_cols = []
new_cols = []
for ss in stage_suffixes:
for i, ts in enumerate(type_suffixes):
col = extra_metadata['response_column'] + ss + ts
if col in list(df_wide.columns.values):
response_cols.append(col)
new_cols.append(ts[1:] + ss)
rename_dict = dict(zip(response_cols, new_cols))
df_wide = df_wide.rename(columns=rename_dict)
return df_wide
get_disabilities.__doc__ = get_disabilities.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def get_employment_education(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['employment_education']):
"""
Read in the raw EmploymentEducation tables.
Parameters
----------
%s
%s
Returns
----------
dataframe with rows representing employment and education at entry & exit
of a person per enrollment
"""
if file_spec is None:
file_spec = 'EmploymentEducation.csv'
df_wide = read_entry_exit_table(metadata_file, county=county,
file_spec=file_spec, data_dir=data_dir,
paths=paths)
return df_wide
get_employment_education.__doc__ = get_employment_education.__doc__ % (
file_path_boilerplate, metdata_boilerplate)
def get_health_dv(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['health_dv']):
"""
Read in the raw HealthAndDV tables.
Parameters
----------
%s
%s
Returns
----------
dataframe with rows representing employment and education at entry & exit
of a person per enrollment
"""
if file_spec is None:
file_spec = 'HealthAndDV.csv'
df_wide = read_entry_exit_table(metadata_file, county=county,
file_spec=file_spec, data_dir=data_dir,
paths=paths)
return df_wide
get_health_dv.__doc__ = get_health_dv.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def get_income(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['income']):
"""
Read in the raw IncomeBenefits tables.
Parameters
----------
%s
%s
Returns
----------
dataframe with rows representing income at entry & exit of a person per
enrollment
"""
if file_spec is None:
file_spec = 'IncomeBenefits.csv'
metadata = get_metadata_dict(metadata_file)
if 'columns_to_take_max' in metadata:
columns_to_take_max = metadata.pop('columns_to_take_max')
else:
raise ValueError('columns_to_take_max entry must be present in' +
' metadata file')
person_enrollment_ID = metadata['person_enrollment_ID']
suffixes = ENTRY_EXIT_SUFFIX
df_wide = read_entry_exit_table(metadata, county=county,
file_spec=file_spec, data_dir=data_dir,
paths=paths, suffixes=suffixes)
maximize_cols = []
for sf in suffixes:
for col in columns_to_take_max:
colname = col + sf
maximize_cols.append(colname)
non_max_cols = [x for x in df_wide.columns.values
if x not in maximize_cols]
for col in non_max_cols:
if (col != person_enrollment_ID):
warnings.warn(col + ' column is not the person_enrollment_ID and' +
' is not in maximize_cols so only the first value' +
' per projectID per entry or exit will be kept')
gb = df_wide.groupby(person_enrollment_ID)
for index, tpl in enumerate(gb):
name, group = tpl
if len(group) == 1:
continue
update_dict = {}
for col in maximize_cols:
if col in group.columns:
max_val = group[col].max()
row_index = df_wide[df_wide[person_enrollment_ID] == name].index.tolist()
df_wide.set_value(row_index[0], col, max_val)
df_wide = df_wide.drop_duplicates([person_enrollment_ID])
return df_wide
get_income.__doc__ = get_income.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def get_project(county=None, file_spec=None, data_dir=None, paths=None,
metadata_file=METADATA_FILES['project'],
project_type_file=op.join(DATA_PATH, 'metadata',
'project_type.json')):
"""
Read in the raw Exit tables and map to project info.
Parameters
----------
%s
%s
Returns
----------
dataframe with rows representing exit record of a person per enrollment
"""
if file_spec is None:
file_spec = 'Project.csv'
metadata = get_metadata_dict(metadata_file)
project_type_column = metadata.pop('project_type_column')
projectID = metadata.pop('program_ID')
df = read_table(file_spec, county=county, data_dir=data_dir, paths=paths,
**metadata)
# get project_type dict
mapping_dict = get_metadata_dict(project_type_file)
# convert to integer keys
mapping_dict = {int(k): v for k, v in mapping_dict.items()}
map_df = pd.DataFrame(columns=['ProjectNumeric'],
data=list(mapping_dict.keys()))
map_df['ProjectType'] = list(mapping_dict.values())
if project_type_column == 'ProjectType':
df = df.rename(index=str,
columns={project_type_column: 'ProjectTypeNum'})
project_type_column = 'ProjectTypeNum'
df_merge = pd.merge(left=df, right=map_df, how='left',
left_on=project_type_column,
right_on='ProjectNumeric')
df_merge = df_merge.drop(project_type_column, axis=1)
return df_merge
get_project.__doc__ = get_project.__doc__ % (file_path_boilerplate,
metdata_boilerplate)
def merge_tables(county=None, meta_files=METADATA_FILES, data_dir=None,
paths=None, files=None, groups=True, name_exclusion=False):
""" Run all functions that clean up raw tables separately, and merge them
all into the enrollment table, where each row represents the project
enrollment of an individual.
Parameters
----------
county: string
name of county
meta_files: dict
dictionary giving names of metadata files for each table type
If any table type is missing it is defaulted using METADATA_FILES
files: dict
dictionary giving short data file names for each table type.
(these must be combined with data_dir, county & paths to get
the full file names)
If any table type is missing the file name is defaulted in the
respective get_* functions
data_dir : string
full path to general data folder (usually puget/data)
paths : list
list of directories inside data_dir to look for csv files in
Returns
----------
dataframe with rows representing the record of a person per
project enrollment
"""
if not isinstance(files, dict):
files = {}
# Get enrollment data
enroll = get_enrollment(county=county,
file_spec=files.get('enrollment', None),
metadata_file=meta_files.get('enrollment', None),
groups=groups, data_dir=data_dir,
paths=paths)
print('enroll n_rows:', len(enroll))
enrollment_metadata = get_metadata_dict(meta_files.get('enrollment',
METADATA_FILES['enrollment']))
enrollment_enid_column = enrollment_metadata['person_enrollment_ID']
enrollment_pid_column = enrollment_metadata['person_ID']
enrollment_prid_column = enrollment_metadata['program_ID']
# print(enroll)
# Merge exit in
exit_table = get_exit(county=county, file_spec=files.get('exit', None),
metadata_file=meta_files.get('exit', None),
data_dir=data_dir, paths=paths)
print('exit n_rows:', len(exit_table))
exit_metadata = get_metadata_dict(meta_files.get('exit',
METADATA_FILES['exit']))
exit_ppid_column = exit_metadata['person_enrollment_ID']
enroll_merge = pd.merge(left=enroll, right=exit_table, how='left',
left_on=enrollment_enid_column,
right_on=exit_ppid_column)
if enrollment_enid_column != exit_ppid_column and \
exit_ppid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(exit_ppid_column, axis=1)
# Merge client in
client = get_client(county=county, file_spec=files.get('client', None),
metadata_file=meta_files.get('client', None),
data_dir=data_dir, paths=paths,
name_exclusion=name_exclusion)
print('client n_rows:', len(client))
client_metadata = get_metadata_dict(meta_files.get('client',
METADATA_FILES['client']))
client_pid_column = client_metadata['person_ID']
dob_column = client_metadata['dob_column']
n_bad_dob = 0
# set any DOBs to NaNs if they are in the future relative to the earliest
# enrollment. Also set to NaN if the DOB is too early (pre 1900)
gb = client.groupby(client_pid_column)
for pid, group in gb:
enroll_dates = enroll_merge[enroll_merge[enrollment_pid_column] ==
pid][enrollment_metadata['entry_date']]
earliest_enrollment = enroll_dates.min()
bad_dob = np.logical_or(group[dob_column] > earliest_enrollment,
group[dob_column] < pd.to_datetime(
'1900/1/1', format='%Y/%m/%d'))
n_bad_dob += np.sum(bad_dob)
n_entries = group.shape[0]
if np.sum(bad_dob) > 0:
if n_entries == 1:
client.loc[group.index, dob_column] = pd.NaT
else:
if max(group[dob_column]) == min(group[dob_column]):
client.loc[group.index, dob_column] = pd.NaT
else:
client.loc[group.index[np.where(bad_dob)],
dob_column] = pd.NaT
gb = client.groupby(client_pid_column)
for pid, group in gb:
n_entries = group.shape[0]
if n_entries > 1:
# for differences in DOB, if the difference is less than
# a year then take the midpoint, otherwise set to NaN
if len(np.unique(group[dob_column])) > 1:
is_valid = ~pd.isnull(group[dob_column])
n_valid = np.sum(is_valid)
if n_valid == 1:
client.loc[group.index, dob_column] = \
group[dob_column][is_valid].values[0]
elif n_valid > 1:
t_diff = (np.max(group[dob_column]) -
np.min(group[dob_column]))
if t_diff < datetime.timedelta(365):
t_diff_sec = t_diff.seconds + 86400 * t_diff.days
new_date = (np.min(group[dob_column]) +
datetime.timedelta(
seconds=t_diff_sec / 2.)).date()
client.loc[group.index, dob_column] = \
pd.datetime(new_date.year, new_date.month,
new_date.day)
else:
client.loc[group.index, dob_column] = pd.NaT
# now drop duplicates
client = client.drop_duplicates(client_metadata['duplicate_check_columns'],
keep='last', inplace=False)
print('Found %d entries with bad DOBs' % n_bad_dob)
enroll_merge = pd.merge(left=enroll_merge, right=client, how='right',
left_on=enrollment_pid_column,
right_on=client_pid_column)
if enrollment_pid_column != client_pid_column and \
client_pid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(client_pid_column, axis=1)
# Merge disabilities in
disabilities = get_disabilities(county=county,
file_spec=files.get('disabilities', None),
metadata_file=meta_files.get('disabilities', None),
data_dir=data_dir,
paths=paths)
print('disabilities n_rows:', len(disabilities))
disabilities_metadata = get_metadata_dict(meta_files.get('disabilities',
METADATA_FILES['disabilities']))
disabilities_ppid_column = disabilities_metadata['person_enrollment_ID']
enroll_merge = enroll_merge.merge(disabilities, how='left',
left_on=enrollment_enid_column,
right_on=disabilities_ppid_column)
if enrollment_enid_column != disabilities_ppid_column and \
disabilities_ppid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(disabilities_ppid_column, axis=1)
# Merge employment_education in
emp_edu = get_employment_education(county=county,
file_spec=files.get('employment_education', None),
metadata_file=meta_files.get('employment_education', None),
data_dir=data_dir, paths=paths)
print('emp_edu n_rows:', len(emp_edu))
emp_edu_metadata = get_metadata_dict(meta_files.get('employment_education',
METADATA_FILES['employment_education']))
emp_edu_ppid_column = emp_edu_metadata['person_enrollment_ID']
enroll_merge = enroll_merge.merge(emp_edu, how='left',
left_on=enrollment_enid_column,
right_on=emp_edu_ppid_column)
if enrollment_enid_column != emp_edu_ppid_column and \
emp_edu_ppid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(emp_edu_ppid_column, axis=1)
# Merge health in
health_dv = get_health_dv(county=county,
file_spec=files.get('health_dv', None),
metadata_file=meta_files.get('health_dv', None),
data_dir=data_dir, paths=paths)
print('health_dv n_rows:', len(health_dv))
health_dv_metadata = get_metadata_dict(meta_files.get('health_dv',
METADATA_FILES['health_dv']))
health_dv_ppid_column = health_dv_metadata['person_enrollment_ID']
enroll_merge = enroll_merge.merge(health_dv, how='left',
left_on=enrollment_enid_column,
right_on=health_dv_ppid_column)
if enrollment_enid_column != health_dv_ppid_column and \
health_dv_ppid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(health_dv_ppid_column, axis=1)
# Merge income in
income = get_income(county=county, file_spec=files.get('income', None),
metadata_file=meta_files.get('income', None),
data_dir=data_dir, paths=paths)
print('income n_rows:', len(income))
income_metadata = get_metadata_dict(meta_files.get('income',
METADATA_FILES['income']))
income_ppid_column = income_metadata['person_enrollment_ID']
enroll_merge = enroll_merge.merge(income, how='left',
left_on=enrollment_enid_column,
right_on=income_ppid_column)
if enrollment_enid_column != income_ppid_column and \
income_ppid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(income_ppid_column, axis=1)
# Merge project in
project = get_project(county=county, file_spec=files.get('project', None),
metadata_file=meta_files.get('project', None),
data_dir=data_dir, paths=paths)
print('project n_rows:', len(project))
project_metadata = get_metadata_dict(meta_files.get('project',
METADATA_FILES['project']))
project_prid_column = project_metadata['program_ID']
enroll_merge = enroll_merge.merge(project, how='left',
left_on=enrollment_prid_column,
right_on=project_prid_column)
if enrollment_prid_column != project_prid_column and \
project_prid_column in enroll_merge.columns:
enroll_merge = enroll_merge.drop(project_prid_column, axis=1)
return enroll_merge
def _has_digit(my_str):
return any(char.isdigit() for char in my_str)
def _is_in_exclusion(my_str, exclusion_list):
for item in exclusion_list:
if item in my_str:
return True
return False
def _name_exclude(row,
name_cols,
exclusion_list=NAME_EXCLUSION):
"""
Criteria for name exclusion. Returns True for keepers.
"""
for c in name_cols:
if | pd.isnull(row[c]) | pandas.isnull |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = | MultiIndex.from_tuples([('A', 1), ('A', 2)]) | pandas.MultiIndex.from_tuples |
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import os
from wordcloud import WordCloud
import jieba
file_dir="./Danmu/"
# 获取文件名
files=[files for root,dirs,files in os.walk(file_dir)]
# 去重
def duplicate(files):
for file in files:
df = pd.read_csv(file_dir + file,encoding="utf-8-sig",index_col=0)
data = df.drop_duplicates(subset=['DM_id'], keep='first')
data.to_csv(file_dir + file,encoding='utf-8-sig',index=True,index_label="")
print("去重完毕")
# 每一期弹幕总数的变化折线图
def danmuSumPlot(files):
print("弹幕总数变化图绘制中...")
list1 = ['110','111','112','113','114','115','116','117','118','119','120','121','122','123','124']
data_sum=[]
for file in files:
data = pd.read_csv(file_dir + file,encoding="utf-8-sig",index_col=0)
data_sum.append(len(data))
matplotlib.rcParams["font.family"] = "SimHei"
plt.plot(list1, data_sum, "c")
plt.ylabel("弹幕数")
plt.xlabel("《睡前消息》期数")
plt.title("每一期弹幕总数的变化图")
plt.savefig('./Analysis/弹幕总数变化图', dpi=600)
plt.show()
print("绘制完毕")
# 发弹幕总数TOP10的用户柱状图
def danmuUserTopBarh(files):
print("弹幕TOP10用户图绘制中...")
datas=[]
for file in files:
datas.append(pd.read_csv(file_dir + file,encoding="utf-8-sig",index_col=0))
# 先合并全部csv文件,再进行统计
data=pd.concat(datas)
data = data.groupby('DM_userID').size().reset_index(name="count")
data = data.sort_values("count", ascending=False)
label = [] # y轴的值
width = [] # 给出具体每个直方图的数值
i = 0
for item in data.values:
if i < 10:
label.append(item[0])
width.append(item[1])
i += 1
else:
break
matplotlib.rcParams["font.family"] = "SimHei"
y = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1] # 给出在y轴上的位置
plt.barh(y=y, width=width, tick_label=label) # 绘制水平直方图
plt.ylabel("用户ID")
plt.xlabel("弹幕数")
plt.title("发弹幕总数TOP10的用户柱状图")
plt.subplots_adjust(left=0.17) # 控制图片左边的间隔 避免显示不全
plt.savefig('./Analysis/TOP10', dpi=600, left=0.17)
print("绘制完毕")
# 每期弹幕密度变化图
def danmuDensityChange(files):
print("弹幕密度变化图绘制中...")
sets=110
for file in files:
data = | pd.read_csv(file_dir + file, encoding="utf-8-sig", index_col=0) | pandas.read_csv |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""Base module for record linkage."""
from __future__ import division
import time
import warnings
from abc import ABCMeta, abstractmethod
import six
import pandas
import numpy as np
from joblib import Parallel, delayed
from recordlinkage.utils import (listify,
unique,
is_label_dataframe,
VisibleDeprecationWarning,
return_type_deprecator,
index_split,
frame_indexing)
from recordlinkage.types import (is_numpy_like,
is_pandas_2d_multiindex)
from recordlinkage.measures import max_pairs
from recordlinkage import rl_logging as logging
from recordlinkage.utils import LearningError, DeprecationHelper
import recordlinkage.config as cf
def _parallel_compare_helper(class_obj, pairs, x, x_link=None):
"""Internal function to overcome pickling problem in python2."""
return class_obj._compute(pairs, x, x_link)
def chunk_pandas(frame_or_series, chunksize=None):
"""Chunk a frame into smaller, equal parts."""
if not isinstance(chunksize, int):
raise ValueError('argument chunksize needs to be integer type')
bins = np.arange(0, len(frame_or_series), step=chunksize)
for b in bins:
yield frame_or_series[b:b + chunksize]
class BaseIndex(object):
"""Base class for all index classes in Python Record Linkage Toolkit.
Can be used for index passes.
"""
def __init__(self, algorithms=[]):
logging.info("indexing - initialize {} class".format(
self.__class__.__name__)
)
self.algorithms = []
self.add(algorithms)
# logging
self._i = 1
self._i_max = None
self._n = []
self._n_max = []
self._eta = []
self._output_log_total = True
def __repr__(self):
class_name = self.__class__.__name__
return "<{}>".format(class_name)
def __str__(self):
return repr(self)
def add(self, model):
"""Add a index method.
This method is used to add index algorithms. If multiple algorithms
are added, the union of the record pairs from the algorithm is taken.
Parameters
----------
model : list, class
A (list of) index algorithm(s) from
:mod:`recordlinkage.index`.
"""
if isinstance(model, list):
self.algorithms = self.algorithms + model
else:
self.algorithms.append(model)
def index(self, x, x_link=None):
"""Make an index of record pairs.
Parameters
----------
x: pandas.DataFrame
A pandas DataFrame. When `x_link` is None, the algorithm makes
record pairs within the DataFrame. When `x_link` is not empty,
the algorithm makes pairs between `x` and `x_link`.
x_link: pandas.DataFrame, optional
A second DataFrame to link with the DataFrame x.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair contains
the index labels of two records.
"""
if not self.algorithms:
raise ValueError("No algorithms given.")
# start timing
start_time = time.time()
pairs = None
for cl_alg in self.algorithms:
pairs_i = cl_alg.index(x, x_link)
if pairs is None:
pairs = pairs_i
else:
pairs = pairs.union(pairs_i)
if x_link is not None:
n_max = max_pairs((x, x_link))
else:
n_max = max_pairs(x)
# store the number of pairs
n = pairs.shape[0]
eta = time.time() - start_time
rr = 1 - n / n_max
i_max = '?' if self._i_max is None else self._i_max
self._eta.append(eta)
self._n.append(n)
self._n_max.append(n_max)
# log
logging.info("indexing [{:d}/{}] - time: {:.2f}s - pairs: {:d}/{:d} - "
"rr: {:0.5f}".format(self._i, i_max, eta, n, n_max, rr))
# log total
if self._output_log_total:
n_total = np.sum(self._n)
n_max_total = np.sum(self._n_max)
rr_avg = 1 - n_total / n_max_total
eta_total = np.sum(self._eta)
logging.info("indexing [{:d}/{}] - time: {:.2f}s - "
"pairs_total: {:d}/{:d} - rr_total: {:0.5f}".format(
self._i, i_max, eta_total,
n_total, n_max_total, rr_avg))
self._i += 1
return pairs
class BaseIndexAlgorithm(object):
"""Base class for all index algorithms.
BaseIndexAlgorithm is an abstract class for indexing algorithms.
The method
:func:`~recordlinkage.base.BaseIndexAlgorithm._link_index`
Parameters
----------
verify_integrity : bool
Verify the integrity of the input dataframe(s). The index is
checked for duplicate values.
suffixes : tuple
If the names of the resulting MultiIndex are identical, the
suffixes are used to distinguish the names.
Example
-------
Make your own indexation class::
class CustomIndex(BaseIndexAlgorithm):
def _link_index(self, df_a, df_b):
# Custom link index.
return ...
def _dedup_index(self, df_a):
# Custom deduplication index, optional.
return ...
Call the class in the same way::
custom_index = CustomIndex():
custom_index.index()
"""
name = None
description = None
def __init__(self, verify_integrity=True, suffixes=('_1', '_2')):
super(BaseIndexAlgorithm, self).__init__()
self.suffixes = suffixes
self.verify_integrity = verify_integrity
def __repr__(self):
class_name = self.__class__.__name__
return "<{}>".format(class_name)
def __str__(self):
return repr(self)
def _deduplication(self, x):
if isinstance(x, (tuple, list)) and len(x) > 1:
return False
else:
return True
def _verify_integrety(self, x):
if isinstance(x.index, pandas.Index):
if not x.index.is_unique:
raise ValueError('index of DataFrame is not unique')
elif isinstance(x.index, pandas.MultiIndex):
raise ValueError(
'expected pandas.Index instead of pandas.MultiIndex'
)
def _link_index(self, df_a, df_b):
"""Build an index for linking two datasets.
Parameters
----------
df_a : (tuple of) pandas.Series
The data of the left DataFrame to build the index with.
df_b : (tuple of) pandas.Series
The data of the right DataFrame to build the index with.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair
contains the index values of two records.
"""
raise NotImplementedError(
"Not possible to call index for the BaseEstimator"
)
def _dedup_index(self, df_a):
"""Build an index for deduplicating a dataset.
Parameters
----------
df_a : (tuple of) pandas.Series
The data of the DataFrame to build the index with.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair
contains the index values of two records. The records are
sampled from the lower triangular part of the matrix.
"""
pairs = self._link_index(df_a, df_a)
# Remove all pairs not in the lower triangular part of the matrix.
# This part can be inproved by not comparing the level values, but the
# level itself.
pairs = pairs[pairs.labels[0] > pairs.labels[1]]
return pairs
def _make_index_names(self, name1, name2):
if pandas.notnull(name1) and pandas.notnull(name2) and \
(name1 == name2):
return ["{}{}".format(name1, self.suffixes[0]),
"{}{}".format(name1, self.suffixes[1])]
else:
return [name1, name2]
def fit(self):
raise AttributeError("indexing object has no attribute 'fit'")
def index(self, x, x_link=None):
"""Make an index of record pairs.
Use a custom function to make record pairs of one or two dataframes.
Each function should return a pandas.MultiIndex with record pairs.
Parameters
----------
x: pandas.DataFrame
A pandas DataFrame. When `x_link` is None, the algorithm makes
record pairs within the DataFrame. When `x_link` is not empty,
the algorithm makes pairs between `x` and `x_link`.
x_link: pandas.DataFrame, optional
A second DataFrame to link with the DataFrame x.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair contains
the index labels of two records.
"""
if x is None: # error
raise ValueError("provide at least one dataframe")
elif x_link is not None: # linking (two arg)
x = (x, x_link)
elif isinstance(x, (list, tuple)): # dedup or linking (single arg)
x = tuple(x)
else: # dedup (single arg)
x = (x,)
if self.verify_integrity:
for df in x:
self._verify_integrety(df)
# linking
if not self._deduplication(x):
pairs = self._link_index(*x)
names = self._make_index_names(x[0].index.name, x[1].index.name)
# deduplication
else:
pairs = self._dedup_index(*x)
names = self._make_index_names(x[0].index.name, x[0].index.name)
pairs.rename(names, inplace=True)
return pairs
BaseIndexator = DeprecationHelper(BaseIndexAlgorithm)
class BaseCompareFeature(object):
"""Base abstract class for compare feature engineering.
Parameters
----------
labels_left : list, str, int
The labels to use for comparing record pairs in the left
dataframe.
labels_right : list, str, int
The labels to use for comparing record pairs in the right
dataframe (linking) or left dataframe (deduplication).
args : tuple
Additional arguments to pass to the `_compare_vectorized`
method.
kwargs : tuple
Keyword additional arguments to pass to the `_compare_vectorized`
method.
label : list, str, int
The indentifying label(s) for the returned values.
"""
name = None
description = None
def __init__(self, labels_left, labels_right, args=(), kwargs={},
label=None):
self.labels_left = labels_left
self.labels_right = labels_right
self.args = args
self.kwargs = kwargs
self.label = label
self._f_compare_vectorized = None
def _repr(self):
return "<{} {!r}>".format(self.__class__.__name__, self.label)
def __repr__(self):
return self._repr()
def __str__(self):
return repr(self)
def _compute_vectorized(self, *args):
"""Compare attributes (vectorized)
Parameters
----------
*args : pandas.Series
pandas.Series' as arguments.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
"""
if self._f_compare_vectorized:
return self._f_compare_vectorized(
*(args + self.args), **self.kwargs)
else:
raise NotImplementedError()
def _compute(self, left_on, right_on):
"""Compare the data on the left and right.
:meth:`BaseCompareFeature._compute` and
:meth:`BaseCompareFeature.compute` differ on the accepted
arguments. `_compute` accepts indexed data while `compute`
accepts the record pairs and the DataFrame's.
Parameters
----------
left_on : (tuple of) pandas.Series
Data to compare with `right_on`
right_on : (tuple of) pandas.Series
Data to compare with `left_on`
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
"""
result = self._compute_vectorized(*tuple(left_on + right_on))
return result
def compute(self, pairs, x, x_link=None):
"""Compare the records of each record pair.
Calling this method starts the comparing of records.
Parameters
----------
pairs : pandas.MultiIndex
A pandas MultiIndex with the record pairs to compare. The indices
in the MultiIndex are indices of the DataFrame(s) to link.
x : pandas.DataFrame
The DataFrame to link. If `x_link` is given, the comparing is a
linking problem. If `x_link` is not given, the problem is one of
deduplication.
x_link : pandas.DataFrame, optional
The second DataFrame.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
"""
if not is_pandas_2d_multiindex(pairs):
raise ValueError(
"expected pandas.MultiIndex with record pair indices "
"as first argument"
)
if not isinstance(x, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as second argument")
if x_link is not None and not isinstance(x_link, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as third argument")
labels_left = listify(self.labels_left, [])
labels_right = listify(self.labels_right, [])
if x_link is None:
df_a = frame_indexing(x[labels_left + labels_right], pairs, 0)
data1 = tuple([df_a[lbl] for lbl in listify(self.labels_left)])
data2 = tuple([df_a[lbl] for lbl in listify(self.labels_right)])
else:
df_a = frame_indexing(x[labels_left], pairs, 0)
data1 = tuple([df_a[lbl] for lbl in listify(self.labels_left)])
df_b = frame_indexing(x_link[labels_right], pairs, 1)
data2 = tuple([df_b[lbl] for lbl in listify(self.labels_right)])
results = self._compute(data1, data2)
return results
class BaseCompare(object):
"""Base class for all comparing classes in Python Record Linkage Toolkit.
Parameters
----------
features : list
List of compare algorithms.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for comparing of record pairs.
If -1, then the number of jobs is set to the number of cores.
indexing_type : string, optional (default='label')
The indexing type. The MultiIndex is used to index the DataFrame(s).
This can be done with pandas ``.loc`` or with ``.iloc``. Use the value
'label' to make use of ``.loc`` and 'position' to make use of
``.iloc``. The value 'position' is only available when the MultiIndex
consists of integers. The value 'position' is much faster.
Attributes
----------
features: list
A list of algorithms to create features.
"""
def __init__(self, features=[], n_jobs=1, indexing_type='label',
**kwargs):
logging.info("comparing - initialize {} class".format(
self.__class__.__name__)
)
self.features = []
self.add(features)
# public
self.n_jobs = n_jobs
self.indexing_type = indexing_type # label of position
self.features = []
# logging
self._i = 1
self._i_max = None
self._n = []
self._eta = []
self._output_log_total = True
# private
self._compare_functions = []
if isinstance(features, (pandas.MultiIndex, pandas.Index)):
warnings.warn(
"It seems you are using the older version of the Compare API, "
"see the documentation about how to update to the new API. "
"http://recordlinkage.readthedocs.io/"
"en/latest/ref-compare.html",
VisibleDeprecationWarning
)
def __repr__(self):
class_name = self.__class__.__name__
return "<{}>".format(class_name)
def __str__(self):
return repr(self)
def add(self, model):
"""Add a compare method.
This method is used to add compare features.
Parameters
----------
model : list, class
A (list of) compare feature(s) from
:mod:`recordlinkage.compare`.
"""
self.features.append(model)
def compare_vectorized(self, comp_func, labels_left, labels_right,
*args, **kwargs):
"""Compute the similarity between values with a callable.
This method initialises the comparing of values with a custom
function/callable. The function/callable should accept
numpy.ndarray's.
Example
-------
>>> comp = recordlinkage.Compare()
>>> comp.compare_vectorized(custom_callable, 'first_name', 'name')
>>> comp.compare(PAIRS, DATAFRAME1, DATAFRAME2)
Parameters
----------
comp_func : function
A comparison function. This function can be a built-in function
or a user defined comparison function. The function should accept
numpy.ndarray's as first two arguments.
labels_left : label, pandas.Series, pandas.DataFrame
The labels, Series or DataFrame to compare.
labels_right : label, pandas.Series, pandas.DataFrame
The labels, Series or DataFrame to compare.
*args :
Additional arguments to pass to callable comp_func.
**kwargs :
Additional keyword arguments to pass to callable comp_func.
(keyword 'label' is reserved.)
label : (list of) label(s)
The name of the feature and the name of the column. IMPORTANT:
This argument is a keyword argument and can not be part of the
arguments of comp_func.
"""
label = kwargs.pop('label', None)
if isinstance(labels_left, tuple):
labels_left = list(labels_left)
if isinstance(labels_right, tuple):
labels_right = list(labels_right)
feature = BaseCompareFeature(
labels_left, labels_right, args, kwargs, label=label)
feature._f_compare_vectorized = comp_func
self.add(feature)
def _get_labels_left(self, validate=None):
"""Get all labels of the left dataframe."""
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_left)
# check requested labels (for better error messages)
if not is_label_dataframe(labels, validate):
error_msg = "label is not found in the dataframe"
raise KeyError(error_msg)
return unique(labels)
def _get_labels_right(self, validate=None):
"""Get all labels of the right dataframe."""
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_right)
# check requested labels (for better error messages)
if not is_label_dataframe(labels, validate):
error_msg = "label is not found in the dataframe"
raise KeyError(error_msg)
return unique(labels)
def _compute_parallel(self, pairs, x, x_link=None, n_jobs=1):
df_chunks = index_split(pairs, n_jobs)
result_chunks = Parallel(n_jobs=n_jobs)(
delayed(_parallel_compare_helper)(self, chunk, x, x_link)
for chunk in df_chunks
)
result = pandas.concat(result_chunks)
return result
def _compute(self, pairs, x, x_link=None):
# start the timer for the comparing step
start_time = time.time()
sublabels_left = self._get_labels_left(validate=x)
df_a_indexed = frame_indexing(x[sublabels_left], pairs, 0)
if x_link is None:
sublabels_right = self._get_labels_right(validate=x)
df_b_indexed = frame_indexing(x[sublabels_right], pairs, 1)
else:
sublabels_right = self._get_labels_right(validate=x_link)
df_b_indexed = frame_indexing(x_link[sublabels_right], pairs, 1)
# log timing
# index_time = time.time() - start_time
features = []
for feat in self.features:
lbl1 = feat.labels_left
lbl2 = feat.labels_right
data1 = tuple([df_a_indexed[lbl] for lbl in listify(lbl1)])
data2 = tuple([df_b_indexed[lbl] for lbl in listify(lbl2)])
result = feat._compute(data1, data2)
features.append((result, feat.label))
features = self._union(features, pairs)
# log timing
n = pairs.shape[0]
i_max = '?' if self._i_max is None else self._i_max
eta = time.time() - start_time
self._eta.append(eta)
self._n.append(n)
# log
logging.info("comparing [{:d}/{}] - time: {:.2f}s - pairs: {}".format(
self._i, i_max, eta, n))
# log total
if self._output_log_total:
n_total = np.sum(self._n)
eta_total = np.sum(self._eta)
logging.info(
"comparing [{:d}/{}] - time: {:.2f}s - pairs_total: {}".format(
self._i, i_max, eta_total, n_total))
self._i += 1
return features
def _union(self, objs, index=None, column_i=0):
"""Make a union of the features.
The term 'union' is based on the terminology of scikit-learn.
"""
feat_conc = []
for feat, label in objs:
# result is tuple of results
if isinstance(feat, tuple):
if label is None:
label = [None] * len(feat)
partial_result = self._union(
zip(feat, label), column_i=column_i)
feat_conc.append(partial_result)
column_i = column_i + partial_result.shape[1]
# result is pandas.Series.
elif isinstance(feat, pandas.Series):
feat.reset_index(drop=True, inplace=True)
if label is None:
label = column_i
feat.rename(label, inplace=True)
feat_conc.append(feat)
column_i = column_i + 1
# result is pandas.DataFrame
elif isinstance(feat, pandas.DataFrame):
feat.reset_index(drop=True, inplace=True)
if label is None:
label = np.arange(column_i, column_i + feat.shape[1])
feat.columns = label
feat_conc.append(feat)
column_i = column_i + feat.shape[1]
# result is numpy 1d array
elif is_numpy_like(feat) and len(feat.shape) == 1:
if label is None:
label = column_i
f = pandas.Series(feat, name=label, copy=False)
feat_conc.append(f)
column_i = column_i + 1
# result is numpy 2d array
elif is_numpy_like(feat) and len(feat.shape) == 2:
if label is None:
label = np.arange(column_i, column_i + feat.shape[1])
feat_df = pandas.DataFrame(feat, columns=label, copy=False)
if label is None:
feat_df.columns = [None for _ in range(feat_df.shape[1])]
feat_conc.append(feat_df)
column_i = column_i + feat.shape[1]
# other results are not (yet) supported
else:
raise ValueError("expected numpy.ndarray or "
"pandas object to be returned, "
"got '{}'".format(feat.__class__.__name__))
result = pandas.concat(feat_conc, axis=1, copy=False)
if index is not None:
result.set_index(index, inplace=True)
return result
def compute(self, pairs, x, x_link=None):
"""Compare the records of each record pair.
Calling this method starts the comparing of records.
Parameters
----------
pairs : pandas.MultiIndex
A pandas MultiIndex with the record pairs to compare. The indices
in the MultiIndex are indices of the DataFrame(s) to link.
x : pandas.DataFrame
The DataFrame to link. If `x_link` is given, the comparing is a
linking problem. If `x_link` is not given, the problem is one of
deduplication.
x_link : pandas.DataFrame, optional
The second DataFrame.
Returns
-------
pandas.DataFrame
A pandas DataFrame with feature vectors, i.e. the result of
comparing each record pair.
"""
if not isinstance(pairs, pandas.MultiIndex):
raise ValueError(
"expected pandas.MultiIndex with record pair indices "
"as first argument"
)
if not isinstance(x, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as second argument")
if x_link is not None and not isinstance(x_link, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as third argument")
if self.n_jobs == 1:
results = self._compute(pairs, x, x_link)
elif self.n_jobs > 1:
results = self._compute_parallel(
pairs, x, x_link, n_jobs=self.n_jobs)
else:
raise ValueError("number of jobs should be positive integer")
return results
def compare(self, *args, **kwargs):
"""[DEPRECATED] Compare two records."""
raise AttributeError("this method was removed in version 0.12.0")
def clear_memory(self):
"""[DEPRECATED] Clear memory."""
raise AttributeError("this method was removed in version 0.12.0")
class BaseClassifier(six.with_metaclass(ABCMeta)):
"""Base class for classification of records pairs.
This class contains methods for training the classifier.
Distinguish different types of training, such as supervised and
unsupervised learning.
"""
def __init__(self):
pass
def learn(self, *args, **kwargs):
"""[DEPRECATED] Use 'fit_predict'.
"""
warnings.warn("learn is deprecated, {}.fit_predict "
"instead".format(self.__class__.__name__))
return self.fit_predict(*args, **kwargs)
def _initialise_classifier(self, comparison_vectors):
"""Initialise the classifier.
Parameters
----------
comparison_vectors : pandas.DataFrame
The comparison vectors (or features) to fit the classifier with.
"""
pass
@abstractmethod
def _fit(self, *args, **kwargs):
pass
def fit(self, comparison_vectors, match_index=None):
"""Train the classifier.
Parameters
----------
comparison_vectors : pandas.DataFrame
The comparison vectors (or features) to train the model with.
match_index : pandas.MultiIndex
A pandas.MultiIndex object with the true matches.
The MultiIndex contains only the true matches. Default None.
Note
----
A note in case of finding links within a single dataset (for example
deduplication). Unsure that the training record pairs are from the
lower triangular part of the dataset/matrix. See detailed information
here: link.
"""
logging.info("Classification - start training {}".format(
self.__class__.__name__)
)
self._initialise_classifier(comparison_vectors)
# start timing
start_time = time.time()
if isinstance(match_index, (pandas.MultiIndex, pandas.Index)):
try:
y = pandas.Series(0, index=comparison_vectors.index)
y.loc[match_index & comparison_vectors.index] = 1
except pandas.IndexError as err:
# The are no matches. So training is not possible.
if len(match_index & comparison_vectors.index) == 0:
raise LearningError(
"both matches and non-matches needed in the" +
"trainingsdata, only non-matches found"
)
else:
raise err
self._fit(comparison_vectors.as_matrix(), y.values)
elif match_index is None:
self._fit(comparison_vectors.as_matrix())
else:
raise ValueError("'match_index' has incorrect type '{}'".format(type(match_index)))
# log timing
logf_time = "Classification - training computation time: ~{:.2f}s"
logging.info(logf_time.format(time.time() - start_time))
@return_type_deprecator
def fit_predict(self, comparison_vectors, match_index=None):
"""Train the classifier.
Parameters
----------
comparison_vectors : pandas.DataFrame
The comparison vectors.
match_index : pandas.MultiIndex
The true matches.
return_type : str
Deprecated. Use recordlinkage.options instead. Use the option
`recordlinkage.set_option('classification.return_type', 'index')`
instead.
Returns
-------
pandas.Series
A pandas Series with the labels 1 (for the matches) and 0 (for the
non-matches).
"""
self.fit(comparison_vectors, match_index)
result = self.predict(comparison_vectors)
return result
@abstractmethod
def _predict(self, comparison_vectors):
pass
@return_type_deprecator
def predict(self, comparison_vectors):
"""Predict the class of the record pairs.
Classify a set of record pairs based on their comparison vectors into
matches, non-matches and possible matches. The classifier has to be
trained to call this method.
Parameters
----------
comparison_vectors : pandas.DataFrame
Dataframe with comparison vectors.
return_type : str
Deprecated. Use recordlinkage.options instead. Use the option
`recordlinkage.set_option('classification.return_type', 'index')`
instead.
Returns
-------
pandas.Series
A pandas Series with the labels 1 (for the matches) and 0 (for the
non-matches).
"""
logging.info("Classification - predict matches and non-matches")
# make the predicition
prediction = self._predict(comparison_vectors.as_matrix())
self._post_predict(prediction)
# format and return the result
return self._return_result(prediction, comparison_vectors)
def _post_predict(self, result):
"""Method called after prediction.
Parameters
----------
result : pandas.Series
The resulting classification.
"""
pass
@abstractmethod
def _prob_match(self, *args, **kwargs):
pass
def prob(self, comparison_vectors, return_type=None):
"""Compute the probabilities for each record pair.
For each pair of records, estimate the probability of being a match.
Parameters
----------
comparison_vectors : pandas.DataFrame
The dataframe with comparison vectors.
return_type : str
Deprecated. (default 'series')
Returns
-------
pandas.Series or numpy.ndarray
The probability of being a match for each record pair.
"""
# deprecation
if return_type is not None:
warnings.warn("The argument 'return_type' is removed. "
"Default value is now 'series'.",
VisibleDeprecationWarning, stacklevel=2)
logging.info("Classification - compute probabilities")
prob_match = self._prob_match(comparison_vectors.as_matrix())
return | pandas.Series(prob_match, index=comparison_vectors.index) | pandas.Series |
import warnings
import catboost as cgb
import hyperopt
import lightgbm as lgb
import pandas as pd
import xgboost as xgb
from homeserv_inter.constants import LABEL_COLS, MODEL_DIR, RESULT_DIR, TUNING_DIR
from homeserv_inter.datahandler import HomeServiceDataHandle
from homeserv_inter.tuning import HyperParamsTuning
from wax_toolbox import Timer
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
# Notes:
# 3 times more of class 0 than class 1
def get_df_importance(booster):
if hasattr(booster, "feature_name"): # lightgbm
idx = booster.feature_name()
arr = booster.feature_importance()
df = pd.DataFrame(index=idx, data=arr, columns=["importance"])
elif hasattr(booster, "get_score"): # xgboost
serie = pd.Series(booster.get_score())
df = pd.DataFrame(columns=["importance"], data=serie)
else:
raise NotImplementedError
# Traduce in percentage:
df["importance"] = df["importance"] / df["importance"].sum() * 100
df = df.sort_values("importance", ascending=False)
return df
class BaseModelHomeService(HomeServiceDataHandle, HyperParamsTuning):
# Attributes to be defined:
@property
def algo():
raise NotImplementedError
@property
def common_params():
raise NotImplementedError
@property
def params_best_fit():
raise NotImplementedError
def save_model(self, booster):
now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
f = MODEL_DIR / "{}_model_{}.txt".format(self.algo, now)
booster.save_model(f.as_posix())
@staticmethod
def _generate_plot(eval_hist):
try:
import plotlyink
dfhist = pd.DataFrame(eval_hist)
fig = dfhist.iplot.scatter(as_figure=True)
import plotly
now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
filepath = RESULT_DIR / 'lgb_eval_hist_{}.html'.format(now)
plotly.offline.plot(fig, filename=filepath.as_posix())
except ImportError:
pass
# Methods to be implemented
def train():
raise NotImplementedError
def validate():
raise NotImplementedError
def cv():
raise NotImplementedError
def hypertuning_objective(self, params):
params = self._ensure_type_params(params)
msg = "-- HyperOpt -- CV with {}\n".format(params)
params = {
**self.common_params,
**params
} # recombine with common params
# Fix learning rate:
params["learning_rate"] = 0.04
with Timer(msg, at_enter=True):
eval_hist = self.cv(params_model=params, nfold=5)
if "auc-mean" in eval_hist.keys(): # lightgbm
score = max(eval_hist["auc-mean"])
else:
raise NotImplementedError
result = {
"loss": score,
"status": hyperopt.STATUS_OK,
# -- store other results like this
# "eval_time": time.time(),
# 'other_stuff': {'type': None, 'value': [0, 1, 2]},
# -- attachments are handled differently
"attachments": {
"eval_hist": eval_hist
},
}
return result
class LgbHomeService(BaseModelHomeService):
algo = 'lightgbm'
# Common params for LightGBM
common_params = {
"verbose": -1,
"nthreads": 16,
# 'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
"scale_pos_weight": 0.33, # used only in binary application, weight of labels with positive class
"objective": "xentropy", # better optimize on cross-entropy loss for auc
"metric": {"auc"}, # alias for roc_auc_score
}
# Best fit params
params_best_fit = {
# "task": "train",
"boosting_type": "dart",
"learning_rate": 0.04,
"num_leaves": 100, # we should let it be smaller than 2^(max_depth)
"min_data_in_leaf": 20, # Minimum number of data need in a child
"max_depth": -1, # -1 means no limit
"bagging_fraction": 0.84, # Subsample ratio of the training instance.
"feature_fraction": 0.75, # Subsample ratio of columns when constructing each tree.
"bagging_freq": 9, # frequence of subsample, <=0 means no enable
"max_bin": 200,
# 'min_data_in_leaf': 20, # minimal number of data in one leaf
# 'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
# 'subsample_for_bin': 200000, # Number of samples for constructing bin
# 'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization
# 'reg_alpha': 0, # L1 regularization term on weights
# 'reg_lambda': 0, # L2 regularization term on weights
**common_params,
}
# Tuning attributes in relation to HyperParamsTuning
int_params = ("num_leaves", "max_depth", "min_data_in_leaf",
"bagging_freq")
float_params = ("learning_rate", "feature_fraction", "bagging_fraction")
hypertuning_space = {
# "boosting": hyperopt.hp.choice("boosting", ["gbdt", "rf", "dart"]),
"num_leaves": hyperopt.hp.quniform("num_leaves", 30, 300, 20),
"min_data_in_leaf": hyperopt.hp.quniform("min_data_in_leaf", 10, 100,
10),
# "learning_rate": hyperopt.hp.uniform("learning_rate", 0.001, 0.1),
"feature_fraction": hyperopt.hp.uniform("feature_fraction", 0.7, 0.99),
"bagging_fraction": hyperopt.hp.uniform("bagging_fraction", 0.7, 0.99),
"bagging_freq": hyperopt.hp.quniform("bagging_freq", 6, 18, 2),
}
def validate(self, save_model=True, **kwargs):
dtrain, dtest = self.get_train_valid_set(as_lgb_dataset=True)
watchlist = [dtrain, dtest]
booster = lgb.train(
params=self.params_best_fit,
train_set=dtrain,
valid_sets=watchlist,
# so that at 3000th iteration, learning_rate=0.025
# learning_rates=lambda iter: 0.5 * (0.999 ** iter),
**kwargs,
)
if save_model:
self.save_model(booster)
return booster
def cv(self,
params_model=None,
nfold=5,
num_boost_round=10000,
early_stopping_rounds=100,
generate_plot=False,
**kwargs):
dtrain = self.get_train_set(as_lgb_dataset=True)
# If no params_model is given, take self.params_best_fit
if params_model is None:
params_model = self.params_best_fit
eval_hist = lgb.cv(
params=params_model,
train_set=dtrain,
nfold=nfold,
verbose_eval=True, # display the progress
# display the standard deviation in progress, results are not affected
show_stdv=True,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
**kwargs,
)
if generate_plot:
self._generate_plot(eval_hist)
return eval_hist
def generate_submit(self, num_boost_round=None, from_model_saved=False):
if not from_model_saved:
assert num_boost_round is not None
dtrain = self.get_train_set(as_lgb_dataset=True)
booster = lgb.train(
params=self.params_best_fit,
train_set=dtrain,
num_boost_round=num_boost_round)
self.save_model(booster)
else:
booster = lgb.Booster(model_file=from_model_saved)
df = self.get_test_set()
with Timer("Predicting"):
pred = booster.predict(df)
df = pd.DataFrame({"target": pred})
now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
df.to_csv(RESULT_DIR / "submit_{}.csv".format(now), index=False)
class XgbHomeService(BaseModelHomeService):
algo = 'xgboost'
# Common params for Xgboost
common_params = {
"silent": True,
"nthreads": 16,
"objective": "binary:logistic",
# 'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
"scale_pos_weight": 0.33, # used only in binary application, weight of labels with positive class
"eval_metric": "auc",
}
# https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
params_best_fit = {
"booster": "gbtree",
"max_depth": 12,
"learning_rate": 0.04,
# "gamma": 0.015,
# "subsample": max(min(subsample, 1), 0),
# "colsample_bytree": max(min(colsample_bytree, 1), 0),
# "min_child_weight": min_child_weight,
# "max_delta_step": int(max_delta_step),
**common_params,
}
int_params = ()
float_params = ()
hypertuning_space = {}
def validate(self, save_model=True, **kwargs):
dtrain, dtest = self.get_train_valid_set(as_xgb_dmatrix=True)
watchlist = [(dtrain, "train"), (dtest, "eval")]
booster = xgb.train(
params=self.params_best_fit,
dtrain=dtrain,
evals=watchlist,
**kwargs,
)
if save_model:
self.save_model(booster)
return booster
def cv(self,
params_model=None,
nfold=5,
num_boost_round=10000,
early_stopping_rounds=100,
generate_plot=False,
**kwargs):
# If no params_model is given, take self.params_best_fit
if params_model is None:
params_model = self.params_best_fit
dtrain = self.get_train_set(as_xgb_dmatrix=True)
eval_hist = xgb.cv(
params=params_model,
dtrain=dtrain,
nfold=nfold,
verbose_eval=True,
show_stdv=True,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
**kwargs)
if generate_plot:
self._generate_plot(eval_hist)
return eval_hist
class CatBoostHomService(BaseModelHomeService):
algo = 'catboost'
common_params = {
"thread_count": 15,
"objective": "Logloss",
"eval_metric": "AUC",
"scale_pos_weight": 0.33, # used only in binary application, weight of labels with positive class
}
params_best_fit = {
# "max_depth": 12,
"learning_rate": 0.02,
**common_params,
}
int_params = ()
float_params = ()
hypertuning_space = {}
def validate(self, save_model=True, **kwargs):
dtrain, dtest = self.get_train_valid_set(as_cgb_pool=True)
watchlist = [dtrain, dtest]
booster = cgb.train(
dtrain=dtrain,
params=self.params_best_fit,
eval_set=watchlist,
**kwargs,
)
if save_model:
self.save_model(booster)
return booster
def cv(self,
params_model=None,
nfold=5,
num_boost_round=10000,
early_stopping_rounds=100,
**kwargs):
# If no params_model is given, take self.params_best_fit
if params_model is None:
params_model = self.params_best_fit
dtrain = self.get_train_set(as_cgb_pool=True)
eval_hist = cgb.cv(
params=params_model,
dtrain=dtrain,
nfold=nfold,
verbose_eval=True,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
**kwargs)
return eval_hist
def generate_submit(self, num_boost_round=None, from_model_saved=False):
assert num_boost_round is not None
if not from_model_saved:
dtrain = self.get_train_set(as_cgb_pool=True)
booster = cgb.train(
dtrain=dtrain,
params=self.params_best_fit,
num_boost_round=num_boost_round)
self.save_model(booster)
else:
booster = cgb.CatBoost(model_file=from_model_saved)
dftest = self.get_test_set(as_cgb_pool=True)
with Timer("Predicting"):
probas = booster.predict(dftest, prediction_type="Probability")
dfpred = | pd.DataFrame(probas) | pandas.DataFrame |
import matplotlib.pyplot as plt
import os
import pandas
from data.WorldBankDataLoader import WorldBankDataLoader
from textwrap import wrap
DEMOGRAPHIC_INDICATORS = [
'Population density (people per sq. km of land area)',
'Urban population (% of total population)',
"Birth rate, crude (per 1,000 people)",
"Death rate, crude (per 1,000 people)",
"Population, male (% of total population)",
"Sex ratio at birth (male births per female births)",
"Age dependency ratio (% of working-age population)",
"Age dependency ratio, old (% of working-age population)",
"Age dependency ratio, young (% of working-age population)",
"Urban population (% of total population)",
"Mortality rate, under-5 (per 1,000 live births)",
"Fertility rate, total (births per woman)",
"Population density (people per sq. km of land area)"
]
ECONOMIC_INDICATORS = [
'GNI per capita',
'Adjusted savings: education expenditure (% of GNI)',
'GDP (current US$)',
# 'Central government debt, total (% of GDP)',
'Exports of goods and services (% of GDP)',
'Imports of goods and services (% of GDP)',
'Final consumption expenditure (% of GDP)',
'Gross capital formation (% of GDP)',
'Expense (% of GDP)',
'Inflation, GDP deflator (annual %)'
]
CHECKED_INDICATORS = list(WorldBankDataLoader().sociodemographic_indicators().values())
all_countries = WorldBankDataLoader().all_countries()
if __name__ == "__main__":
group_name = "sociodemography"
# get countries in region - here it's Europe & Central Asia
europe_and_central_asia_countries = {country['name']: None for country in all_countries if
country['region']['id'] == 'NAC'}
for country_name in europe_and_central_asia_countries:
country_data_path = os.path.join(group_name, "downloaded_countries", country_name + ".csv")
europe_and_central_asia_countries[country_name] = | pandas.read_csv(country_data_path) | pandas.read_csv |
import numpy as np
import pandas as pd
import pickle
import pylab as plt
from matplotlib import pyplot as plt
from datetime import datetime
from collections import OrderedDict
import skvideo
import skvideo.io
import copy
skvideo.setFFmpegPath("/usr/local/bin")
# Local import
from algorithms.mcts import MCTS, CalculateScore, GetActionPrior, SelectNextAction, SelectChild, Expand, RollOut, backup, \
InitializeChildren, HeuristicDistanceToTarget
import envMujoco as env
import reward
from envSheepChaseWolf import stationaryWolfPolicy, WolfPolicyForceDirectlyTowardsSheep, DistanceBetweenActualAndOptimalNextPosition, \
GetTrialTrajectoryFromDf, GetPosFromTrajectory, GetAgentPos
from envMujoco import Reset, TransitionFunction, IsTerminal
from play import SampleTrajectory
def drawPerformanceLine(dataDf, axForDraw, xLabel, legendLabels): # Line
for key, grp in dataDf.groupby(level=['rolloutHeuristicWeight', 'maxRolloutSteps'], group_keys=False): # remove the reset_index
grp.index = grp.index.droplevel(level=[1, 2])
print(grp)
grp.plot(ax=axForDraw, kind='line', y='mean', yerr='std', label = legendLabels[key], marker='o')
class RunTrial:
def __init__(self, getRolloutHeuristic, getRollout, getMCTS, sampleTrajectory): # break this line
self.sampleTrajectory = sampleTrajectory
self.getMCTS = getMCTS
self.getRollout = getRollout
self.getRolloutHeuristic = getRolloutHeuristic
def __call__(self, conditionDf):
modelDf = conditionDf.reset_index()
rolloutHeuristicWeight = modelDf['rolloutHeuristicWeight'][0] # reset_index and use column values. Maybe reset outside
maxRolloutSteps = modelDf['maxRolloutSteps'][0]
numSimulations = modelDf['numSimulations'][0]
rolloutHeuristic = self.getRolloutHeuristic(rolloutHeuristicWeight)
rollout = self.getRollout(maxRolloutSteps, rolloutHeuristic)
sheepPolicyMCTS = self.getMCTS(numSimulations, rollout)
allAgentsPolicies = [sheepPolicyMCTS, stationaryWolfPolicy]
policy = lambda state: [agentPolicy(state) for agentPolicy in allAgentsPolicies]
trajectory = self.sampleTrajectory(policy)
trajectorySeries = pd.Series({'trajectory': trajectory})
return trajectorySeries
def main(): # comments for explanation
startTime = datetime.now()
# experiment conditions
numTrials = 2
manipulatedVariables = OrderedDict()
manipulatedVariables['numSimulations'] = [5, 6, 7]
manipulatedVariables['rolloutHeuristicWeight'] = [0.1, 0]
manipulatedVariables['maxRolloutSteps'] = [10, 0]
manipulatedVariables['trialIndex'] = list(range(numTrials))
levelNames = list(manipulatedVariables.keys())
levelValues = list(manipulatedVariables.values())
modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
toSplitFrame = | pd.DataFrame(index=modelIndex) | pandas.DataFrame |
"""
This is used for rllib training purpose.
"""
from dispatch.location.models import Location
from dispatch.plugins.kandbox_planner.planner_engine.naive_manual_planner_shared_jobs_in_slots import (
NaivePlannerJobsInSlots,
)
from dispatch.plugins.kandbox_planner.util.kandbox_date_util import extract_minutes_from_datetime
from dispatch.plugins.bases.kandbox_planner import (
KandboxEnvPlugin,
KandboxPlannerPluginType,
KandboxEnvProxyPlugin,
)
from dispatch.plugins.kandbox_planner.rule.lunch_break import KandboxRulePluginLunchBreak
from dispatch.plugins.kandbox_planner.rule.travel_time import KandboxRulePluginSufficientTravelTime
from dispatch.plugins.kandbox_planner.rule.working_hour import KandboxRulePluginWithinWorkingHour
from dispatch.plugins.kandbox_planner.rule.requested_skills import KandboxRulePluginRequestedSkills
from dispatch.plugins.kandbox_planner.travel_time_plugin import HaversineTravelTime
# from dispatch.plugins.kandbox_planner.routing.travel_time_routingpy_redis import RoutingPyRedisTravelTime
from dispatch.config import (
REDIS_HOST,
REDIS_PORT,
REDIS_PASSWORD,
NBR_OF_OBSERVED_WORKERS,
MINUTES_PER_DAY,
MAX_NBR_OF_JOBS_PER_DAY_WORKER,
SCORING_FACTOR_STANDARD_TRAVEL_MINUTES,
DATA_START_DAY,
MIN_START_MINUTES_FROM_NOW,
TESTING_MODE,
)
from dispatch import config
from dispatch.service.planner_models import SingleJobDropCheckOutput
from dispatch.plugins.kandbox_planner.env.env_enums import (
EnvRunModeType,
JobPlanningStatus,
)
from dispatch.plugins.kandbox_planner.env.env_enums import *
from dispatch.plugins.kandbox_planner.env.env_models import (
WorkingTimeSlot,
LocationTuple,
JobLocation,
Worker,
Job,
BaseJob,
Appointment,
ActionDict,
Absence,
SingleJobCommitInternalOutput,
ActionEvaluationScore,
JobLocationBase
)
import dataclasses
import dispatch.config as kandbox_config
import dispatch.plugins.kandbox_planner.util.kandbox_date_util as date_util
# from dispatch.plugins.kandbox_planner.env.recommendation_server import RecommendationServer
from dispatch.plugins.kandbox_planner.data_adapter.kafka_adapter import KafkaAdapter
from dispatch.plugins.kandbox_planner.data_adapter.kplanner_db_adapter import KPlannerDBAdapter
from dispatch.plugins.kandbox_planner.env.cache_only_slot_server import CacheOnlySlotServer
from dispatch.plugins.kandbox_planner.env.working_time_slot import (
WorkingTimeSlotServer,
MissingSlotException,
)
from dispatch.config import PLANNER_SERVER_ROLE, MAX_MINUTES_PER_TECH
import socket
import logging
from redis.exceptions import LockError
import redis
import pandas as pd
import pprint
import math
import random
import numpy as np
import sys
from itertools import combinations
from typing import List
import copy
import time
from datetime import datetime, timedelta
import json
from gym import spaces
from ray.rllib.utils.spaces.repeated import Repeated
# This version works on top of json input and produce json out
# Observation: each worker has multiple working_time=days, then divided by slots, each slot with start and end time,
# import holidays
hostname = socket.gethostname()
log = logging.getLogger("rllib_env_job2slot")
# log.setLevel(logging.ERROR)
# log.setLevel(logging.WARN)
# log.setLevel(logging.DEBUG)
RULE_PLUGIN_DICT = {
"kandbox_rule_within_working_hour": KandboxRulePluginWithinWorkingHour,
"kandbox_rule_sufficient_travel_time": KandboxRulePluginSufficientTravelTime,
"kandbox_rule_requested_skills": KandboxRulePluginRequestedSkills,
"kandbox_rule_lunch_break": KandboxRulePluginLunchBreak,
#
}
def min_max_normalize(x, input_min, input_max):
y = (x - input_min) / (input_max - input_min)
return y
class KPlannerJob2SlotEnv(KandboxEnvPlugin):
"""
Action should be compatible with GYM, then it should be either categorical or numerical. ActionDict can be converted..
New design of actions are:
[
vector of worker1_prob, worker2_prob (for N workers),
start_day_i, job_start_minutes, shared_worker_count (shared = 1..M)
]
# Benefit of using start_minutes is that I can use start minutes to find insert job i
# but it is bigger value space for algorithm to decide.
"""
title = "Kandbox Environment Job2Slot"
slug = "kprl_env_job2slot"
author = "Kandbox"
author_url = "https://github.com/qiyangduan"
description = "Env for GYM for RL."
version = "0.1.0"
metadata = {"render.modes": ["human"]}
NBR_FEATURE_PER_SLOT = 24
NBR_FEATURE_PER_UNPLANNED_JOB = 12
NBR_FEATURE_OVERVIEW = 8
def __del__(self):
try:
del self.kp_data_adapter
except:
log.warn("rl_env: Error when releasing kp_data_adapter.")
def __init__(self, config=None):
#
# each worker and job is a dataclass object, internally transformed
#
env_config = config
kp_data_adapter = None
reset_cache = False
self.workers_dict = {} # Dictionary of all workers, indexed by worker_code
self.workers = [] # List of Workers, pointing to same object in self.workers_dict
self.jobs_dict = {} # Dictionary of of all jobs, indexed by job_code
self.jobs = [] # List of Jobs
self.locations_dict = {} # Dictionary of JobLocation, indexed by location_code
self.changed_job_codes_set = set()
self.trial_count = 0 # used only for GYM game online training process
self.trial_step_count = 0
self.run_mode = EnvRunModeType.PREDICT
self.unplanned_job_code_list = []
self.job_generation_count = 0
self.all_locations = []
self.current_job_code = None
self.current_job_i = 0
self.total_assigned_job_duration = 0
self.nbr_inplanning = 0
self.expected_travel_time_so_far = 0
self.total_travel_time = 0
self.total_travel_worker_job_count = 0
self.kafka_input_window_offset = 0
self.kafka_slot_changes_offset = 0
self.unplanned_job_codes = []
# self.config["daily_overtime_minutes"] = []
self.national_holidays = [] # TODO, change it to set() @duan
self.weekly_working_days_flag = []
# self.daily_weekday_sequence = [] # Replaced by self.env_encode_day_seq_to_weekday(day_seq)
# 2020-11-09 04:41:16 Duan, changed from [] to {}, since this will keep growing and should be purged regularly
self.daily_working_flag = {}
self.internal_obs_slot_list = []
self.config = {
# "data_start_day": "20200501",
# "nbr_of_days_planning_window": 2,
"planner_code": "rl_job2slot",
"allow_overtime": False,
"nbr_observed_slots": NBR_OF_OBSERVED_WORKERS,
"minutes_per_day": MINUTES_PER_DAY,
"max_nbr_of_jobs_per_day_worker": MAX_NBR_OF_JOBS_PER_DAY_WORKER,
# in minutes, 100 - travel / 1000 as the score
"scoring_factor_standard_travel_minutes": SCORING_FACTOR_STANDARD_TRAVEL_MINUTES,
# Team.flex_form_data will be copied here in the env top level.
# The env does NOT have flex_form_data anymore ... 2021-07-05 12:18:45
# "flex_form_data": {
"holiday_days": "20210325",
"weekly_rest_day": "0",
"travel_speed_km_hour": 40,
"travel_min_minutes": 10,
"planning_working_days": 1,
# },
}
if env_config is None:
log.error("error, no env_config is provided!")
raise ValueError("No env_config is provided!")
if "rules" not in env_config.keys():
rule_set = []
for rule_slug_config in env_config["rules_slug_config_list"]:
rule_plugin = RULE_PLUGIN_DICT[rule_slug_config[0]](config=rule_slug_config[1])
rule_set.append(rule_plugin)
self.rule_set = rule_set
else:
self.rule_set = env_config["rules"]
env_config.pop("rules", None)
for x in env_config.keys():
self.config[x] = env_config[x]
# TODO, 2021-06-14 07:47:06
self.config["nbr_of_days_planning_window"] = int(self.config["nbr_of_days_planning_window"])
self.PLANNING_WINDOW_LENGTH = (
self.config["minutes_per_day"] * self.config["nbr_of_days_planning_window"]
)
evaluate_RequestedSkills = KandboxRulePluginRequestedSkills()
self.rule_set_worker_check = [evaluate_RequestedSkills] # , evaluate_RetainTech
self.data_start_datetime = datetime.strptime(
DATA_START_DAY, kandbox_config.KANDBOX_DATE_FORMAT
)
if kp_data_adapter is None:
log.info("env is creating db_adapter by itslef, not injected.")
kp_data_adapter = KPlannerDBAdapter(team_id=self.config["team_id"])
else:
log.error("INTERNAL:kp_data_adapter is not None for env.__init__")
self.kp_data_adapter = kp_data_adapter
if REDIS_PASSWORD == "":
self.redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=None)
else:
self.redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
# This team_env_key uniquely identify this Env by starting date and team key. Multiple instances on different servers share same key.
self.team_env_key = "env_{}_{}".format(self.config["org_code"], self.config["team_id"])
# env_inst_seq is numeric and unique inside the team_env_key, uniquely identify one instance on a shared Env
self.env_inst_seq = self.redis_conn.incr(self.get_env_inst_counter_redis_key())
# env_inst_code is a code for an env Instance, which **globally**, uniquely identify one instance on a shared Env
self.env_inst_code = "{}_{}_{}".format(self.team_env_key, self.env_inst_seq, hostname)
self.parse_team_flex_form_config()
SharingEfficiencyLogic = "1_1.0;2_1.6;3_2.1"
self._reset_horizon_start_minutes()
self.efficiency_dict = {}
for day_eff in SharingEfficiencyLogic.split(";"):
self.efficiency_dict[int(day_eff.split("_")[0])] = float(day_eff.split("_")[1])
self.kp_data_adapter.reload_data_from_db()
if PLANNER_SERVER_ROLE == "trainer":
self.kafka_server = KafkaAdapter(
env=None, role=KafkaRoleType.FAKE, team_env_key=self.team_env_key
)
self.slot_server = CacheOnlySlotServer(env=self, redis_conn=self.redis_conn)
self.kafka_input_window_offset = 0
else:
self.kafka_server = KafkaAdapter(env=self, role=KafkaRoleType.ENV_ADAPTER)
self.slot_server = WorkingTimeSlotServer(env=self, redis_conn=self.redis_conn)
self.kafka_input_window_offset = (
self.kp_data_adapter.get_team_env_window_latest_offset()
)
self.config.update(self.kp_data_adapter.get_team_flex_form_data())
# self.recommendation_server = RecommendationServer(env=self, redis_conn=self.redis_conn)
if "data_end_day" in self.config.keys():
log.error("data_end_day is not supported from config!")
# I don't know why, self.kp_data_adapter loses this workers_dict_by_id after next call.
# 2021-05-06 21:34:01
self.workers_dict_by_id = copy.deepcopy(self.kp_data_adapter.workers_dict_by_id)
# self._reset_data()
self.replay_env()
log.info("replay_env is done, for training purpose")
# To be compatible with GYM Environment
self._set_spaces()
log.info("__init__ done, reloaded data from db, env is ready, please call reset to start")
# Temp fix
self.naive_opti_slot = NaivePlannerJobsInSlots(env=self)
self.env_bootup_datetime = datetime.now()
def reset(self, shuffle_jobs=False):
# print("env reset")
self._reset_data()
# self._reset_gym_appt_data()
self.slot_server.reset()
self.trial_count += 1
self.trial_step_count = 0
self.inplanning_job_count = sum(
[1 if j.planning_status != JobPlanningStatus.UNPLANNED else 0 for j in self.jobs]
)
self.current_job_i = 0
self.total_assigned_job_duration = 0
self.nbr_inplanning = 0
self.expected_travel_time_so_far = 0
self.total_travel_time = 0
self.total_travel_worker_job_count = 0
self.current_observed_worker_list = self._get_sorted_worker_code_list(self.current_job_i)
self.current_appt_i = 0
self._move_to_next_unplanned_job()
return self._get_observation()
def _reset_gym_appt_data(self):
self.appts = list(self.kp_data_adapter.appointment_db_dict.keys())
self.appt_scores = {}
# Used for get_reward, only for gym training
self.job_travel_time_sample_list_static = [
(
self._get_travel_time_2_job_indices(ji, (ji + 1) % len(self.jobs))
+ self._get_travel_time_2_job_indices(ji, (ji + 2) % len(self.jobs))
+ self._get_travel_time_2_job_indices(ji, (ji + 3) % len(self.jobs))
)
/ 3
for ji in range(0, len(self.jobs))
]
self.total_travel_time_static = sum(
[self._get_travel_time_2_job_indices(ji, ji + 1) for ji in range(0, len(self.jobs) - 1)]
)
def normalize(self, x, data_type: str):
if data_type == "duration":
return min_max_normalize(x, 0, 300)
elif data_type == "start_minutes":
return min_max_normalize(x, 0, 1440 * self.config["nbr_of_days_planning_window"])
elif data_type == "day_minutes_1440":
return min_max_normalize(x, 0, 1440 * 1)
elif data_type == "longitude":
return min_max_normalize(
x, self.config["geo_longitude_min"], self.config["geo_longitude_max"]
)
elif data_type == "latitude":
return min_max_normalize(
x, self.config["geo_latitude_min"], self.config["geo_latitude_max"]
)
elif data_type == "max_nbr_shared_workers":
return min_max_normalize(x, 0, 4)
elif data_type == "max_job_in_slot":
return min_max_normalize(x, 0, 10)
else:
# log.error(f"Unknow data_type = {data_type}")
raise ValueError(f"Unknow data_type = {data_type}")
return x
def _reset_data(self):
"""It should be used only inside reset ()
Location (self.locations_dict) remain unchanged in this process.
"""
self.workers = self.load_transformed_workers()
self.workers_dict = {} # Dictionary of dict
self.permanent_pairs = set()
#
for ji, x in enumerate(self.workers):
self.workers_dict[x.worker_code] = x
x.worker_index = ji
for ji, x in self.workers_dict.items():
if x.belongs_to_pair is not None:
is_valid_pair = True
for paired_code in x.belongs_to_pair:
if paired_code not in self.workers_dict.keys():
log.error(
f"WORKER:{paired_code}:PAIR_TO:{x.belongs_to_pair}: the paired worker is not found."
)
is_valid_pair = False
if not is_valid_pair:
continue
self.permanent_pairs.add(x.belongs_to_pair)
for paired_code in x.belongs_to_pair:
self.workers_dict[paired_code].belongs_to_pair = x.belongs_to_pair
# This function also alters self.locations_dict
self.jobs = self.load_transformed_jobs()
if len(self.jobs) < 1:
log.debug("No Jobs on initialized Env.")
# This encoding includes appointments for now
# TODO, sperate appointment out? 2020-11-06 14:28:59
self.jobs_dict = {} # Dictionary of dict
for ji, job in enumerate(self.jobs):
if job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"_reset_data: {job.job_code}")
job.requested_duration_minutes = int(job.requested_duration_minutes)
# This reset status and is for training.
# job.planning_status = "U"
self.jobs_dict[job.job_code] = job
job.job_index = ji
log.debug("env _reset_data finished")
# This is a key to map job_code to its sorted set of all recommendations (RecommendedAction)
def get_recommendation_job_key(self, job_code: str, action_day: int) -> str:
if action_day < 0:
return "{}/rec/{}".format(self.team_env_key, job_code)
else:
return "{}/rec/{}/{}".format(self.team_env_key, job_code, action_day)
# This is a key to map a slot to all recommendations (RecommendedAction) which are using this slot
def get_slot_2_recommendation_job_key(self, slot_code: str) -> str:
return "{}/slot_rec/{}".format(self.team_env_key, slot_code)
# This is a key to store latest offset on kafka ENV_WINDOW, which is already replayed.
# This is different from database team.latest_env_kafka_offset since that's indicating latest offset in PG/Mysql DB.
def get_env_window_replay_till_offset_key(self) -> str:
return "{}/env/env_window_replay_till_offset".format(self.team_env_key)
def get_env_out_kafka_offset_key(self) -> str:
return "{}/env/env_out_till_offset".format(self.team_env_key)
def get_env_window_replay_till_offset(self) -> int:
return int(self.redis_conn.get(self.get_env_window_replay_till_offset_key()))
def set_env_window_replay_till_offset(self, offset: int):
return self.redis_conn.set(self.get_env_window_replay_till_offset_key(), offset)
def get_env_inst_counter_redis_key(self) -> int:
return "{}/env/counter".format(self.team_env_key)
def get_env_config_redis_key(self) -> int:
return "{}/env/config".format(self.team_env_key)
def get_env_planning_day_redis_key(self) -> int:
return "{}/env/planning_days".format(self.team_env_key)
def get_env_replay_lock_redis_key(self) -> int:
return "lock_env/{}".format(self.team_env_key)
def get_recommened_locked_slots_by_job_code_redis_key(self, job_code: str) -> str:
return "{}/env_lock/by_job/{}".format(self.team_env_key, job_code)
def get_redis_key_commited_slot_lock(self, slot_code: str) -> str:
return "{}/env_lock/after_commit_slot/{}".format(self.team_env_key, slot_code)
def get_recommened_locked_slot_redis_key(self, slot_code: str) -> str:
return "{}/env_lock/slot/{}".format(self.team_env_key, slot_code)
def get_env_planning_horizon_start_minutes(self) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
if self.horizon_start_minutes is not None:
return self.horizon_start_minutes
return int((datetime.now() - self.data_start_datetime).total_seconds() / 60)
def get_env_planning_horizon_end_minutes(self) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
return 1440 * (
int(self.config["nbr_of_days_planning_window"])
+ int(self.get_env_planning_horizon_start_minutes() / 1440)
)
def get_start_gps_for_worker_day(self, w: Worker, day_seq: int) -> LocationTuple:
return w.weekly_start_gps[self.env_encode_day_seq_to_weekday(day_seq)]
def get_end_gps_for_worker_day(self, w: Worker, day_seq: int) -> LocationTuple:
return w.weekly_end_gps[self.env_encode_day_seq_to_weekday(day_seq)]
def get_worker_available_overtime_minutes(self, worker_code: str, day_seq: int) -> int:
worker = self.workers_dict[worker_code]
available_overtime_list = []
for limit_days_key in worker.overtime_limits.keys():
if day_seq in limit_days_key:
total_overtime = sum([worker.used_overtime_minutes[dsq] for dsq in limit_days_key])
available_overtime_list.append(
worker.overtime_limits[limit_days_key] - total_overtime
)
if len(available_overtime_list) < 1:
return 0
available_overtime = min(available_overtime_list)
return available_overtime
def get_worker_floating_slots(self, worker_code: str, query_start_minutes: int) -> List:
overlap_slots = self.slot_server.get_overlapped_slots(
worker_id=worker_code,
start_minutes=0,
end_minutes=MAX_MINUTES_PER_TECH,
)
floating_slots = []
for a_slot in overlap_slots: #
if self.slot_server.get_time_slot_key(a_slot) in kandbox_config.DEBUGGING_SLOT_CODE_SET:
log.debug("debug atomic_slot_delete_and_add_back")
if (a_slot.slot_type == TimeSlotType.FLOATING) or (
a_slot.start_overtime_minutes + a_slot.end_overtime_minutes > 0
):
if a_slot.start_minutes - query_start_minutes < 0:
continue
# if worker_code == "MY|D|3|CT29":
# print("pause")
floating_slots.append(
[
a_slot.start_minutes - query_start_minutes,
a_slot.end_minutes - query_start_minutes,
a_slot.start_overtime_minutes,
a_slot.end_overtime_minutes,
]
)
return floating_slots
def env_encode_day_seq_to_weekday(self, day_seq: int) -> int:
today_start_date = self.data_start_datetime + timedelta(days=day_seq)
return (today_start_date.weekday() + 1) % 7
def env_encode_from_datetime_to_minutes(self, input_datetime: datetime) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
assigned_start_minutes = int(
(input_datetime - self.data_start_datetime).total_seconds() / 60
)
return assigned_start_minutes
def env_encode_from_datetime_to_day_with_validation(self, input_datetime: datetime) -> int:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
the_minutes = self.env_encode_from_datetime_to_minutes(input_datetime)
if (the_minutes < self.get_env_planning_horizon_start_minutes()) or (
the_minutes >= self.get_env_planning_horizon_end_minutes()
): # I removed 10 seconds from end of window.
raise ValueError("Out of Planning Window")
return int(the_minutes / self.config["minutes_per_day"])
def env_decode_from_minutes_to_datetime(self, input_minutes: int) -> datetime:
# worker_id, start_minutes, end_minutes, slot_type, worker_id,start_minutes, end_minutes, slot_type
assigned_start_datetime = self.data_start_datetime + timedelta(minutes=input_minutes)
return assigned_start_datetime
def get_encode_shared_duration_by_planning_efficiency_factor(
self, requested_duration_minutes: int, nbr_workers: int
) -> int:
factor = self.efficiency_dict[nbr_workers]
return int(requested_duration_minutes * factor / nbr_workers)
def parse_team_flex_form_config(self):
# TODO team.country_code
# country_code = "CN" team.country_code
# self.national_holidays = holidays.UnitedStates()
# self.national_holidays = holidays.CountryHoliday('US')
if self.config["holiday_days"] is not None:
self.national_holidays = self.config["holiday_days"].split(";")
else:
self.national_holidays = []
if self.config["weekly_rest_day"] is not None:
__split = str(self.config["weekly_rest_day"]).split(";")
else:
__split = []
self.weekly_working_days_flag = [True for _ in range(7)]
for day_s in __split:
self.weekly_working_days_flag[int(day_s)] = False
# 1. Travel time formula is defined as GPS straight line distance *1.5/ (40 KM/Hour), minimum 10 minutes. Those numbers like 1.5, 40, 10 minutes,
# self.config["travel_speed_km_hour"] = self.config["flex_form_data"]["travel_speed_km_hour"]
# self.config["travel_min_minutes"] = self.config["flex_form_data"]["travel_min_minutes"]
self.travel_router = HaversineTravelTime(
travel_speed=self.config["travel_speed_km_hour"],
min_minutes=self.config["travel_min_minutes"],
)
# self.travel_router = RoutingPyRedisTravelTime(
# travel_speed=self.config["travel_speed_km_hour"],
# min_minutes=self.config["travel_min_minutes"],
# redis_conn=self.redis_conn, travel_mode="car"
# )
def reload_env_from_redis(self):
# observation = self.reset(shuffle_jobs=False)
self.mutate_refresh_planning_window_from_redis()
self._reset_data()
if len(self.workers) < 1:
log.warn(
"No workers in env, skipped loading slots. Maybe this is the first initialization?"
)
return
for absence_code in self.kp_data_adapter.absence_db_dict:
# This one loads all data first. The env will decide to replay it or not depending on cache_server(redis)
job = self.env_encode_single_absence(self.kp_data_adapter.absence_db_dict[absence_code])
absence_job = self.mutate_create_worker_absence(job, auto_replay=False)
for job_code in self.kp_data_adapter.appointment_db_dict:
# This one loads all data first. The env will decide to replay it or not depending on cache_server(redis)
if job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"reload_env_from_redis: {job_code}")
appt = self.env_encode_single_appointment(
self.kp_data_adapter.appointment_db_dict[job_code]
)
job = self.mutate_create_appointment(appt, auto_replay=False)
self.slot_server.reload_from_redis_server()
def mutate_replay_jobs_single_working_day(self, day_seq: int):
for job_code in self.jobs_dict.keys():
this_job = self.jobs_dict[job_code]
if this_job.planning_status == JobPlanningStatus.UNPLANNED:
continue
if (this_job.scheduled_start_minutes < day_seq * 1440) or (
this_job.scheduled_start_minutes > (day_seq + 1) * 1440
):
continue
if this_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug("mutate_replay_jobs_single_working_day: DEBUGGING_JOB_CODE_SET ")
action_dict = self.gen_action_dict_from_job(job=this_job, is_forced_action=True)
info = self.mutate_update_job_by_action_dict(
a_dict=action_dict, post_changes_flag=False
)
if info.status_code != ActionScoringResultType.OK:
log.error(
f"Error in job replay , but it will continue. code= {job_code}, info: {info} "
)
def replay_env_to_redis(self):
# self.slot_server.reset()
for key in self.redis_conn.scan_iter(f"{self.team_env_key}/s/*"):
self.redis_conn.delete(key)
observation = self.reset(shuffle_jobs=False)
self.mutate_extend_planning_window()
if len(self.jobs) < 1:
log.error("No jobs in the env, len(self.jobs) < 1:")
return
# raise ValueError("No jobs in the env, len(self.jobs) < 1:")
# we assume sequence of I P U
sorted_jobs = sorted(
self.jobs, key=lambda job__i_: (job__i_.planning_status, job__i_.job_code)
)
self.jobs = sorted_jobs
for ji, job___ in enumerate(self.jobs):
self.jobs_dict[job___.job_code].job_index = ji
self.run_mode = EnvRunModeType.REPLAY
self.current_job_i = 0
# previous_observation = self._get_observation()
for step_job_code in self.jobs_dict.keys():
this_job = self.jobs_dict[step_job_code]
if this_job.job_code in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(
f"JOB:{this_job.job_code}:INDEX:{self.current_job_i}: replaying on pause: starting at {this_job.scheduled_start_minutes}."
)
if (
this_job.scheduled_start_minutes < self.get_env_planning_horizon_start_minutes()
) or (this_job.scheduled_start_minutes > self.get_env_planning_horizon_end_minutes()):
log.warn(
f"JOB:{this_job.job_code}:INDEX:{self.current_job_i}: is out of horizon, starting at {this_job.scheduled_start_minutes} and therefore is skipped."
)
self.current_job_i += 1
continue
if self.current_job_i >= len(self.jobs):
break
if this_job.planning_status not in [
JobPlanningStatus.IN_PLANNING,
JobPlanningStatus.PLANNED,
JobPlanningStatus.COMPLETED,
]:
log.info(
f"Replayed until first U-status, self.current_job_i = {self.current_job_i} "
)
break
# return observation, -1, False, None
# break
if this_job.scheduled_duration_minutes < 1:
log.error(
f"JOB:{this_job.job_code}:scheduled_duration_minutes = {this_job.scheduled_duration_minutes} < 1 , not allowed, skipped from replay"
)
self.current_job_i += 1
continue
# action = self.gen_action_from_one_job(self.jobs[self.current_job_i])
# action_dict = self.decode_action_into_dict(action)
# This should work for both appt and absence.
action_dict = self.gen_action_dict_from_job(job=this_job, is_forced_action=True)
info = self.mutate_update_job_by_action_dict(
a_dict=action_dict, post_changes_flag=False
)
if info.status_code != ActionScoringResultType.OK:
print(
f"Error game replay got error, but it will continue: job_code={this_job.job_code}, current_job_i: {self.current_job_i}, info: {info} "
)
# TODO
self.replay_worker_absence_to_redis()
self.replay_appointment_to_redis()
self.run_mode = EnvRunModeType.PREDICT
def replay_appointment_to_redis(self):
# local_loader.load_batch_local_appointment_TODO(env=self)
for appt_code, value in self.kp_data_adapter.appointment_db_dict.items():
appt = self.env_encode_single_appointment(value)
self.mutate_create_appointment(appt, auto_replay=True)
self.recommendation_server.search_for_recommendations(job_code=appt.job_code)
log.info(f"APPT:{appt.job_code}: SUCCESSFULLY replayed one appointment")
def replay_worker_absence_to_redis(self):
for absence_code in self.kp_data_adapter.absence_db_dict:
job = self.env_encode_single_absence(self.kp_data_adapter.absence_db_dict[absence_code])
absence_job = self.mutate_create_worker_absence(job, auto_replay=True)
def replay_env(self):
global_env_config = self.redis_conn.get(self.get_env_config_redis_key())
if global_env_config is None:
# This initialize the env dataset in redis for this envionrment.
# all subsequent env replay_env should read from this .
log.info("Trying to get lock over the env key to applying messages")
with self.redis_conn.lock(
self.get_env_replay_lock_redis_key(), timeout=60 * 10
) as lock:
self.redis_conn.set(self.get_env_config_redis_key(), json.dumps(self.config))
self.set_env_window_replay_till_offset(0)
self.replay_env_to_redis()
# code you want executed only after the lock has been acquired
# lock.release()
log.info("Done with redis lock to applying messages")
else:
self.reload_env_from_redis()
# Catch up with recent messages on Kafka. Optional ? TODO
self.mutate_check_env_window_n_replay()
self._move_to_next_unplanned_job()
obs = None # self._get_observation()
reward = -1
done = False
if obs is None:
done = True
info = {"message": f"Replay done"}
return (obs, reward, done, info)
# Is this timeout seconds?
# TODO, replace two with locks by this unified locking
def mutate_check_env_window_n_replay(self):
# I need to lock it to protect kafka, generator not threadsafe.
# ValueError: generator already executing
# with lock:
if PLANNER_SERVER_ROLE == "trainer":
return
self.kafka_server.consume_env_messages()
def env_encode_single_worker(self, worker=None):
assert type(worker) == dict, "Wrong type, it must be dict"
if worker["worker_code"] in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug("env_encode_single_worker debug MY|D|3|CT07")
# worker_week_day = (self.data_start_datetime.weekday() + 1) % 7
flex_form_data = worker["flex_form_data"].copy()
# db_session = self.cnx
# _workers = pd.read_sql(
# db_session.query(Location).filter(Location.location_code ==
# worker['location_id']).statement,
# db_session.bind,
# )
# worker_info = _workers.set_index("id").to_dict(orient="index")
# Here it makes planner respect initial travel time
if self.config["respect_initial_travel"] == True:
location_type = LocationType.JOB
else:
location_type = LocationType.HOME
home_location = JobLocationBase(
float(worker["geo_longitude"]),
float(worker["geo_latitude"]),
location_type,
worker["location_code"]
)
# working_minutes_array = flex_form_data["StartEndTime"].split(";")
weekly_start_gps = [home_location for _ in range(7)]
weekly_end_gps = [home_location for _ in range(7)]
_weekly_working_slots = [None for _ in range(7)]
week_day_i = 0
for week_day_code in [
"sunday",
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
]:
# (week_day_i * 1440) +
today_start_minutes = date_util.int_hhmm_to_minutes(
int(worker["business_hour"][week_day_code][0]["open"])
if worker["business_hour"][week_day_code][0]["isOpen"]
else 0
)
today_end_minutes = date_util.int_hhmm_to_minutes(
int(worker["business_hour"][week_day_code][0]["close"])
if worker["business_hour"][week_day_code][0]["isOpen"]
else 0
)
_weekly_working_slots[week_day_i] = (
today_start_minutes,
today_end_minutes,
)
week_day_i += 1
skills = {
"skills": set(flex_form_data["skills"]),
}
# loc = LocationTuple(loc_t[0], loc_t[1], LocationType.HOME, f"worker_loc_{worker['id']}",)
belongs_to_pair = None
if "assistant_to" in flex_form_data.keys():
if flex_form_data["assistant_to"]:
belongs_to_pair = (
flex_form_data["assistant_to"],
worker["code"],
)
if "is_assistant" not in flex_form_data.keys():
flex_form_data["is_assistant"] = False
overtime_minutes = 0
if "max_overtime_minutes" in flex_form_data.keys():
overtime_minutes = int(float(flex_form_data["max_overtime_minutes"]))
w_r = Worker(
worker_id=worker["id"],
worker_code=worker["code"],
flex_form_data=flex_form_data,
# "level= worker.flex_form_data["level"],
skills=skills,
# location=loc,
#
# TODO, @duan, only 1 slot per day? 2020-11-17 15:36:00
# 6 times for _ in range(len(k_worker['result']))
weekly_working_slots=_weekly_working_slots,
weekly_start_gps=weekly_start_gps,
weekly_end_gps=weekly_end_gps,
linear_working_slots=[], # 6 times for _ in range(len(k_worker['result']))
# linear_daily_start_gps=daily_start_gps,
# linear_daily_end_gps=daily_end_gps,
historical_job_location_distribution=worker["job_history_feature_data"],
worker_index=worker["id"],
belongs_to_pair=belongs_to_pair,
is_active=worker["is_active"],
daily_max_overtime_minutes=overtime_minutes,
weekly_max_overtime_minutes=60 * 10,
overtime_limits={},
used_overtime_minutes={},
)
return w_r
def load_transformed_workers(self):
# start_date = self.data_start_datetime
w = []
# w_dict = {}
#
# index = 0
for wk, worker in self.kp_data_adapter.workers_db_dict.items():
active_int = 1 if worker["is_active"] else 0
if active_int != 1:
print(
"worker {} is not active, maybe it shoud be skipped from loading? ",
worker.id,
)
# TODO
# included for now , since maybe job on it?
continue
# worker_dict = worker.__dict__
# if type(worker_dict["location"]) != dict:
# worker_dict["location"] = worker.location.__dict__
w_r = self.env_encode_single_worker(worker)
w.append(w_r)
# w_dict[w_r.worker_code] = index
# index += 1
sorted_workers = sorted(w, key=lambda x: x.worker_code)
return sorted_workers
def env_encode_single_appointment(self, appointment=None):
assigned_start_minutes = self.env_encode_from_datetime_to_minutes(
appointment["scheduled_start_datetime"]
)
job_form = appointment["flex_form_data"]
try:
abs_location = self.locations_dict[appointment["location_code"]]
except:
included_job_code = appointment["included_job_codes"][0]
if included_job_code not in self.jobs_dict.keys():
log.error(
f"{appointment['appointment_code']}: I failed to find the job by code= {included_job_code}, and then failed to find the Location. Skipped this appointment"
)
#
return
# abs_location = None # TODO?
else:
abs_location = self.jobs_dict[included_job_code].location
requested_skills = {}
scheduled_worker_codes = []
for jc in job_form["included_job_codes"]:
if jc not in self.jobs_dict.keys():
log.error(
f"missing included_job_codes, appointment_code= {appointment['appointment_code']}, job_code={jc} "
)
return None
if self.jobs_dict[jc].planning_status != JobPlanningStatus.UNPLANNED:
# Keep last one for now
scheduled_worker_codes = self.jobs_dict[jc].scheduled_worker_codes
if len(scheduled_worker_codes) < 1:
#
log.debug(
f"APPT:{appointment['appointment_code']}:Only u status in appt? I will take requested. "
)
scheduled_worker_codes.append(self.jobs_dict[jc].requested_primary_worker_code)
requested_day_minutes = int(assigned_start_minutes / 1440) * 1440
appt_job = Job(
job_code=appointment["appointment_code"],
job_id=-1,
job_index=appointment["id"],
job_type=JobType.APPOINTMENT,
job_schedule_type=JobScheduleType.FIXED_SCHEDULE, # job_seq: 89, job['job_code']
planning_status=JobPlanningStatus.PLANNED,
location=abs_location,
requested_skills=requested_skills,
#
scheduled_worker_codes=scheduled_worker_codes,
scheduled_start_minutes=assigned_start_minutes,
scheduled_duration_minutes=appointment["scheduled_duration_minutes"],
#
requested_start_min_minutes=(job_form["ToleranceLower"] * 1440) + requested_day_minutes,
requested_start_max_minutes=(job_form["ToleranceUpper"] * 1440) + requested_day_minutes,
requested_start_minutes=requested_day_minutes,
requested_time_slots=[], # job["requested_time_slots"]
#
requested_primary_worker_code=requested_primary_worker_code,
requested_duration_minutes=appointment["scheduled_duration_minutes"],
flex_form_data=job_form,
included_job_codes=job_form["included_job_codes"],
new_job_codes=[],
searching_worker_candidates=[],
appointment_status=job_form["appointment_status"],
#
is_active=True,
is_auto_planning=False,
)
self.set_searching_worker_candidates(appt_job)
return appt_job
def load_transformed_appointments(self):
a = []
a_dict = {}
#
index = 0
for _, appointment in self.kp_data_adapter.appointments_db_dict.items():
a_r = self.env_encode_single_appointment(appointment)
a.append(a_r)
a_dict[a_r.appointment_code] = index
index += 1
return a
def _convert_availability_slot(self, x_str, prev_available_slots):
orig_slot = x_str.split(";")
the_result = [] # copy.copy(prev_available_slots)
for slot in orig_slot:
if (slot is None) or len(slot) < 1:
continue
start_time = datetime.strptime(
slot.split("_")[0], kandbox_config.KANDBOX_DATETIME_FORMAT_GTS_SLOT
)
end_time = datetime.strptime(
slot.split("_")[1], kandbox_config.KANDBOX_DATETIME_FORMAT_GTS_SLOT
)
start_minutes = self.env_encode_from_datetime_to_minutes(start_time)
end_minutes = self.env_encode_from_datetime_to_minutes(end_time)
the_result.append((start_minutes, end_minutes))
# if len(prev_available_slots) < 1:
# TODO, i take new one simply as Last Known. Is it Right? 2021-02-20 10:01:44
return the_result
# prev_tree= IntervalTree.from_tuples(prev_available_slots)
# curr_tree = IntervalTree.from_tuples(the_result)
# new_result = [(s[0], s[1]) for s in list(prev_tree)]
# return new_result
def _convert_lists_to_slots(self, input_list: list, job):
orig_loc = self.locations_dict[job["location_code"]]
CONSTANT_JOB_LOCATION = JobLocationBase(
geo_longitude=orig_loc.geo_longitude,
geo_latitude=orig_loc.geo_latitude,
location_type=LocationType.HOME,
location_code=orig_loc.location_code,
# historical_serving_worker_distribution=None,
# avg_actual_start_minutes=0,
# avg_days_delay=0,
# stddev_days_delay=0,
# available_slots=tuple(), # list of [start,end] in linear scale
# rejected_slots=job["rejected_slots"],
)
job_available_slots = []
for slot in sorted(list(input_list), key=lambda x: x[0]):
wts = WorkingTimeSlot(
slot_type=TimeSlotType.FLOATING,
start_minutes=slot[0],
end_minutes=slot[1],
prev_slot_code=None,
next_slot_code=None,
start_location=CONSTANT_JOB_LOCATION,
end_location=CONSTANT_JOB_LOCATION,
worker_id="_",
available_free_minutes=0,
assigned_job_codes=[],
)
job_available_slots.append(wts)
return job_available_slots
def env_encode_single_job(self, job: dict) -> Job: # from db dict object to dataclass object
if job is None:
log.error("env_encode_single_job: job is None")
return
if job["code"] in kandbox_config.DEBUGGING_JOB_CODE_SET:
log.debug(f"debug {kandbox_config.DEBUGGING_JOB_CODE_SET}")
flex_form_data = job["flex_form_data"]
if pd.isnull(job["requested_start_datetime"]):
log.error("requested_start_datetime is null, not allowed")
return None
if pd.isnull(job["requested_duration_minutes"]):
log.error("requested_duration_minutes is null, not allowed")
return None
assigned_start_minutes = 0
if job["planning_status"] in (JobPlanningStatus.PLANNED, JobPlanningStatus.IN_PLANNING):
try:
assigned_start_minutes = int(
(job["scheduled_start_datetime"] - self.data_start_datetime).total_seconds()
/ 60
)
except ValueError as ve:
log.error(f"Data error: failed to convert scheduled_start_datetime, job = {job}")
return None
requested_start_minutes = int(
(job["requested_start_datetime"] - self.data_start_datetime).total_seconds() / 60
)
min_minutes = requested_start_minutes
max_minutes = requested_start_minutes
if "tolerance_start_minutes" in flex_form_data.keys():
min_minutes = requested_start_minutes + (flex_form_data["tolerance_start_minutes"])
if "tolerance_end_minutes" in flex_form_data.keys():
max_minutes = requested_start_minutes + (flex_form_data["tolerance_end_minutes"])
assigned_start_minutes = 0
if job["planning_status"] in (JobPlanningStatus.PLANNED, JobPlanningStatus.IN_PLANNING):
assigned_start_minutes = int(
(job["scheduled_start_datetime"] - self.data_start_datetime).total_seconds() / 60
)
historical_serving_worker_distribution = None
if "job_history_feature_data" in job.keys():
if "historical_serving_worker_distribution" in job["job_history_feature_data"].keys():
historical_serving_worker_distribution = job["job_history_feature_data"][
"historical_serving_worker_distribution"
]
if "requested_primary_worker_code" not in job.keys():
log.debug("No request primary code")
if historical_serving_worker_distribution is None:
historical_serving_worker_distribution = {job["requested_primary_worker_code"]: 1}
if job["location_code"] not in self.locations_dict.keys():
job_location = JobLocation(
geo_longitude=job["geo_longitude"],
geo_latitude=job["geo_latitude"],
location_type=LocationType.JOB,
location_code=job["location_code"],
historical_serving_worker_distribution=historical_serving_worker_distribution,
avg_actual_start_minutes=0,
avg_days_delay=0,
stddev_days_delay=0,
# rejected_slots=job["rejected_slots"],
)
self.locations_dict[job_location.location_code] = job_location
else:
job_location = self.locations_dict[job["location_code"]]
# if job_location.geo_longitude < 0:
# log.warn(
# f"Job {job['code']} has invalid location : {job_location.location_code} with geo_longitude = {job_location.geo_longitude}"
# )
# prev_available_slots = [
# (s.start_minutes, s.end_minutes) for s in job_location.available_slots
# ]
net_avail_slots = [
[
self.get_env_planning_horizon_start_minutes(),
self.get_env_planning_horizon_end_minutes(),
]
]
# TODO duan, change job_location from tuple to dataclass.
# job_location.available_slots.clear()
job_available_slots = self._convert_lists_to_slots(net_avail_slots, job)
# job_location.available_slots = sorted(list(net_avail_slots), key=lambda x: x[0])
if "requested_skills" in flex_form_data.keys():
requested_skills = {
"skills": set(flex_form_data["requested_skills"]),
}
else:
requested_skills = set()
log.error(f"job({job['code']}) has no requested_skills")
the_final_status_type = job["planning_status"]
is_appointment_confirmed = False
# if job_schedule_type == JobScheduleType.FIXED_SCHEDULE:
# is_appointment_confirmed = True
primary_workers = []
if not | pd.isnull(job["scheduled_primary_worker_id"]) | pandas.isnull |
import pytest
from pandas import Series
import pandas._testing as tm
class TestSeriesUnaryOps:
# __neg__, __pos__, __inv__
def test_neg(self):
ser = | tm.makeStringSeries() | pandas._testing.makeStringSeries |
import pandas as pd
import numpy as np
import pickle
def preprocess(data_path, labels_path=None):
features = pd.read_csv(data_path)
if labels_path:
labels = | pd.read_csv(labels_path) | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.