prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
---|---|---|
import pandas as pd
import time
import argparse
import csv
from vivino import utils
from vivino.geocoder import helpers
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bulk geocode a Vivino export file in tsv format.')
parser.add_argument('--inputpath', required=True, dest="inputpath",
help='The path to the input tsv file', type=lambda x: utils.is_valid_file(x))
parser.add_argument('--outputpath', required=True, dest="outputpath",
help='The path to the output tsv file')
parser.add_argument('--referer', required=True, dest="referer",
help='The referer string used to identify the application in the Nominatim API request')
args = parser.parse_args()
data =
|
pd.read_csv(args.inputpath, sep=',', quotechar='"', header=0, encoding='utf8', quoting=csv.QUOTE_ALL, engine='python')
|
pandas.read_csv
|
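A minimal, self-contained sketch of the pandas.read_csv call pattern from this row's completion. The file name is hypothetical, and a tab separator is assumed to match the script's description of a tsv export:
import csv
import pandas as pd

# Hypothetical export path; in the script this comes from the --inputpath argument.
data = pd.read_csv(
    'vivino_export.tsv',
    sep='\t',                 # assumed tab separator for a tsv export
    quotechar='"',
    quoting=csv.QUOTE_ALL,    # treat every field as quoted
    header=0,
    encoding='utf8',
    engine='python',
)
print(data.head())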
import re
import pandas as pd
import os.path
from os import path
from IPython.display import clear_output, display
def check_simplified_name(varlist, multiindex_df, exclude_vars=[]):
stig_var_list = []
excluded_var_list = []
for i in range(1, len(multiindex_df["simplified_name"])):
for var in varlist:
if re.search(var, multiindex_df['simplified_name'][i], re.IGNORECASE):
for ex in exclude_vars:
if multiindex_df['simplified_name'][i].lower() == ex:
if multiindex_df['simplified_name'][i] not in excluded_var_list:
excluded_var_list.append(multiindex_df['name'][i])
if multiindex_df['name'][i] not in excluded_var_list:
if multiindex_df['name'][i] not in stig_var_list:
stig_var_list.append(multiindex_df['name'][i])
return stig_var_list, excluded_var_list
def regex_filter(stig_vars, terms_to_filter, terms_to_filter_out):
filter_out = []
keep_in = []
for i in stig_vars:
simple_var = i.strip('\\').split('\\')[-1]
for term in terms_to_filter:
if re.search(term, simple_var, re.IGNORECASE):
keep_in.append(i)
for term in terms_to_filter_out:
if re.search(term, simple_var, re.IGNORECASE):
filter_out.append(i)
keep_list = [item for item in stig_vars if item in keep_in]
print("Found", len(keep_list), "that are stigmatizing")
full_list = filter_out+keep_list
list_difference = [item for item in stig_vars if item not in full_list]
print(len(list_difference), "still need review")
return keep_list, list_difference
def manual_check(final_vars, out_file, keep_vars, prev_file=None, ex_vars=None):
while path.exists(out_file):
print("Output file already exists. Would you like rename the output file or exit?")
res = input("Type 'r' to rename or 'e' for exit:\n")
if res == 'r':
out_file = input("Type new output file:\n")
elif res == 'e':
return None
print("Continue to review of", len(final_vars), 'variables?')
status = input("y/n: ")
if len(keep_vars) > 0:
total = len(keep_vars)
df = pd.DataFrame(keep_vars, columns=['full name'])
df['simple name'] = ''
df['stigmatizing'] = ''
for i in range(df.shape[0]):
df['simple name'][i] = df['full name'][i].strip('\\').split('\\')[-1]
df['stigmatizing'][i] = 'y'
print("Stigmatizing:", df['full name'][i].strip('\\').split('\\')[-1], "<, recording result", i+1, "of", total, "already identified as stigmatizing")
if status == "y":
stig_vars_df = go_through_df(final_vars, prev_file)
stig_vars_df = stig_vars_df.append(df, ignore_index=True)
stig_vars_df.to_csv(out_file, sep='\t')
print("\n \nSTIGMATIZING VARIABLE RESULTS SAVED TO:\t", out_file)
else:
stig_vars_df = None
if ex_vars is not None:
print("Would you like to review the excluded variables?")
ex_var_review = input('Type "yes" or "no": \n')
if ex_var_review == 'yes':
ex_vars_df = go_through_df(ex_vars)
ex_out_file = out_file.replace('.tsv', '_excluded.tsv')
ex_vars_df.to_csv(ex_out_file, sep='\t')
print("\n \nEXCLUDED VARIABLE RESULTS SAVED TO:\t", ex_out_file)
else:
ex_vars_df = None
print("Clear cell output and display pandas dataframe?")
clear = input('Type "y" or "n": \n')
if clear == 'y':
clear_output()
display(stig_vars_df)
return stig_vars_df, ex_vars_df
def go_through_df(var_list, prev_file):
df =
|
pd.DataFrame(var_list, columns=['full name'])
|
pandas.DataFrame
|
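A small sketch of the pandas.DataFrame pattern from this row's completion: a one-column frame built from a plain Python list, then extended with derived columns. The variable paths are made up for illustration; the string methods reproduce the per-row strip/split logic above in vectorized form:
import pandas as pd

# Hypothetical variable paths standing in for keep_vars / var_list.
var_list = ['\\study\\demographics\\alcohol_use\\', '\\study\\labs\\hiv_test\\']

df = pd.DataFrame(var_list, columns=['full name'])
df['simple name'] = df['full name'].str.strip('\\').str.split('\\').str[-1]
df['stigmatizing'] = ''
print(df)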
"""
__author__ = <NAME>
Many analysis functions for dF/F. Main class is CalciumReview.
"""
import pathlib
import re
import itertools
import warnings
from typing import Optional, Dict
import attr
from attr.validators import instance_of
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict
from enum import Enum
from scipy import stats
from calcium_bflow_analysis.dff_analysis_and_plotting import dff_analysis
from calcium_bflow_analysis.single_fov_analysis import filter_da
class Condition(Enum):
HYPER = "HYPER"
HYPO = "HYPO"
class AvailableFuncs(Enum):
""" Allowed analysis functions that can be used with CalciumReview.
The values of the enum variants are names of functions in dff_analysis.py """
AUC = "calc_total_auc_around_spikes"
MEAN = "calc_mean_auc_around_spikes"
MEDIAN = "calc_median_auc_around_spikes"
SPIKERATE = "calc_mean_spike_num"
@attr.s
class CalciumReview:
"""
Evaluate and analyze calcium data from TAC-like experiments.
The attributes ending with `_data` are pd.DataFrames that
contain the result of different function from dff_analysis.py. If you wish
to add a new function, first make sure that its output is
compatible with that of existing functions, then add a new
attribute to the class and a new variant to the enum,
and finally patch the __attrs_post_init__ method to include this
new attribute. Make sure to not change the order of the enum - add
the function at the bottom of that list.
"""
folder = attr.ib(validator=instance_of(pathlib.Path))
glob = attr.ib(default=r"*data_of_day_*.nc")
files = attr.ib(init=False)
days = attr.ib(init=False)
conditions = attr.ib(init=False)
df_columns = attr.ib(init=False)
funcs_dict = attr.ib(init=False)
raw_data = attr.ib(init=False)
auc_data = attr.ib(init=False)
mean_data = attr.ib(init=False)
spike_data = attr.ib(init=False)
def __attrs_post_init__(self):
"""
Find all files and parsed days for the experiment, and (partially) load them
into memory.
"""
self.files = []
self.raw_data = {}
all_files = self.folder.rglob(self.glob)
day_reg = re.compile(r".+?of_day_(\d+).nc")
parsed_days = []
print("Found the following files:")
day = 0
for file in all_files:
print(file)
self.files.append(file)
try:
day = int(day_reg.findall(file.name)[0])
except IndexError:
continue
parsed_days.append(day)
self.raw_data[day] = xr.open_dataset(file)
self.days = np.unique(np.array(parsed_days))
stats = ["_mean", "_std"]
self.conditions = list(set(self.raw_data[day].condition.values.tolist()))
self.df_columns = [
"".join(x) for x in itertools.product(self.conditions, stats)
] + ["t", "p"]
self.auc_data =
|
pd.DataFrame(columns=self.df_columns)
|
pandas.DataFrame
|
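A sketch of how the completion's empty pandas.DataFrame gets its columns in __attrs_post_init__, with assumed condition labels in place of the values parsed from the .nc files:
import itertools
import pandas as pd

conditions = ['HYPER', 'HYPO']   # assumed; normally read from the raw xarray data
stats = ['_mean', '_std']

df_columns = [''.join(x) for x in itertools.product(conditions, stats)] + ['t', 'p']
auc_data = pd.DataFrame(columns=df_columns)
print(auc_data.columns.tolist())
# ['HYPER_mean', 'HYPER_std', 'HYPO_mean', 'HYPO_std', 't', 'p']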
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
obs = filter_table(table, taxonomy, include='c_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_all_features_with_frequency_greater_than_zero_get_filtered(self):
table = pd.DataFrame([[2.0, 0.0], [1.0, 0.0], [9.0, 0.0], [1.0, 0.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# empty - feat2, which is matched by the include term, has a frequency
# of zero in all samples, so all samples end up dropped from the table
with self.assertRaisesRegex(ValueError,
expected_regex='greater than zero'):
filter_table(table, taxonomy, include='dd')
def test_extra_taxon_ignored(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee', 'aa; bb; cc'],
index=pd.Index(['feat1', 'feat2', 'feat3'],
name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
def test_missing_taxon_errors(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc'],
index=pd.Index(['feat1'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, expected_regex='All.*feat2'):
filter_table(table, taxonomy, include='bb')
class FilterSeqs(unittest.TestCase):
def test_filter_no_filters(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_seqs(seqs, taxonomy)
def test_alt_delimiter(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_seqs(seqs, taxonomy, include='cc<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# exclude with delimiter
obs = filter_seqs(seqs, taxonomy, exclude='ww<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
def test_filter_seqs_unknown_mode(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_seqs(seqs, taxonomy, include='bb', mode='not-a-mode')
def test_filter_seqs_include(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='bb')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='cc,ee')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='dd')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='peanut!')
def test_filter_seqs_include_exact_match(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='bb', mode='exact')
def test_filter_seqs_exclude(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, exclude='ab')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='xx')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, exclude='dd')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='dd ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, exclude='cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb')
def test_filter_seqs_exclude_exact_match(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, exclude='peanut!',
mode='exact')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_seqs_include_exclude(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='aa', exclude='peanut!')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_seqs(seqs, taxonomy, include='aa', exclude='ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_seqs(seqs, taxonomy, include='cc', exclude='ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
|
pdt.assert_series_equal(obs, exp)
|
pandas.util.testing.assert_series_equal
|
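The api in this row, pandas.util.testing.assert_series_equal, was deprecated in later pandas releases in favour of the public pandas.testing module. A minimal sketch of the same order-insensitive comparison used throughout these tests, written against pandas.testing so it runs on current versions:
import pandas as pd
import pandas.testing as pdt

obs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
exp = pd.Series(['ACCC', 'ACGT'], index=['feat2', 'feat1'])

# Sorting both sides first, as the tests above do, removes any dependence on row order.
pdt.assert_series_equal(obs.sort_values(), exp.sort_values())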
# TwitterHashtagScraper
# Copyright 2020 <NAME>
# See LICENSE for details.
import csv
import json
import math
import sys
import time
from datetime import datetime
from itertools import cycle
from re import findall
import requests
import pandas as pd
from utils_json import json_tweet_parser
class TwitterHashtagScraper():
"""Twitter Hashtag Scraper
"""
def __init__(self, hashtag, x_guest_token, use_proxy, output_path=None, max_tweets=None):
"""Twitter Hashtag Scraper constructor
Args:
hashtag (str): hashtag to scrape
x_guest_token (str): a valid guest token
use_proxy (boolean): boolean to activate proxies or not
output_path (str, optional): output path. Defaults to None.
max_tweets (int, optional): max number of tweets to download; if None, tweets are collected until scrolling is exhausted. Defaults to None.
"""
self.hashtag = hashtag
self.use_proxy = use_proxy
self.output_path = output_path
self.max_tweets = max_tweets
self.x_guest_token = x_guest_token
def _init_proxies(self):
"""Function to obtain available proxies from sslproxies
Returns:
[list] -- list of proxies
"""
r = requests.get('https://www.sslproxies.org/')
matches = findall(r"<td>\d+.\d+.\d+.\d+</td><td>\d+</td>", r.text)
revised = [m.replace('<td>', '') for m in matches]
proxies = [s[:-5].replace('</td>', ':') for s in revised]
return proxies
def _make_request(self, proxy_active, proxy_active_value, url, headers, params):
"""Function to make a request through a proxy
Arguments:
proxy_active {int} -- flag indicating whether a working proxy is currently active
proxy_active_value {str} -- active proxy
url {str} -- url to make the request
headers {dict} -- headers to send with the request
params {dict} -- params to send with the request
Returns:
[int, str, dict] -- proxy information and result of the request
"""
if self.use_proxy:
proxies = self._init_proxies()
proxy_pool = cycle(proxies)
# Iterate trying free proxies to make the request
for i in range(100):
if proxy_active != 1:
proxy_active_value = next(proxy_pool)
try:
response = requests.get('https://httpbin.org/ip', timeout=2.0, proxies={
"http": 'http://' + proxy_active_value, "https": 'https://' + proxy_active_value})
page = requests.get(url, headers=headers, params=params, proxies={
"http": 'http://' + proxy_active_value, "https": 'https://' + proxy_active_value})
proxy_active = 1
if page.status_code == 200:
return proxy_active, proxy_active_value, page
except Exception as e:
proxy_active = 0
continue
return proxy_active, proxy_active_value, "Error"
else:
page = requests.get(url, headers=headers, params=params)
proxy_active = 0
if page.status_code == 200:
return proxy_active, None, page
def _get_hashtag_timeline(self):
"""Function to obain the hashtag timeline from Twitter
Returns:
dict -- dict with tweets stored
dict -- dict with users stored
"""
tweets_dict = self._create_dict_from_structure('tweets_hashtag')
users_dict = self._create_dict_from_structure('users_hashtag')
print(f"Collecting data from Twitter about: {self.hashtag}")
has_more_items = True
last_tweet_id = 0
count_tweets = 0
proxy_active = 0
proxy_active_value = ""
min_position = math.inf
try:
# Iterate simulating scroll
while (has_more_items):
print(f"{count_tweets}/{self.max_tweets if self.max_tweets else 'Max Scroll'} tweets obtained...")
headers = {
'authority': 'api.twitter.com',
'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',
'x-twitter-client-language': 'es',
'x-guest-token': str(self.x_guest_token),
'x-twitter-active-user': 'yes',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36',
'accept': '*/*',
'origin': 'https://twitter.com',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://twitter.com/',
'accept-language': 'es-ES,es;q=0.9',
}
params = (
('include_profile_interstitial_type', '1'),
('include_blocking', '1'),
('include_blocked_by', '1'),
('include_followed_by', '1'),
('include_want_retweets', '1'),
('include_mute_edge', '1'),
('include_can_dm', '1'),
('include_can_media_tag', '1'),
('skip_status', '1'),
('cards_platform', 'Web-12'),
('include_cards', '1'),
('include_ext_alt_text', 'true'),
('include_quote_count', 'true'),
('include_reply_count', '1'),
('tweet_mode', 'extended'),
('include_entities', 'true'),
('include_user_entities', 'true'),
('include_ext_media_color', 'true'),
('include_ext_media_availability', 'true'),
('send_error_codes', 'true'),
('simple_quoted_tweet', 'true'),
('q', '#'+str(self.hashtag)+''),
('count', '20'),
('query_source', 'hashtag_click'),
('cursor', str(min_position)),
('pc', '1'),
('spelling_corrections', '1'),
('ext', 'mediaStats,highlightedLabel'),
)
url = 'https://api.twitter.com/2/search/adaptive.json'
proxy_active, proxy_active_value, page = self._make_request(
proxy_active, proxy_active_value, url, headers, params)
data = json.loads(page.content)
cursor_item_init = [d for d in data["timeline"]["instructions"][0]["addEntries"]["entries"] if d['entryId'] == 'sq-cursor-bottom']
if cursor_item_init:
cursor_item = cursor_item_init[0]
else:
cursor_item = data["timeline"]["instructions"][-1]["replaceEntry"]["entry"]
min_position = cursor_item["content"]["operation"]["cursor"]["value"]
for tweet in data["globalObjects"]["tweets"].keys():
(tweets_dict, users_dict) = json_tweet_parser(data["globalObjects"]["tweets"][tweet],data["globalObjects"]["users"], tweets_dict, users_dict)
count_tweets = count_tweets+20
if len(tweets_dict["id_tweet"]) > 0:
if last_tweet_id == tweets_dict["id_tweet"][-1]:
has_more_items = False
last_tweet_id = tweets_dict["id_tweet"][-1]
else:
has_more_items = False
if self.max_tweets:
if count_tweets > self.max_tweets:
has_more_items = False
time.sleep(0.1)
except Exception as e:
print(e)
return tweets_dict, users_dict
def _create_dict_from_structure(self, key):
"""Function to create a dict structure from the template defined in the json
Arguments:
key {str} -- key in the json artifact
Returns:
dict -- template dict to fill
"""
json_file_item = {"tweets_hashtag": [
"id_tweet",
"id_user",
"user",
"link_tweet",
"timestamp",
"text",
"replies_count",
"retweets_count",
"likes_count"
],
"users_hashtag": [
"username",
"id_user",
"img_user",
"link_user"
]
}
dict_struct = {}
for column in json_file_item[key]:
dict_struct[column] = []
return dict_struct
def collect(self):
"""Function to execute a search on twitter from the initial term
"""
tweets_dict, users_dict = self._get_hashtag_timeline()
tweets_df = pd.DataFrame(tweets_dict)
users_df =
|
pd.DataFrame(users_dict)
|
pandas.DataFrame
|
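A sketch of the dict-of-lists to DataFrame step in collect(), using a made-up users_dict shaped like the 'users_hashtag' template above:
import pandas as pd

users_dict = {
    'username': ['alice', 'bob'],
    'id_user': ['1', '2'],
    'img_user': ['img_a.png', 'img_b.png'],
    'link_user': ['https://twitter.com/alice', 'https://twitter.com/bob'],
}

users_df = pd.DataFrame(users_dict)                 # keys become columns, list entries become rows
users_df.to_csv('users_hashtag.csv', index=False)   # hypothetical output path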
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 11:00:37 2018
Script to fit SHG interference data.
The procedure fits the data twice, first with the period free, and then with
the period fixed at the average of the first runs.
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import math
#name for file
fileout = '101618fitsHemi2.txt'
#names of each fit
names = ['hemi2pure1a','hemi2pure1b','hemi2salt1a','hemi2salt1b',\
'hemi2pure2a','hemi2pure2b','hemi2salt2a','hemi2salt2b',\
'hemi2pure3a','hemi2pure3b']
#indices to import if you have multiple files named phaseMeasureX where x is
#some number, enter the first and last indices that you want to import.
startNum = 16
endNum = 25
#open file for writing to
f = open(fileout,'w+')
#initialize data frames to hold data
countsA = pd.DataFrame()
countsB = pd.DataFrame()
pos = pd.DataFrame()
#go through each file
for i in range(endNum-startNum+1):
#names of each file
filename = 'phaseMeasure' + str(i+startNum) + '.txt'
#import countsA (signal),countsB (dark counts), and pos (stage position)
countsA[names[i]] = pd.read_csv(filename,sep='\t')['countsA']
countsB[names[i]] = pd.read_csv(filename,sep='\t')['countsB']
pos[names[i]] =
|
pd.read_csv(filename,sep='\t')
|
pandas.read_csv
|
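A sketch of the per-file read in the loop above, assuming a tab-separated measurement file with countsA, countsB and pos columns (any column name other than countsA/countsB is an assumption):
import pandas as pd

filename = 'phaseMeasure16.txt'          # hypothetical input file
raw = pd.read_csv(filename, sep='\t')    # whole table, one column per measured quantity
countsA = raw['countsA']                 # signal counts, as collected in the loop
countsB = raw['countsB']                 # dark counts
pos = raw['pos']                         # assumed name of the stage-position column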
# usage:
# dataframe_filename
import sys
import os
import seaborn as sns
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from keyname import keyname as kn
from fileshash import fileshash as fsh
matplotlib.rcParams['pdf.fonttype'] = 42
sns.set(style='whitegrid')
non_url_safe = ['"', '#', '$', '%', '&', '+',
',', '/', ':', ';', '=', '?',
'@', '[', '\\', ']', '^', '`',
'{', '|', '}', '~', "'"]
def slugify(text):
"""
Turn the text content of a header into a slug for use in an ID
"""
non_safe = [c for c in text if c in non_url_safe]
if non_safe:
for c in non_safe:
text = text.replace(c, '')
# Strip leading, trailing and multiple whitespace, convert remaining whitespace to _
text = u'_'.join(text.split())
return text
dataframe_filename = sys.argv[1]
df = pd.read_csv(dataframe_filename)
print("Data loaded!")
df['Treatment'] = df['Treatment'].apply(lambda raw : {
'resource-even__channelsense-no__nlev-two__mute' : 'Blind',
'resource-even__channelsense-no__nlev-two__mute__mixed' : 'Mixed',
'resource-even__channelsense-yes__nlev-onebig' : 'Flat-Even',
'resource-even__channelsense-yes__nlev-two' : 'Nested-Even',
'resource-wave__channelsense-yes__nlev-onebig' : 'Flat-Wave',
'resource-wave__channelsense-yes__nlev-two' : 'Nested-Wave',
}[raw]
)
df['Relationship Category'] = df.apply(
lambda x: (
'Neighbor' if 'Neighbor' in x['Relationship']
else 'Channel' if 'mate' in x['Relationship']
else 'Cell' if 'Cell' in x['Relationship']
else 'Propagule' if 'Propagule' in x['Relationship']
else 'Unknown'
),
axis=1
)
print("Data crunched!")
for measure in df['Measure'].unique():
manip = df.pivot_table(
index=[
'First Update', 'Last Update', 'Relationship',
'Relationship Category', 'Seed', 'Treatment'
],
columns='Measure',
values='Value',
aggfunc='first'
).reset_index()
manip = manip[manip["Relationship"] != "Neighbor"]
l0 = manip[
manip["Relationship"].map(lambda x: " 1" not in x)
].copy()
l0["Level"] = 0
l1 = manip[
manip["Relationship"].map(lambda x: " 0" not in x)
].copy()
l1["Level"] = 1
res =
|
pd.concat([l0, l1])
|
pandas.concat
|
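A small sketch of the pandas.concat step that stacks the level-0 and level-1 slices, with tiny made-up frames in place of the pivoted table:
import pandas as pd

l0 = pd.DataFrame({'Relationship': ['Cell 0'], 'Value': [0.1]})
l0['Level'] = 0
l1 = pd.DataFrame({'Relationship': ['Cell 1'], 'Value': [0.2]})
l1['Level'] = 1

res = pd.concat([l0, l1])           # rows of l1 are appended below l0; original indices are kept
res = res.reset_index(drop=True)    # optional: give the stacked frame a fresh 0..n-1 index
print(res)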
import pandas as pd
import src.config as proj_config
import matplotlib.pyplot as plt
from pandas.plotting import autocorrelation_plot
from src.demand_prediction.events_models import calc_events_ts
from src.demand_prediction.general_functions import load_table_cache, save_table_cache
cache_path = proj_config.CACHE_DIR
data_path = proj_config.DATA_DIR
events_data_path = proj_config.EVENTS_DATASET_DIR
categories_path = cache_path + '/categories_events/'
def add_event_to_df(df, events, start_pred_time, leaf_name, window_size=3, use_cache=False, set_name='train', w=False):
path_res = '/saved_df/' + leaf_name + '_' + set_name + '_' + start_pred_time + '.pkl'
res = load_table_cache(path_res)
if res is None or not use_cache:
res = calc_events_ts(df, events, n=window_size, w=w)
save_table_cache(res, path_res)
return res
def split_data(data, start_pred_time, verbose=True):
X_train = data[data['date'] < start_pred_time]
X_test = data[data['date'] >= start_pred_time]
if verbose:
print("Train size:", len(X_train), " - Test size:", len(X_test))
return X_train, X_test
def create_events_df(data, events, emb_only=False, emb_size=100):
data.index = data.index.astype(str)
data = data.reset_index()
events['date'] = events['date'].astype(str)
data = data.merge(events, on='date', how='left').dropna()
data = data[['date', 'Category', 'embedding', 'wiki_name',
'High-Category', 'country', 'ref_num']]
for ii in range(emb_size):
col = 'emb_' + str(ii)
data[col] = data['embedding'].apply(lambda x: x[ii])
if not emb_only:
one_hot = pd.get_dummies(data['Category'])
data = data.join(one_hot)
one_hot = pd.get_dummies(data['High-Category'])
data = data.join(one_hot)
one_hot =
|
pd.get_dummies(data['country'])
|
pandas.get_dummies
|
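A sketch of the one-hot encoding step in create_events_df, with a hypothetical two-row events frame:
import pandas as pd

data = pd.DataFrame({'date': ['2020-01-01', '2020-01-02'], 'country': ['US', 'DE']})

one_hot = pd.get_dummies(data['country'])   # one indicator column per distinct country value
data = data.join(one_hot)
print(data)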
"""
This module implements methods to collect financial data from Wharton Research Services via the wrds package
"""
import datetime
import json
import re
import sys
import time
import warnings
from typing import Tuple
import numpy as np
import pandas as pd
import wrds
from colorama import Fore, Back, Style
from sklearn.preprocessing import StandardScaler
from config import *
# Configurations for displaying DataFrames
from core.utils import get_index_name, check_directory_for_file, Timer, lookup_multiple
pd.set_option('mode.chained_assignment', None)
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
def retrieve_index_history(index_id: str = None, from_file=False, last_n: int = None,
folder_path: str = '', generate_dict=False) -> pd.DataFrame:
"""
Download complete daily index history and return as Data Frame (no date index)
:return: DataFrame containing full index constituent data over full index history
:rtype: pd.DataFrame
"""
if not from_file:
# Load GVKEYX lookup dict
with open(os.path.join(ROOT_DIR, 'data', 'gvkeyx_name_dict.json'), 'r') as fp:
gvkeyx_lookup = json.load(fp)
# Establish database connection
print('Opening DB connection ...')
db = wrds.Connection(wrds_username='afecker')
print('Done')
# Retrieve list of all stocks (gvkeys) for specified index including full date range of historic index data
gvkey_list, relevant_date_range = get_all_constituents(
constituency_matrix=pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'constituency_matrix.csv'), index_col=0,
header=[0, 1],
parse_dates=True))
# Set start and end date
if last_n:
start_date = str(relevant_date_range[-last_n].date())
else:
start_date = str(relevant_date_range[0].date())
end_date = str(relevant_date_range[-1].date())
# Specify list of companies and start and end date of query
parameters = {'company_codes': tuple(gvkey_list), 'start_date': start_date, 'end_date': end_date}
print('Querying full index history for index %s \n'
'between %s and %s ...' % (gvkeyx_lookup.get(index_id), start_date, end_date))
start_time = time.time()
data = get_data_table(db, sql_query=True,
query_string="select datadate, gvkey, iid, trfd, ajexdi, cshtrd, prccd, divd, conm, curcdd, sedol, exchg, gsubind "
"from comp.g_secd "
"where gvkey in %(company_codes)s and datadate between %(start_date)s and %(end_date)s "
"order by datadate asc",
index_col=['datadate', 'gvkey', 'iid'], table_info=1, params=parameters)
end_time = time.time()
print('Query duration: %g seconds' % (round(end_time - start_time, 2)))
print('Number of observations: %s' % data.shape[0])
print('Number of individual dates: %d' % data.index.get_level_values('datadate').drop_duplicates().size)
# JOB: Add return_index and daily_return columns
data = calculate_daily_return(data, save_to_file=False)
# Reset index
data.reset_index(inplace=True)
# Save to file
data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'))
else:
data = pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'),
dtype={'gvkey': str, 'gsubind': str, 'datadate': str, 'gics_sector': str, 'gsector': str},
parse_dates=False, index_col=False)
data.loc[:, 'datadate'] = pd.to_datetime(data.loc[:, 'datadate'], infer_datetime_format=True).dt.date
if not check_directory_for_file(index_id=index_id, folder_path=os.path.join(folder_path, 'gvkey_name_dict.json'),
create_dir=False, print_status=False):
generate_company_lookup_dict(folder_path=folder_path, data=data)
return data
def load_full_data(index_id: str = '150095', force_download: bool = False, last_n: int = None, columns: list = None,
merge_gics=False) -> Tuple[
pd.DataFrame, pd.DataFrame, str, str]:
"""
Load all available records from the data for a specified index
:param columns:
:param merge_gics:
:param index_id: Index ID to load data for
:param force_download: Flag indicating whether to overwrite existing data
:param last_n: Number of last available dates to consider
:return: Tuple of (constituency_matrix, full_data, index_name, folder_path)
"""
# Load index name dict and get index name
index_name, lookup_table = get_index_name(index_id=index_id)
configs = json.load(open(os.path.join(ROOT_DIR, 'config.json'), 'r'))
folder_path = os.path.join(ROOT_DIR, 'data', index_name.lower().replace(' ', '_')) # Path to index data folder
# JOB: Check whether index data already exist; create folder and set 'load_from_file' flag to false if non-existent
load_from_file = check_directory_for_file(index_name=index_name, folder_path=folder_path,
force_download=force_download)
# JOB: Check if saved model folder exists and create one if not
if not os.path.exists(os.path.join(ROOT_DIR, configs['model']['save_dir'])):
os.makedirs(configs['model']['save_dir'])
if not load_from_file:
# JOB: Create or load constituency matrix
print('Creating constituency matrix ...')
timer = Timer()
timer.start()
create_constituency_matrix(load_from_file=load_from_file, index_id=index_id, lookup_table=lookup_table,
folder_path=folder_path)
print('Successfully created constituency matrix.')
timer.stop()
# JOB: Load constituency matrix
print('Loading constituency matrix ...')
constituency_matrix = pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'constituency_matrix.csv'), index_col=0,
header=[0, 1],
parse_dates=True)
print('Successfully loaded constituency matrix.\n')
# JOB: Load full data
print('Retrieving full index history ...')
timer = Timer().start()
full_data = retrieve_index_history(index_id=index_id, from_file=load_from_file, last_n=last_n,
folder_path=folder_path, generate_dict=False)
full_data.set_index('datadate', inplace=True, drop=True)
# JOB: Sort by date and reset index
full_data.sort_index(inplace=True)
full_data.reset_index(inplace=True)
if merge_gics and all(col_name not in full_data.columns for col_name in ['gsubind', 'gsector', 'gics_sector']):
print('Neither \'gsubind\' nor \'gsector\' are in the columns.')
# JOB: Create GICS (Global Industry Classification Standard) matrix
gics_matrix = create_gics_matrix(index_id=index_id, index_name=index_name, lookup_table=lookup_table,
load_from_file=load_from_file)
# JOB: Merge gics matrix with full data set
print('Merging GICS matrix with full data set.')
full_data.set_index(['datadate', 'gvkey'], inplace=True)
full_data = full_data.join(gics_matrix, how='left').reset_index()
# Save to file
full_data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'))
if all(col_name not in full_data.columns for col_name in ['gics_sector', 'gsector']):
print('Neither \'gics_sector\' nor \'gsector\' are in the columns.')
# JOB: Extract 2-digit GICS code
assert 'gsubind' in full_data.columns
generate_gics_sector(full_data)
# Save to file
print('Saving modified data to file ...')
full_data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'))
if ('gsector' in full_data.columns) and ('gics_sector' not in full_data.columns):
print('Renaming \'gsector\' to \'gics_sector\'.')
# full_data.drop(columns=['gsector'], inplace=True)
full_data.rename(columns={'gsector': 'gics_sector'}, inplace=True)
# Save to file
print('Saving modified data to file ...')
full_data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'))
if any(col_name in columns for col_name in ['ind_mom_ratio', 'ind_mom']):
# JOB: Add 6-month (=120 days) momentum column
print('Adding 6-month momentum ...')
timer = Timer().start()
full_data.set_index(keys=['datadate', 'gvkey', 'iid'], inplace=True)
full_data.loc[:, '6m_mom'] = full_data.groupby(level=['gvkey', 'iid'])['return_index'].apply(
lambda x: x.pct_change(periods=120))
timer.stop()
full_data.reset_index(inplace=True)
gics_map = pd.read_json(os.path.join(ROOT_DIR, 'data', 'gics_code_dict.json'), orient='records',
typ='series').rename('gics_sec')
gics_map = {str(key): val for key, val in gics_map.to_dict().items()}
if 'gics_sector_name' not in full_data.columns:
print('Extracting GICS sector names ...')
full_data.loc[:, 'gics_sector_name'] = full_data['gics_sector'].replace(gics_map, inplace=False)
print('Saving modified data to file ...')
full_data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'))
# if 'prchd' in full_data.columns:
# full_data.loc[:, 'daily_spread'] = (full_data['prchd'] - full_data['prcld']).divide(full_data['prccd'])
if 'daily_return' not in full_data.columns:
print('Calculating daily returns ...')
timer = Timer().start()
full_data = calculate_daily_return(full_data, folder_path=folder_path, save_to_file=True)
timer.stop()
print('Successfully loaded index history.')
timer.stop()
print()
# Drop records with missing daily_return variable
full_data.dropna(subset=['daily_return'], inplace=True)
return constituency_matrix, full_data, index_name, folder_path
def generate_study_period(constituency_matrix: pd.DataFrame, full_data: pd.DataFrame,
period_range: tuple, index_name: str, configs: dict, folder_path: str, columns=None) -> (
pd.DataFrame, int):
"""
Generate a time-period sample for a study period
:param columns: Feature columns
:param configs: Dictionary containing model and training configurations
:param folder_path: Path to data folder
:param period_range: Date index range of study period in form (start_index, end_index)
:type period_range: tuple
:param full_data: Full stock data
:type full_data: pd.DataFrame
:param constituency_matrix: Constituency matrix
:type constituency_matrix: pd.DataFrame
:param index_name: Name of index
:type index_name: str
:return: Tuple Study period sample and split index
:rtype: tuple[pd.DataFrame, int]
"""
# Convert date columns to DatetimeIndex
full_data.loc[:, 'datadate'] = pd.to_datetime(full_data['datadate'])
# Set index to date column
full_data.set_index('datadate', inplace=True, drop=True)
# Get unique dates
unique_dates = full_data.index.drop_duplicates()
split_ratio = configs['data']['train_test_split']
i_split = len(unique_dates[:period_range[0]]) + int(
len(unique_dates[period_range[0]:period_range[1]]) * split_ratio)
split_date = unique_dates[i_split]
# Detect potential out-of-bounds indices
if abs(period_range[0]) > len(unique_dates) or abs(period_range[1]) > len(unique_dates):
print(f'Index length is {len(unique_dates)}. Study period range is {period_range}.')
raise IndexError('Period index out of bounds.')
print(f'Retrieving index constituency for {index_name} as of {split_date.date()}.')
try:
constituent_indices = get_index_constituents(constituency_matrix, date=split_date,
folder_path=folder_path, print_constituents=False)
if len(constituent_indices) < 2:
raise RuntimeWarning('Period contains too few constituents. Continuing with next study period.')
except IndexError as ie:
print(
f'{Fore.RED}{Back.YELLOW}{Style.BRIGHT}Period index out of bounds. Choose different study period bounds.'
f'{Style.RESET_ALL}')
print(', '.join(ie.args))
print('Terminating program.')
sys.exit(1)
# df_to_html(constituency_matrix.iloc[-20:, :20], 'const_matrix')
# exit()
full_data.reset_index(inplace=True)
# JOB: Select relevant data
# Select relevant stocks
full_data = full_data.set_index(['gvkey', 'iid'], drop=True)
# print(f'Length of intersection: {len(constituent_indices.intersection(full_data.index))}')
# print(f'Length of difference: {len(constituent_indices.difference(full_data.index))}')
assert len(constituent_indices.intersection(full_data.index)) == len(constituent_indices)
print('\nFiltering by study period constituents ...')
timer = Timer().start()
full_data = full_data.loc[constituent_indices, :]
# print(f'Number of constituents (deduplicated): {full_data.index.get_level_values(level="gvkey").unique().shape[0]}')
full_data.reset_index(inplace=True)
full_data.set_index('datadate', inplace=True)
full_data.sort_index(inplace=True)
timer.stop()
# JOB: Select data from study period
print(f'Retrieving data from {unique_dates[period_range[0]].date()} to {unique_dates[period_range[1]].date()} \n')
study_data = full_data.loc[unique_dates[period_range[0]]:unique_dates[period_range[1]]]
if 'ind_mom_ratio' in columns:
# JOB: Drop records without 6 month-returns
print('Removing records without 6-month momentum ...')
print('Missing records before removal:')
print(study_data[study_data['6m_mom'].isna()].set_index(['gvkey', 'iid'], append=True).index.get_level_values(
level='gvkey').unique().shape[0])
study_data.dropna(how='any', subset=['6m_mom'], inplace=True)
print('Missing records after removal:')
print(study_data[study_data['6m_mom'].isna()].set_index(['gvkey', 'iid'], append=True).index.get_level_values(
level='gvkey').unique().shape[0])
del full_data
print(f'Study period length: {len(study_data.index.unique())}')
study_data_split_index = study_data.index.unique().get_loc(split_date, method='ffill')
study_data_split_date = study_data.index.unique()[study_data_split_index]
print(f'Study data split index: {study_data_split_index}')
print(f'Study data split date: {study_data_split_date.date()}')
# JOB: Calculate mean and standard deviation of daily returns for training period
mean_daily_return = study_data.loc[unique_dates[period_range[0]]:split_date, 'daily_return'].mean()
std_daily_return = study_data.loc[unique_dates[period_range[0]]:split_date, 'daily_return'].std()
print('Mean daily return: %g' % mean_daily_return)
print('Std. daily return: %g \n' % std_daily_return)
# JOB: Calculate standardized daily returns
study_data.loc[:, 'stand_d_return'] = (study_data['daily_return'] - mean_daily_return) / std_daily_return
# JOB: Drop observations with critical n/a values
# Review: Check whether operation is redundant
# critical_cols = ['daily_return']
# study_data.dropna(how='any', subset=critical_cols, inplace=True)
# JOB: Create target
study_data.loc[:, 'above_cs_med'] = study_data['daily_return'].gt(
study_data.groupby('datadate')['daily_return'].transform('median')).astype(np.int8)
study_data.loc[:, 'cs_med'] = study_data.groupby('datadate')['daily_return'].transform('median')
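# The two columns above define the classification target: 'above_cs_med' is 1 if a
# stock's daily return exceeds the cross-sectional median return on that date, else 0.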
# JOB: Create cross-sectional ranking
# study_data.loc[:, 'cs_rank'] = study_data.groupby('datadate')['daily_return'].rank(method='first', ascending=False).astype('int16')
# study_data.loc[:, 'cs_percentile'] = study_data.groupby('datadate')['daily_return'].rank(pct=True)
# JOB: Add columns with number of securities in cross-section
study_data.loc[:, 'cs_length'] = study_data.groupby('datadate')['daily_return'].count()
study_data.reset_index(inplace=True)
study_data.set_index('datadate', inplace=True, drop=True)
if any(col_name in columns for col_name in ['ind_mom_ratio', 'ind_mom']):
# JOB: Add industry momentum column
study_data.loc[:, 'ind_mom'] = study_data.groupby(['gics_sector', 'datadate'])['6m_mom'].transform('mean')
# Drop observations with missing industry momentum
study_data.dropna(how='any', subset=['ind_mom'], inplace=True)
# JOB: Add 'ind_mom_ratio' column
study_data.loc[:, 'ind_mom_ratio'] = study_data.loc[:, '6m_mom'].divide(study_data.loc[:, 'ind_mom'])
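# 'ind_mom_ratio' relates each stock's 6-month momentum to the average 6-month
# momentum of its GICS sector on the same date.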
# JOB: Remove data with missing or unbounded industry momentum ratio
study_data = study_data[study_data['ind_mom_ratio'].ne(np.inf)]
study_data.dropna(subset=['ind_mom_ratio'], inplace=True)
# Standardize ind_mom_ratio
study_data.loc[:, 'ind_mom_ratio'] = StandardScaler().fit_transform(
study_data.loc[:, 'ind_mom_ratio'].values.reshape(-1, 1))
return study_data, study_data_split_index
def create_constituency_matrix(load_from_file=False, index_id='150095', lookup_table='global',
folder_path: str = None) -> None:
"""
Generate constituency matrix for stock market index components
:param lookup_table: Table to use for constituency lookup
:param folder_path: Path to data folder
:param index_id: Index to create constituency matrix for
:param load_from_file: Flag indicating whether to load constituency information from file
:return: None (the constituency matrix is saved to 'constituency_matrix.csv' in the data folder)
"""
# File name with constituents and corresponding time frames
file_name = 'data_constituents.csv'
if folder_path:
folder = folder_path
else:
folder = ''
db = None
const_data = None
parameters = {'index_id': (index_id,)}
# JOB: In case constituents have to be downloaded from database
if not load_from_file:
# Establish database connection
print('Opening DB connection ...')
db = wrds.Connection(wrds_username='afecker')
print('Done')
print(f'Retrieving index history from {lookup_table} ...')
if lookup_table == 'global':
const_data = get_data_table(db, sql_query=True,
query_string="select * "
"from comp.g_idxcst_his "
"where gvkeyx in %(index_id)s ",
index_col=['gvkey', 'iid'], table_info=1, params=parameters)
elif lookup_table == 'north_america':
const_data = get_data_table(db, sql_query=True,
query_string="select * "
"from comp.idxcst_his "
"where gvkeyx in %(index_id)s ",
index_col=['gvkey', 'iid'], table_info=1, params=parameters)
# Save to file
const_data.to_csv(os.path.join(ROOT_DIR, folder, 'data_constituents.csv'))
# JOB: Load table from local file
else:
# Load constituency table from file and transform key to string
const_data = pd.read_csv(os.path.join(ROOT_DIR, folder, file_name), dtype={'gvkey': str})
# const_data['gvkey'] = const_data['gvkey'].astype('str')
# Set gvkey and iid as MultiIndex
const_data.set_index(['gvkey', 'iid'], inplace=True)
# Convert date columns to datetime format
for col in ['from', 'thru']:
const_data.loc[:, col] = pd.to_datetime(const_data[col], format='%Y-%m-%d')
const_data.loc[:, col] = const_data[col].dt.date
# Determine period starting date and relevant date range
index_starting_date = const_data['from'].min()
relevant_date_range = pd.date_range(index_starting_date, datetime.date.today(), freq='D')
# Create empty constituency matrix
constituency_matrix = pd.DataFrame(0, index=relevant_date_range,
columns=const_data.index.drop_duplicates())
# JOB: Iterate through all company stocks ever listed in index and set adjacency to 0 or 1
for stock_index in const_data.index:
if isinstance(const_data.loc[stock_index], pd.Series):
if pd.isnull(const_data.loc[stock_index, 'thru']):
constituency_matrix.loc[pd.date_range(start=const_data.loc[stock_index, 'from'],
end=datetime.date.today()), stock_index] = 1
else:
constituency_matrix.loc[pd.date_range(start=const_data.loc[stock_index, 'from'], end=const_data.loc[
stock_index, 'thru']), stock_index] = 1
else:
for row in const_data.loc[stock_index].iterrows():
if pd.isnull(row[1]['thru']):
constituency_matrix.loc[
pd.date_range(start=row[1]['from'], end=datetime.date.today()), stock_index] = 1
else:
constituency_matrix.loc[pd.date_range(start=row[1]['from'], end=row[1]['thru']), stock_index] = 1
# Save constituency table to file
constituency_matrix.to_csv(os.path.join(ROOT_DIR, folder, 'constituency_matrix.csv'))
if not load_from_file:
db.close()
print('DB connection closed.')
return None
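# Illustrative call (a sketch only; the folder path is a placeholder and a WRDS
# connection is required whenever load_from_file is False):
# create_constituency_matrix(load_from_file=True, index_id='150095',
#                            lookup_table='global', folder_path='data/my_index/')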
def create_gics_matrix(index_id='150095', index_name=None, lookup_table=None, folder_path: str = None,
load_from_file=False) -> pd.DataFrame:
"""
Generate constituency matrix with Global Industry Classification Standard (GICS) classification code (full length)
:param index_id: Index to create constituency matrix for
:param index_name: (Optional) - Index name
:param lookup_table: (Optional) - Lookup table name
:param folder_path: (Optional) - Folder path
:param load_from_file: (Optional) - Flag indicating whether to load matrix from file
:return: GICS constituency matrix as pd.DataFrame
"""
lookup_dict = {'Global Dictionary':
{'file_path': 'gvkeyx_name_dict.json',
'lookup_table': 'global'},
'North American Dictionary':
{'file_path': 'gvkeyx_name_dict_na.json',
'lookup_table': 'north_america'}
}
if load_from_file:
# JOB: In case existing gics_matrix can be loaded from file
print(f'Loading GICS matrix for {index_id}: {index_name}')
timer = Timer()
timer.start()
if folder_path is None:
if index_name is None:
index_name, lookup_table = lookup_multiple(dict_of_dicts=lookup_dict, index_id=index_id)
folder_path = os.path.join(ROOT_DIR, 'data',
index_name.lower().replace(' ', '_')) # Path to index data folder
# Load from file
gic_constituency_matrix = pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'gics_matrix.csv'), index_col=0,
header=0,
parse_dates=True, dtype=str)
gic_constituency_matrix.index = pd.to_datetime(gic_constituency_matrix.index)
timer.stop()
else:
# JOB: In case gics_matrix has to be created
if folder_path is None:
if (index_name is None) or (lookup_table is None):
index_name, lookup_table = lookup_multiple(dict_of_dicts=lookup_dict, index_id=index_id)
folder_path = os.path.join(ROOT_DIR, 'data',
index_name.lower().replace(' ', '_')) # Path to index data folder
else:
index_name = str.split(folder_path, '\\')[-1].replace('_', ' ')
index_id, lookup_table = lookup_multiple(dict_of_dicts=lookup_dict, index_id=index_name,
reverse_lookup=True, key_to_lower=True)
folder_exists = check_directory_for_file(index_name=index_name, folder_path=folder_path, create_dir=False)
if folder_exists:
print(f'Creating GICS matrix for {index_id}: {index_name.capitalize()}')
timer = Timer().start()
else:
print(
f'Directory for index {index_id} ({index_name.capitalize()}) does not exist. \nPlease either download the '
f'necessary data or choose a different index ID.')
raise LookupError('Value not found.')
# JOB: Get all historic index constituents
gvkey_list, _ = get_all_constituents(
constituency_matrix=pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'constituency_matrix.csv'),
index_col=0,
header=[0, 1],
parse_dates=True))
parameters = {'index_ids': tuple(gvkey_list)}
# JOB: Download GICS table
# Establish database connection
print('Opening DB connection ...')
db = wrds.Connection(wrds_username='afecker')
print('Done')
print(f'Retrieving GICS history from {lookup_table} ...')
if lookup_table == 'global':
const_data = get_data_table(db, sql_query=True,
query_string="select * "
"from comp.g_co_hgic "
"where gvkey in %(index_ids)s ",
index_col=['gvkey'], table_info=1, params=parameters)
elif lookup_table == 'north_america':
const_data = get_data_table(db, sql_query=True,
query_string="select * "
"from comp.co_hgic "
"where gvkey in %(index_ids)s ",
index_col=['gvkey'], table_info=1, params=parameters)
else:
raise LookupError('Value not found.')
# Convert date columns to datetime format
for col in ['indfrom', 'indthru']:
const_data.loc[:, col] = pd.to_datetime(const_data[col], format='%Y-%m-%d')
const_data.loc[:, col] = const_data[col].dt.date
# Determine period starting date and relevant date range
index_starting_date = const_data['indfrom'].min()
relevant_date_range = pd.date_range(index_starting_date, datetime.date.today(), freq='D')
# Create empty constituency matrix
gic_constituency_matrix = pd.DataFrame(None, index=relevant_date_range,
columns=const_data.index.drop_duplicates())
# JOB: Iterate through all company stocks ever listed in index and set the GICS sub-industry code (gsubind)
for stock_index in const_data.index:
if isinstance(const_data.loc[stock_index], pd.Series):
if pd.isnull(const_data.loc[stock_index, 'indthru']):
gic_constituency_matrix.loc[
pd.date_range(start=const_data.loc[stock_index, 'indfrom'],
end=datetime.date.today()), stock_index] = const_data.loc[stock_index, 'gsubind']
else:
gic_constituency_matrix.loc[
pd.date_range(start=const_data.loc[stock_index, 'indfrom'], end=const_data.loc[
stock_index, 'indthru']), stock_index] = const_data.loc[stock_index, 'gsubind']
else:
for row in const_data.loc[stock_index].iterrows():
if pd.isnull(row[1]['indthru']):
gic_constituency_matrix.loc[
pd.date_range(start=row[1]['indfrom'], end=datetime.date.today()), stock_index] = row[1][
'gsubind']
else:
gic_constituency_matrix.loc[
|
pd.date_range(start=row[1]['indfrom'], end=row[1]['indthru'])
|
pandas.date_range
|
import json
import os
import glob
import random
from typing import Union
try:
import xarray as xr
except ModuleNotFoundError:
xr = None
import numpy as np
import pandas as pd
from .datasets import Datasets
from .utils import check_attributes, download, sanity_check
from ai4water.utils.utils import dateandtime_now
try: # shapely may not be installed, as it may be difficult to install and is only needed for plotting data.
from ai4water.pre_processing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
raise ValueError(f"Dwonlaoad the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
This class first downloads the CAMELS dataset if it is not already downloaded.
Then the selected attributes for a selected id are fetched and provided to the
user using the method `fetch`.
Attributes
-----------
- ds_dir str/path: directory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which kinds of static attributes
are present in this dataset.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
- fetch : fetches all attributes (both static and dynamic type) of all
station/gauge_ids or a specified station. It can also be used to
fetch all attributes of a number of station ids either by providing
their gauge_id or by just saying that we need data of 20 stations
which will then be chosen randomly.
- fetch_dynamic_features :
fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
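Example
---------
A minimal usage sketch (illustrative only; assumes the data of the chosen
subclass, e.g. `CAMELS_US` defined below, are available locally or can be
downloaded, and that xarray is installed; the gauge id is a placeholder):
>>> dataset = CAMELS_US()
>>> df = dataset.fetch(stations=1, as_dataframe=True)  # one random station
>>> df = dataset.fetch(stations='01013500', as_dataframe=True)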
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
raise NotImplementedError
def fetch_static_features(self, station, features):
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self)->list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will under
datasets directory"""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe:bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
stations : if string, it is supposed to be a station name/gauge_id.
If list, it will be a list of station/gauge_ids. If int, it will
be supposed that the user want data for this number of
stations/gauge_ids. If None (default), then attributes of all
available stations. If float, it will be supposed that the user
wants data of this fraction of stations.
dynamic_features : If not None, then it is the attributes to be
fetched. If None, then all available attributes are fetched
static_features : list of static attributes to be fetched. None
means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are requested, then it returns a
dictionary with the keys `static` and `dynamic` holding the corresponding
data.
Otherwise either dynamic or static features are returned.
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
raise ModuleNotFoundError("modeule xarray must be installed to use `datasets` module")
return self.fetch_stations_attributes(stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs)
def _maybe_to_netcdf(self, fname:str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe:bool = False,
**kwargs):
"""Reads attributes of more than one stations.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
as_dataframe : whether to return the data as pandas dataframe. default
is xr.dataset object
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
are by default returned as xr.Dataset unless `as_dataframe` is True, in
such a case, it is a pandas dataframe with multiindex. If xr.Dataset,
it consists of `data_vars` equal to number of stations and for each
station, the `DataArray` is of dimensions (time, dynamic_features).
where `time` is defined by `st` and `en`, i.e. the length of `DataArray`.
In case the returned object is a pandas DataFrame, the first index
is `time` and the second index is `dynamic_features`. Static attributes
are always returned as a pandas DataFrame and have the shape
`(stations, static_features)`. If `dynamic_features` is None,
then they are not returned and the returned value only consists of
static features. Same holds true for `static_features`.
If both are not None, then the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
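Example:
A minimal sketch (illustrative; assumes the dataset files are available
locally, e.g. for the `CAMELS_US` subclass defined below):
>>> ds = CAMELS_US()
>>> dyn = ds.fetch_stations_attributes(ds.stations()[:3], as_dataframe=True)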
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
dyn = xr.load_dataset(self.dyn_fname)  # dataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError
return stns
def fetch_dynamic_features(self,
stn_id,
attributes='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches all or selected dynamic attributes of one station."""
assert isinstance(stn_id, str)
station = [stn_id]
return self.fetch_stations_attributes(station,
attributes,
None,
st=st,
en=en,
as_dataframe=as_dataframe)
def fetch_station_attributes(self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs) -> pd.DataFrame:
"""
Fetches attributes for one station.
Arguments:
station : station id/gauge id for which the data is to be fetched.
dynamic_features
static_features
as_ts : whether static attributes are to be converted into a time
series or not. If yes then the returned time series will be of
same length as that of dynamic attributes.
st : starting point from which the data are to be fetched. By default
the data will be fetched from where it is available.
en : end point of data to be fetched. By default the data will be
fetched till the end of the available data.
Return:
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
"""
st, en = self._check_length(st, en)
station_df = pd.DataFrame()
if dynamic_features:
dynamic = self.fetch_dynamic_features(station, dynamic_features, st=st,
en=en, **kwargs)
station_df = pd.concat([station_df, dynamic])
if static_features is not None:
static = self.fetch_static_features(station, static_features)
if as_ts:
station_df = pd.concat([station_df, static], axis=1)
else:
station_df ={'dynamic': station_df, 'static': static}
elif static_features is not None:
station_df = self.fetch_static_features(station, static_features)
return station_df
class LamaH(Camels):
"""
Large-Sample Data for Hydrology and Environmental Sciences for Central Europe
from url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
paper: https://essd.copernicus.org/preprints/essd-2021-72/
"""
url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
_data_types = ['total_upstrm', 'diff_upstrm_all', 'diff_upstrm_lowimp'
]
time_steps = ['daily', 'hourly'
]
static_attribute_categories = ['']
def __init__(self, *,
time_step: str,
data_type: str,
**kwargs
):
"""
Arguments:
time_step : possible values are `daily` or `hourly`
data_type : possible values are `total_upstrm`, `diff_upstrm_all`
or 'diff_upstrm_lowimp'
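Example:
A minimal sketch (assumes the LamaH data are present or can be downloaded):
>>> dataset = LamaH(time_step='daily', data_type='total_upstrm')
>>> df = dataset.fetch(stations=1, as_dataframe=True)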
"""
assert time_step in self.time_steps, f"invalid time_step {time_step} given"
assert data_type in self._data_types, f"invalid data_type {data_type} given."
self.time_step = time_step
self.data_type = data_type
super().__init__(**kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'lamah_diff_upstrm_lowimp_hourly_dyn.nc')
_data_types = self._data_types if self.time_step == 'daily' else ['total_upstrm']
if not os.path.exists(fpath):
for dt in _data_types:
for ts in self.time_steps:
self.time_step = ts
self.data_type = dt
fname = f"lamah_{dt}_{ts}_dyn"
self._maybe_to_netcdf(fname)
self.time_step = time_step
self.data_type = data_type
self.dyn_fname = os.path.join(self.ds_dir, f'lamah_{data_type}_{time_step}_dyn.nc')
@property
def dynamic_features(self):
station = self.stations()[0]
df = self.read_ts_of_station(station)
return df.columns.to_list()
@property
def static_features(self) -> list:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
return df.columns.to_list()
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def data_type_dir(self):
directory = 'CAMELS_AT'
if self.time_step == 'hourly':
directory = 'CAMELS_AT1' # todo, use it only for hourly, daily is causing errors
# self.ds_dir/CAMELS_AT/data_type_dir
f = [f for f in os.listdir(os.path.join(self.ds_dir, directory)) if self.data_type in f][0]
return os.path.join(self.ds_dir, f'{directory}{SEP}{f}')
def stations(self)->list:
# assuming file_names of the format ID_{stn_id}.csv
_dirs = os.listdir(os.path.join(self.data_type_dir, f'2_timeseries{SEP}{self.time_step}'))
s = [f.split('_')[1].split('.csv')[0] for f in _dirs]
return s
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
"""Reads attributes of one station"""
stations_attributes = {}
for station in stations:
station_df = pd.DataFrame()
if dynamic_features is not None:
dynamic_df = self.read_ts_of_station(station)
station_df = pd.concat([station_df, dynamic_df])
stations_attributes[station] = station_df
return stations_attributes
def fetch_static_features(self,
station:Union[str, list],
features=None
)->pd.DataFrame:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
#if features is not None:
static_features = check_attributes(features, self.static_features)
df = df[static_features]
if isinstance(station, list):
stations = [str(i) for i in station]
elif isinstance(station, int):
stations = str(station)
else:
stations = station
df.index = df.index.astype(str)
df = df.loc[stations]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
def read_ts_of_station(self, station) -> pd.DataFrame:
# read a file containing timeseries data for one station
fname = os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}{SEP}ID_{station}.csv')
df = pd.read_csv(fname, sep=';')
if self.time_step == 'daily':
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], freq="D")
df.index = periods.to_timestamp()
else:
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], hour=df["hh"], minute=df["mm"], freq="H")
df.index = periods.to_timestamp()
# remove the cols specifying index
[df.pop(item) for item in ['YYYY', 'MM', 'DD', 'hh', 'mm'] if item in df]
return df
@property
def start(self):
return "19810101"
@property
def end(self):
return "20191231"
class HYSETS(Camels):
"""
database for hydrometeorological modeling of 14,425 North American watersheds
from 1950-2018 following the work of
[Arsenault et al., 2020](https://doi.org/10.1038/s41597-020-00583-2)
The user must manually download the files, unpack them and provide
the `path` where these files are saved.
This data comes with multiple sources. Each source having one or more dynamic_features
Following data_source are available.
|sources | dynamic_features |
|---------------|------------------|
|SNODAS_SWE | discharge, swe|
|SCDNA | discharge, pr, tasmin, tasmax|
|nonQC_stations | discharge, pr, tasmin, tasmax|
|Livneh | discharge, pr, tasmin, tasmax|
|ERA5 | discharge, pr, tasmax, tasmin|
|ERA5Land_SWE | discharge, swe|
|ERA5Land | discharge, pr, tasmax, tasmin|
all sources contain one or more of the following dynamic_features
with the following shapes
|dynamic_features | shape |
|----------------------------|------------|
|time | (25202,) |
|watershedID | (14425,) |
|drainage_area | (14425,) |
|drainage_area_GSIM | (14425,) |
|flag_GSIM_boundaries | (14425,) |
|flag_artificial_boundaries | (14425,) |
|centroid_lat | (14425,) |
|centroid_lon | (14425,) |
|elevation | (14425,) |
|slope | (14425,) |
|discharge | (14425, 25202) |
|pr | (14425, 25202) |
|tasmax | (14425, 25202) |
|tasmin | (14425, 25202) |
"""
doi = "https://doi.org/10.1038/s41597-020-00583-2"
url = "https://osf.io/rpc3w/"
Q_SRC = ['ERA5', 'ERA5Land', 'ERA5Land_SWE', 'Livneh', 'nonQC_stations', 'SCDNA', 'SNODAS_SWE']
SWE_SRC = ['ERA5Land_SWE', 'SNODAS_SWE']
OTHER_SRC = [src for src in Q_SRC if src not in ['ERA5Land_SWE', 'SNODAS_SWE']]
dynamic_features = ['discharge', 'swe', 'tasmin', 'tasmax', 'pr']
def __init__(self,
path:str,
swe_source:str = "SNODAS_SWE",
discharge_source: str = "ERA5",
tasmin_source: str = "ERA5",
tasmax_source: str = "ERA5",
pr_source: str = "ERA5",
**kwargs
):
"""
Arguments:
path : path where all the data files are saved.
swe_source : source of swe data.
discharge_source : source of discharge data
tasmin_source : source of tasmin data
tasmax_source : source of tasmax data
pr_source : source of pr data
kwargs : arguments for `Camels` base class
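Example:
A minimal sketch (illustrative; the path is a placeholder for the manually
downloaded HYSETS files):
>>> dataset = HYSETS(path='path/to/HYSETS', swe_source='SNODAS_SWE',
...                  discharge_source='ERA5')
>>> df = dataset.fetch(stations=1, as_dataframe=True)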
"""
assert swe_source in self.SWE_SRC, f'source must be one of {self.SWE_SRC}'
assert discharge_source in self.Q_SRC, f'source must be one of {self.Q_SRC}'
assert tasmin_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert tasmax_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert pr_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
self.sources = {
'swe': swe_source,
'discharge': discharge_source,
'tasmin': tasmin_source,
'tasmax': tasmax_source,
'pr': pr_source
}
super().__init__(**kwargs)
self.ds_dir = path
fpath = os.path.join(self.ds_dir, 'hysets_dyn.nc')
if not os.path.exists(fpath):
self._maybe_to_netcdf('hysets_dyn')
def _maybe_to_netcdf(self, fname:str):
# todo saving as one file takes very long time
oneD_vars = []
twoD_vars = []
for src in self.Q_SRC:
xds = xr.open_dataset(os.path.join(self.ds_dir, f'HYSETS_2020_{src}.nc'))
for var in xds.variables:
print(f'getting {var} from source {src} ')
if len(xds[var].data.shape) > 1:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
twoD_vars.append(xar)
else:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
oneD_vars.append(xar)
oneD_xds = xr.merge(oneD_vars)
twoD_xds = xr.merge(twoD_vars)
oneD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_static.nc"))
twoD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_dyn.nc"))
return
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('HYSETS', x)
self._ds_dir = x
@property
def static_features(self):
df = self.read_static_data()
return df.columns.to_list()
def stations(self) -> list:
return self.read_static_data().index.to_list()
@property
def start(self):
return "19500101"
@property
def end(self):
return "20181231"
def fetch_stations_attributes(self,
stations: list,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
st = None,
en = None,
as_dataframe: bool = False,
**kwargs):
stations = check_attributes(stations, self.stations())
stations = [int(stn) for stn in stations]
if dynamic_features is not None:
dyn = self._fetch_dynamic_features(stations=stations,
dynamic_features=dynamic_features,
as_dataframe=as_dataframe,
**kwargs
)
if static_features is not None: # we want both static and dynamic
to_return = {}
static = self._fetch_static_features(station=stations,
static_features=static_features,
**kwargs
)
to_return['static'] = static
to_return['dynamic'] = dyn
else:
to_return = dyn
elif static_features is not None:
# we want only static
to_return = self._fetch_static_features(
station=stations,
static_features=static_features,
**kwargs
)
else:
raise ValueError
return to_return
def fetch_dynamic_features(self,
station,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches dynamic attributes of one station."""
station = [int(station)]
return self._fetch_dynamic_features(stations=station,
dynamic_features=dynamic_features,
st=st,
en=en,
as_dataframe=as_dataframe)
def _fetch_dynamic_features(self,
stations:list,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False,
as_ts=False
):
"""Fetches dynamic attributes of station."""
st, en = self._check_length(st, en)
attrs = check_attributes(dynamic_features, self.dynamic_features)
stations = np.subtract(stations, 1).tolist()
# maybe we don't need to read all variables
sources = {k:v for k,v in self.sources.items() if k in attrs}
# original .nc file contains datasets with dynamic and static features as data_vars
# however, for uniformity of this API and easy usage, we want a Dataset to have
# station names/gauge_ids as data_vars and each data_var has
# dimension (time, dynamic_variables)
# Therefore, first read all data for each station from .nc file
# then rearrange it.
# todo, this operation is slower because of `to_dataframe`
# also doing this removes all the metadata
x = {}
f = os.path.join(self.ds_dir, "hysets_dyn.nc")
xds = xr.open_dataset(f)
for stn in stations:
xds1 = xds[[f'{k}_{v}' for k, v in sources.items()]].sel(watershed=stn, time=slice(st, en))
xds1 = xds1.rename_vars({f'{k}_{v}': k for k, v in sources.items()})
x[stn] = xds1.to_dataframe(['time'])
xds = xr.Dataset(x)
xds = xds.rename_dims({'dim_1': 'dynamic_features'})
xds = xds.rename_vars({'dim_1': 'dynamic_features'})
if as_dataframe:
return xds.to_dataframe(['time', 'dynamic_features'])
return xds
def _fetch_static_features(self,
station,
static_features:Union[str, list]='all',
st=None,
en=None,
as_ts=False):
df = self.read_static_data()
static_features = check_attributes(static_features, self.static_features)
if isinstance(station, str):
station = [station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
return self.to_ts(df.loc[station][static_features], st=st, en=en, as_ts=as_ts)
def fetch_static_features(self,
station,
features='all',
st=None,
en=None,
as_ts=False
)->pd.DataFrame:
return self._fetch_static_features(station, features, st, en, as_ts)
def read_static_data(self):
fname = os.path.join(self.ds_dir, 'HYSETS_watershed_properties.txt')
static_df = pd.read_csv(fname, index_col='Watershed_ID', sep=';')
static_df.index = static_df.index.astype(str)
return static_df
class CAMELS_US(Camels):
"""
Downloads and processes CAMELS dataset of 671 catchments named as CAMELS
from https://ral.ucar.edu/solutions/products/camels
https://doi.org/10.5194/hess-19-209-2015
"""
DATASETS = ['CAMELS_US']
url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip"
catchment_attr_url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip"
folders = {'basin_mean_daymet': f'basin_mean_forcing{SEP}daymet',
'basin_mean_maurer': f'basin_mean_forcing{SEP}maurer',
'basin_mean_nldas': f'basin_mean_forcing{SEP}nldas',
'basin_mean_v1p15_daymet': f'basin_mean_forcing{SEP}v1p15{SEP}daymet',
'basin_mean_v1p15_nldas': f'basin_mean_forcing{SEP}v1p15{SEP}nldas',
'elev_bands': f'elev{SEP}daymet',
'hru': f'hru_forcing{SEP}daymet'}
dynamic_features = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)', 'Flow']
def __init__(self, data_source='basin_mean_daymet'):
assert data_source in self.folders, f'allowed data sources are {self.folders.keys()}'
self.data_source = data_source
super().__init__("CAMELS_US")
if os.path.exists(self.ds_dir):
print(f"dataset is already downloaded at {self.ds_dir}")
else:
download(self.url, os.path.join(self.camels_dir, f'CAMELS_US{SEP}CAMELS_US.zip'))
download(self.catchment_attr_url, os.path.join(self.camels_dir, f"CAMELS_US{SEP}catchment_attrs.zip"))
self._unzip()
self.attr_dir = os.path.join(self.ds_dir, f'catchment_attrs{SEP}camels_attributes_v2.0')
self.dataset_dir = os.path.join(self.ds_dir, f'CAMELS_US{SEP}basin_dataset_public_v1p2')
self._maybe_to_netcdf('camels_us_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def start(self):
return "19800101"
@property
def end(self):
return "20141231"
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
cols = []
for f in files:
_df =
|
pd.read_csv(f, sep=';', index_col='gauge_id', nrows=1)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
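# Note: 'set' is a sentinel rather than a boolean: sets count as list-like by
# default but not when allow_sets=False; the two tests below translate the
# sentinel to True or False accordingly.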
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
        # a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index =
|
Index(dates)
|
pandas.Index
|
import abc
import pandas as pd
import numpy as np
from numbers import Number
from itertools import combinations
from statsmodels.formula.api import ols
from functools import partial
from .utils import NumericFunction
from .assignment import get_assignments_as_positions
def identity(x): return x
def max_absolute_value(x): return np.max(np.abs(x))
min_across_covariates = partial(np.min, axis=1)
max_across_covariates = partial(np.max, axis=1)
mean_across_covariates = partial(np.mean, axis=1)
class BalanceObjective:
def __init__(self, cols=None):
self._cols = cols
def col_selection(self, df):
return self._cols or df.columns
@property
def balance_func(self):
return NumericFunction.numerize(self._balance_func)
@abc.abstractmethod
def _balance_func(self, df, assignments):
""""""
@classmethod
def assignment_indices(cls, df, assignments):
idxs = cls._idxs_from_assignment(df, assignments)
return cls._append_complementary_assignment(idxs)
@classmethod
def _idxs_from_assignment(cls, df, assignments):
if len(assignments[0]) == len(df.index):
return assignments
else:
return [df.index.isin(a) for a in assignments]
@classmethod
def _append_complementary_assignment(cls, idxs):
total_assignments = np.add(*idxs) if len(idxs) > 1 else idxs[0]
if not min(total_assignments):
idxs.append(np.logical_not(total_assignments))
return idxs
class MahalanobisBalance(BalanceObjective):
def __init__(self, treatment_aggregator=identity, cols=None):
self.treatment_aggregator = treatment_aggregator
super().__init__(cols)
def _balance_func(self, df, assignments):
df_sel = df[self.col_selection(df)]
inverse_cov = np.linalg.inv(df_sel.cov())
means = [df_sel.loc[idx].mean() for idx in
self.assignment_indices(df_sel, assignments)]
combs = list(combinations(range(len(means)), 2))
mean_diffs = [means[a] - means[b] for a, b in combs]
res = pd.DataFrame(data=[mean_diff @ inverse_cov @ mean_diff
for mean_diff in mean_diffs],
index=['{}-{}'.format(a, b) for a, b in combs])
return -self.treatment_aggregator(res)
def mahalanobis_balance(cols=None):
return MahalanobisBalance(np.max, cols=cols).balance_func
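# Illustrative usage sketch (not part of the original module; `df` and `assignments`
# are assumed to be a covariate DataFrame and a list of index collections, one per
# treatment arm). The returned balance function yields the negated, aggregated
# Mahalanobis distance between group means, so values closer to zero indicate
# better covariate balance:
#
#   balance = mahalanobis_balance(cols=['age', 'income'])
#   score = balance(df, assignments)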
class PValueBalance(BalanceObjective):
    def __init__(self, treatment_aggregator=identity,
                 covariate_aggregator=identity, cols=None):
        self.treatment_aggregator = treatment_aggregator
self.covariate_aggregator = covariate_aggregator
super().__init__(cols)
def _balance_func(self, df, assignments):
pvalues = dict((col, self.pvalues_by_col(
col, df, assignments)) for col in self.col_selection(df))
return self.covariate_aggregator(pd.DataFrame(pvalues))
def pvalues_by_col(self, col, df, assignments):
pv = self.treatment_aggregator(self.ols_col_on_treatment(
col, df, assignments).pvalues.iloc[1:].values)
if isinstance(pv, Number):
pv = [pv]
return pv
def ols_col_on_treatment(self, col, df, assignments):
t_dummies = pd.DataFrame(
dict(('t{}'.format(i), df.index.isin(assignment))
for i, assignment in enumerate(assignments)))
data =
|
pd.concat((df, t_dummies), axis=1)
|
pandas.concat
|
def search(name=None, source=None, id_No=None, markdown=False):
"""
Search function that interacts directly with the Global Lake Level Database API.
Arguments:
        name (str): Name of Lake or Reservoir. Be sure to use proper spelling. Wildcards (%) are allowed, as is any MySQL 5.7 syntax
source (str): Lake water level source flag, accepted values are "usgs", "grealm", or "hydroweb"
id_No (str,int): Global Lake Level Database identification number
markdown (bool, optional): Returns markdown dataframe when True
Returns:
Lake object: `Lake()` object
"""
import pandas as pd
import requests
import warnings
from IPython.display import display
if id_No:
id_No = str(id_No)
url = 'https://4o8d0ft32f.execute-api.us-east-2.amazonaws.com/prod/glld/search/?idNo={}'.format(
id_No)
r = requests.get(url)
json_decode = r.json()
df = pd.DataFrame().from_records(json_decode, columns = ['id_No', 'lake_name', 'source', 'metadata'])
elif not source:
url = 'https://4o8d0ft32f.execute-api.us-east-2.amazonaws.com/prod/glld/search/?name={}'.format(
name)
r = requests.get(url)
json_decode = r.json()
df = pd.DataFrame().from_records(json_decode, columns = ['id_No', 'lake_name', 'source', 'metadata'])
elif source:
url = 'https://4o8d0ft32f.execute-api.us-east-2.amazonaws.com/prod/glld/' \
'search?name={}&source={}'.format(name, source)
r = requests.get(url)
json_decode = r.json()
df = pd.DataFrame().from_records(json_decode, columns = ['id_No', 'lake_name', 'source', 'metadata'])
else:
raise ValueError("I don't know how you did this, but if you did, make a github issue!")
if len(df) < 1:
raise RuntimeError('No results returned. Please adjust search parameters or see documentation')
if len(df) > 1:
warnings.warn('Search Result: \'{}\' has more than 1 Result. Showing the {} most relevant results.\n'
'Specify \'id_No\' or narrow search name.'.format(name, len(df)), category = RuntimeWarning)
if markdown is True:
print(df.filter(['id_No', 'source', 'lake_name']).to_markdown())
else:
print(df.filter(['id_No', 'source', 'lake_name']))
elif len(df) == 1:
meta_series = df['metadata'].map(eval).apply(pd.Series)
df_unpacked = pd.merge(left = df,
right = meta_series.drop(['source', 'lake_name'],
axis = 1),
left_index = True,
right_index = True,
how = 'outer').drop('metadata', axis = 1)
if markdown is True:
print(df_unpacked.to_markdown())
else:
with pd.option_context('display.max_rows', 5, 'display.max_columns', None):
display(df_unpacked)
lake_object = _lake_meta_constructor(df_unpacked)
return lake_object
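# Illustrative usage sketch (names and ids are hypothetical; the call queries the
# live Global Lake Level Database API):
#
#   lake = search(name='Mead', source='usgs')
#   lake = search(id_No=12)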
def _lake_meta_constructor(df):
"""
Arguments:
        df (pandas.DataFrame): Pandas DataFrame of lake metadata from :function:`search`
Returns:
{:class:`Lake`}
"""
import pandas as pd
import requests
# todo location!!!!!!!!
if len(df) > 1:
        raise RuntimeError('{} lakes have been passed to the constructor which only accepts one as input.\n'
                           'Utilize search parameters by passing "source" or "id_No" to refine results'.format(len(df)))
elif len(df) == 1:
source = df.source[0]
if source == 'grealm':
name = df.lake_name[0]
country = df.Country[0]
continent = df.Continent[0]
original_id = df.grealm_database_ID[0]
id_No = df.id_No[0]
observation_period = df['Satellite Observation Period'][0]
misc_data = {'Type': df.Type[0], 'Resolution': df.Resolution[0]}
metadata = df
lake = Lake(name = name,
country = country,
continent = continent,
source = source,
original_id = original_id,
id_No = id_No,
observation_period = observation_period,
latitude = None,
longitude = None,
misc_data = misc_data,
metadata = metadata,
data = None,
timeseries = None,
datum = ('meters above TOPEX/Poseidon ellipsoid'),
mean = None,
median = None,
mode = None)
lake.data = _get_levels(lake)
lake.timeseries = lake.data.filter(['date', 'water_level']).set_index('date')
lake.timeseries.index =
|
pd.to_datetime(lake.timeseries.index)
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""\
Copyright (c) 2015-2018, MGH Computational Pathology
"""
import json
from calicoml.core.metrics import ROC
from calicoml.core.problem import Problem
from calicoml.core.serialization.serializer import Serializer, get_class_name
from calicoml.core.metrics import compute_averaged_metrics
import numpy as np
import pandas as pd
from calicoml.core.utils import pretty_print_dataframe
def roc_auc_function(y_one_vs_all, scores_y):
""" Function to access AUC computations of ROC instance """
return ROC.from_scores(y_one_vs_all, scores_y).auc
class ClassificationModel(object):
"""\
A fully packaged up, fitted classification model: can be written and read from disk and provides functions
for classifying new samples. Also does correctness checks on loading.
"""
def __init__(self, approach, training_problem, expected_scores=None, model_id=None):
"""\
:param approach: a fitted LearningApproach
:param training_problem: the problem on which the LearningApproach was trained
:param expected_scores: expected predictions for all samples in the training problem (optional)
:param id: model id
"""
self.id = model_id
self.approach = approach
self.training_problem = training_problem
self.expected_scores = expected_scores
if self.expected_scores is not None and len(self.expected_scores) != len(self.training_problem):
raise ValueError('Length of expected scores ({}) does not match the length of the training '
'problem ({})'.format(len(self.expected_scores), len(self.training_problem)))
elif expected_scores is None:
self.expected_scores = self.approach.apply(self.training_problem)
@property
def features(self):
"""Returns list of features used for prediction"""
return self.training_problem.features
@property
def outcome(self):
"""Returns the name of the outcome"""
return self.training_problem.outcome_column
@property
def positive_outcome(self):
"""Returns outcome value considered positive (i.e. mapped to 1)"""
return self.training_problem.positive_outcome
@property
def training_auc(self):
"""Computes AUC on the training set"""
if not self.training_problem.should_be_binary:
return self.averaged_training_auc()
return ROC.from_scores(self.training_problem.y, self.expected_scores).auc
def averaged_training_auc(self):
""" Compute averaged auc for multiclass classification """
scores = self.approach.prediction_probabilities(self.training_problem)
return compute_averaged_metrics(self.training_problem.y, scores, roc_auc_function)
def serialize(self, serializer=None):
"""\
Serializes this ClassificationModel into Python primitives
:param serializer: serializer to use (optional)
:return: a dictionary of Python primitives
"""
serializer = serializer or Serializer()
return {'__class__': get_class_name(ClassificationModel),
'approach': serializer.serialize(self.approach),
'problem': serializer.serialize(self.training_problem),
'expected_scores': serializer.serialize(self.expected_scores),
"id": self.id}
def validate(self, fail_if_different=True):
"""\
Validates that actual predictions from this ClassificationModel match the expected scores
:param fail_if_different: whether to fail with a ValueError if any of the scores don't match. Default: True
:return: a DataFrame with validation results
"""
expected_score = self.expected_scores
actual_score = self.approach.apply(self.training_problem)
if len(expected_score.shape) == 2:
data_to_check = {'sample': self.training_problem.sample_ids, 'truth': self.training_problem.y}
columns_validation = ['sample', 'truth']
separate_scores = True
else:
data_to_check = {'sample': self.training_problem.sample_ids,
'truth': self.training_problem.y,
'expected_score': expected_score,
'actual_score': actual_score}
columns_validation = ['sample', 'truth', 'expected_score', 'actual_score']
separate_scores = False
validation_df = pd.DataFrame(data=data_to_check, columns=columns_validation)
if not separate_scores:
validation_df['is_correct'] = np.isclose(validation_df['expected_score'].values,
validation_df['actual_score'].values)
else:
validation_df['is_correct'] = [np.allclose(expected_score[index], actual_score[index]) for
index in range(expected_score.shape[0])]
incorrect_sdf = validation_df[~validation_df['is_correct']]
if len(incorrect_sdf) > 0 and fail_if_different:
pretty_print_dataframe(incorrect_sdf)
raise ValueError('Model validation failed: scores differ for {}/{} samples'.format(
len(incorrect_sdf), len(validation_df)))
        return validation_df if not separate_scores else (validation_df, expected_score, actual_score)
def _check_prediction_input(self, df):
"""Validates that a DataFrame has all the required columns for prediction, and returns a Problem instance
that the underlying learning approach can be invoked on"""
missing_features = sorted(set(self.training_problem.features) - set(df.columns))
if len(missing_features) > 0:
raise ValueError("Input is missing features (count={}): {}".format(len(missing_features),
', '.join(missing_features)))
# TODO FIXME: LearningApproaches require a Problem instance when calling apply(). This is not ideal
# because Problems assume an outcome column, which might not be known when applying to new data.
# Here we just mock a null outcome column, but we should consider changing the interface so that
# apply() accepts a data frame directly.
classification_columns = self.training_problem.features + [self.training_problem.outcome_column]
classification_df = pd.DataFrame(df, columns=classification_columns)
return Problem(classification_df, self.training_problem.features, self.training_problem.outcome_column,
self.training_problem.positive_outcome, self.training_problem.label_list)
def predict(self, df, join=False):
"""\
Applies this model to data in a pandas DataFrame, returning a new dataframe with predictions.
:param df: input DataFrame. Has to contain all features required for classification.
:param join: if True, will append predictions to the input DataFrame. Otherwise, will return a new DataFrame
with only the sample IDs and predictions. Default: False
:return: DataFrame with predicted scores
"""
problem = self._check_prediction_input(df)
results_df = pd.DataFrame(index=
|
pd.Index(problem.sample_ids)
|
pandas.Index
|
import pickle5 as pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
#mpl.use('pdf')
import itertools
import numpy as np
from datetime import datetime
import torch
from torch import nn
from torch import optim
import os
import sys
import pandas as pd
from matplotlib import interactive
from matplotlib.patches import Rectangle
from utils import make_histos
from utils.utilities import meter
sys.path.insert(0,'/mnt/c/Users/rober/Dropbox/Bobby/Linux/classes/GAML/GAMLX/nflows/nflows')
from nflows.transforms.autoregressive import MaskedUMNNAutoregressiveTransform
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.distributions.normal import DiagonalNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
#data_path = "gendata/4features/" #Just electorn features
data_path = "gendata/16features/" #All 16 features
physics_cuts = False
gen_all_emd = False
gen_1d_histos = True
dfs = []
filenames = os.listdir(data_path)
for f in filenames:
df0 = pd.read_pickle(data_path+f)
dfs.append(df0)
df_nflow_data = pd.concat(dfs)
nflow_data_len = len(df_nflow_data.index)
print("The Generated dataset has {} events".format(nflow_data_len))
df_test_data_all =
|
pd.read_pickle("data/pi0_cartesian_test.pkl")
|
pandas.read_pickle
|
import datetime
import itertools
from typing import Sequence, Any, Union, Optional, Tuple
from warnings import warn
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import TensorDataset, DataLoader, ConcatDataset
from torch_kalman.internals.repr import NiceRepr
from torch_kalman.internals.utils import ragged_cat, true1d_idx
from torch_kalman.utils.datetime import DateTimeHelper
class TimeSeriesDataset(NiceRepr, TensorDataset):
"""
TimeSeriesDataset includes additional information about each of the Tensors' dimensions: the name for each group in
the first dimension, the start (date)time (and optionally datetime-unit) for the second dimension, and the name of
the measures for the third dimension.
Note that unlike TensorDataset, indexing a TimeSeriesDataset returns another TimeSeriesDataset, not a tuple of
tensors. So when using TimeSeriesDataset, use `TimeSeriesDataLoader` (or just use
`DataLoader(collate_fn=TimeSeriesDataset.collate)`).
"""
supported_dt_units = {'Y', 'D', 'h', 'm', 's'}
_repr_attrs = ('sizes', 'measures')
def __init__(self,
*tensors: Tensor,
group_names: Sequence[Any],
start_times: Union[np.ndarray, Sequence],
measures: Sequence[Sequence[str]],
dt_unit: Optional[str]):
if not isinstance(group_names, np.ndarray):
group_names = np.array(group_names)
assert len(group_names) == len(set(group_names))
assert len(group_names) == len(start_times)
assert len(tensors) == len(measures)
for i, (tensor, tensor_measures) in enumerate(zip(tensors, measures)):
if len(tensor.shape) < 3:
raise ValueError(f"Tensor {i} has < 3 dimensions")
if tensor.shape[0] != len(group_names):
raise ValueError(f"Tensor {i}'s first dimension has length != {len(group_names)}.")
if tensor.shape[2] != len(tensor_measures):
raise ValueError(f"Tensor {i}'s 3rd dimension has length != len({tensor_measures}).")
self.measures = tuple(tuple(m) for m in measures)
self.all_measures = tuple(itertools.chain.from_iterable(self.measures))
self.group_names = group_names
self._dt_helper = DateTimeHelper(dt_unit=dt_unit)
self.start_times = self._dt_helper.validate_datetimes(start_times)
super().__init__(*tensors)
@property
def dt_unit(self) -> str:
return self._dt_helper.dt_unit
@property
def sizes(self) -> Sequence:
return [t.size() for t in self.tensors]
# Subsetting ------------------------:
def train_val_split(self,
train_frac: float = None,
dt: Union[np.datetime64, dict] = None) -> Tuple['TimeSeriesDataset', 'TimeSeriesDataset']:
"""
        :param train_frac: The proportion of the data to keep for training. This is calculated on a per-group basis by
        taking the last observation for each group (i.e., the last observation that has a non-nan value on any measure).
        If neither `train_frac` nor `dt` is passed, `train_frac=.75` is used.
:param dt: A datetime to use in dividing train/validation (first datetime for validation), or a dictionary of
group-names : date-times.
        :return: Two TimeSeriesDatasets, one with data before the split and one with data at or after the split.
"""
# get split times:
if dt is None:
if train_frac is None:
train_frac = .75
assert 0 < train_frac < 1
# for each group, find the last non-nan, take `frac` of that to find the train/val split point:
split_idx = np.array([int(idx * train_frac) for idx in self._last_measured_idx()], dtype='int')
_times = self.times(0)
split_times = np.array([_times[i, t] for i, t in enumerate(split_idx)])
else:
if train_frac is not None:
raise TypeError("Can pass only one of `train_frac`, `dt`.")
if isinstance(dt, dict):
split_times = np.array([dt[group_name] for group_name in self.group_names])
else:
if not isinstance(dt, np.datetime64):
dt = np.datetime64(dt, self.dt_unit)
split_times = np.full(shape=len(self.group_names), fill_value=dt)
# val:
val_dataset = self.with_new_start_times(split_times)
# train:
train_tensors = []
for i, tens in enumerate(self.tensors):
train = tens.data.clone()
train[np.where(self.times(i) >= split_times[:, None])] = float('nan')
if i == 0:
not_all_nan = (~torch.isnan(train)).sum((0, 2))
last_good_idx = true1d_idx(not_all_nan).max()
train = train[:, :(last_good_idx + 1), :]
train_tensors.append(train)
# TODO: replace padding nans for all but first tensor?
# TODO: reduce width of 0> tensors based on width of 0 tensor?
train_dataset = self.with_new_tensors(*train_tensors)
return train_dataset, val_dataset
def with_new_start_times(self, start_times: Union[np.ndarray, Sequence]) -> 'TimeSeriesDataset':
"""
Subset a TimeSeriesDataset so that some/all of the groups have later start times.
:param start_times: An array/list of new datetimes.
:return: A new TimeSeriesDataset.
"""
new_tensors = []
for i, tens in enumerate(self.tensors):
times = self.times(i)
new_tens = []
for g, (new_time, old_times) in enumerate(zip(start_times, times)):
if (old_times <= new_time).all():
warn(f"{new_time} is later than all the times for group {self.group_names[g]}")
new_tens.append(tens[[g], 0:0])
continue
elif (old_times > new_time).all():
warn(f"{new_time} is earlier than all the times for group {self.group_names[g]}")
new_tens.append(tens[[g], 0:0])
continue
# drop if before new_time:
g_tens = tens[g, true1d_idx(old_times >= new_time)]
# drop if after last nan:
all_nan, _ = torch.min(torch.isnan(g_tens), 1)
if all_nan.all():
warn(f"Group '{self.group_names[g]}' (tensor {i}) has only `nans` after {new_time}")
end_idx = 0
else:
end_idx = true1d_idx(~all_nan).max() + 1
new_tens.append(g_tens[:end_idx].unsqueeze(0))
new_tens = ragged_cat(new_tens, ragged_dim=1, cat_dim=0)
new_tensors.append(new_tens)
return type(self)(
*new_tensors,
group_names=self.group_names,
start_times=start_times,
measures=self.measures,
dt_unit=self.dt_unit
)
def get_groups(self, groups: Sequence[Any]) -> 'TimeSeriesDataset':
"""
Get the subset of the batch corresponding to groups. Note that the ordering in the output will match the
        original ordering (not that of `groups`), and that duplicates will be dropped.
"""
group_idx = true1d_idx(np.isin(self.group_names, groups))
return self[group_idx]
def split_measures(self, *measure_groups, which: Optional[int] = None) -> 'TimeSeriesDataset':
"""
Take a dataset with one tensor, split it into a dataset with multiple tensors.
        :param measure_groups: Each argument should be a list of measure-names, or an indexer (i.e. list of ints or
a slice).
:param which: If there are already multiple measure groups, the split will occur within one of them; must
specify which.
:return: A TimeSeriesDataset, now with multiple tensors for the measure-groups
"""
if which is None:
if len(self.measures) > 1:
raise RuntimeError(f"Must pass `which` if there's more than one groups:\n{self.measures}")
which = 0
self_tensor = self.tensors[which]
self_measures = self.measures[which]
idxs = []
for measure_group in measure_groups:
if isinstance(measure_group, slice) or isinstance(measure_group[0], int):
idxs.append(measure_group)
else:
idxs.append([self_measures.index(m) for m in measure_group])
self_measures = np.array(self_measures)
return type(self)(
*(self_tensor[:, :, idx].clone() for idx in idxs),
start_times=self.start_times,
group_names=self.group_names,
measures=[tuple(self_measures[idx]) for idx in idxs],
dt_unit=self.dt_unit
)
def __getitem__(self, item: Union[int, Sequence, slice]) -> 'TimeSeriesDataset':
if isinstance(item, int):
item = [item]
return type(self)(
*super(TimeSeriesDataset, self).__getitem__(item),
group_names=self.group_names[item],
start_times=self.start_times[item],
measures=self.measures,
dt_unit=self.dt_unit
)
# Creation/Transformation ------------------------:
@classmethod
def collate(cls, batch: Sequence['TimeSeriesDataset']) -> 'TimeSeriesDataset':
to_concat = {
'tensors': [batch[0].tensors],
'group_names': [batch[0].group_names],
'start_times': [batch[0].start_times]
}
fixed = {'dt_unit': batch[0].dt_unit, 'measures': batch[0].measures}
for i, ts_dataset in enumerate(batch[1:], 1):
for attr, appendlist in to_concat.items():
to_concat[attr].append(getattr(ts_dataset, attr))
for attr, required_val in fixed.items():
new_val = getattr(ts_dataset, attr)
if new_val != required_val:
raise ValueError(f"Element {i} has `{attr}` = {new_val}, but for element 0 it's {required_val}.")
tensors = tuple(ragged_cat(t, ragged_dim=1) for t in zip(*to_concat['tensors']))
return cls(
*tensors,
group_names=np.concatenate(to_concat['group_names']),
start_times=np.concatenate(to_concat['start_times']),
measures=fixed['measures'],
dt_unit=fixed['dt_unit']
)
def to_dataframe(self,
group_colname: str = 'group',
time_colname: str = 'time'
) -> 'DataFrame':
return self.tensor_to_dataframe(
tensor=ragged_cat(self.tensors, ragged_dim=1, cat_dim=2),
times=self.times(),
group_names=self.group_names,
group_colname=group_colname,
time_colname=time_colname,
measures=self.all_measures
)
@staticmethod
def tensor_to_dataframe(tensor: Tensor,
times: np.ndarray,
group_names: Sequence,
group_colname: str,
time_colname: str,
measures: Sequence[str]) -> 'DataFrame':
from pandas import DataFrame, concat
tensor = tensor.data.numpy()
assert tensor.shape[0] == len(group_names)
assert tensor.shape[0] == len(times)
assert tensor.shape[1] <= times.shape[1]
assert tensor.shape[2] == len(measures)
dfs = []
for g, group_name in enumerate(group_names):
# get values, don't store trailing nans:
values = tensor[g]
all_nan_per_row = np.min(np.isnan(values), axis=1)
if all_nan_per_row.all():
warn(f"Group {group_name} has only missing values.")
continue
end_idx = true1d_idx(~all_nan_per_row).max() + 1
# convert to dataframe:
df =
|
DataFrame(data=values[:end_idx, :], columns=measures)
|
pandas.DataFrame
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
from modin.engines.base.frame.partition import BaseFramePartition
from modin.data_management.utils import length_fn_pandas, width_fn_pandas
from modin.engines.ray.utils import handle_ray_task_error
import ray
from ray.worker import RayTaskError
class PandasOnRayFramePartition(BaseFramePartition):
def __init__(self, object_id, length=None, width=None, call_queue=None):
assert type(object_id) is ray.ObjectID
self.oid = object_id
if call_queue is None:
call_queue = []
self.call_queue = call_queue
self._length_cache = length
self._width_cache = width
def get(self):
"""Gets the object out of the plasma store.
Returns:
The object from the plasma store.
"""
if len(self.call_queue):
self.drain_call_queue()
try:
return ray.get(self.oid)
except RayTaskError as e:
handle_ray_task_error(e)
def apply(self, func, **kwargs):
"""Apply a function to the object stored in this partition.
Note: It does not matter if func is callable or an ObjectID. Ray will
handle it correctly either way. The keyword arguments are sent as a
dictionary.
Args:
func: The function to apply.
Returns:
A RayRemotePartition object.
"""
oid = self.oid
call_queue = self.call_queue + [(func, kwargs)]
result, length, width = deploy_ray_func.remote(call_queue, oid)
return PandasOnRayFramePartition(result, length, width)
def add_to_apply_calls(self, func, **kwargs):
return PandasOnRayFramePartition(
self.oid, call_queue=self.call_queue + [(func, kwargs)]
)
def drain_call_queue(self):
if len(self.call_queue) == 0:
return
oid = self.oid
call_queue = self.call_queue
self.oid, self._length_cache, self._width_cache = deploy_ray_func.remote(
call_queue, oid
)
self.call_queue = []
def __copy__(self):
return PandasOnRayFramePartition(
self.oid, self._length_cache, self._width_cache, self.call_queue
)
def to_pandas(self):
"""Convert the object stored in this partition to a Pandas DataFrame.
Returns:
A Pandas DataFrame.
"""
dataframe = self.get()
assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
return dataframe
def to_numpy(self, **kwargs):
"""
        Convert the object stored in this partition to a NumPy array.
Returns
-------
A NumPy array.
"""
return self.apply(lambda df, **kwargs: df.to_numpy(**kwargs)).get()
def mask(self, row_indices, col_indices):
if (
(isinstance(row_indices, slice) and row_indices == slice(None))
or (
not isinstance(row_indices, slice)
and self._length_cache is not None
and len(row_indices) == self._length_cache
)
) and (
(isinstance(col_indices, slice) and col_indices == slice(None))
or (
not isinstance(col_indices, slice)
and self._width_cache is not None
and len(col_indices) == self._width_cache
)
):
return self.__copy__()
new_obj = self.add_to_apply_calls(
lambda df:
|
pandas.DataFrame(df.iloc[row_indices, col_indices])
|
pandas.DataFrame
|
from scipy.special import gammaln, psi
import numpy as np
import math
import pandas as pd
class LDABase:
"""
Base Class to fit LDA using Collapsed Gibbs Sampling derived from
<NAME> Steyvers (2004): Finding scientific topics
"""
def __init__(self, corpus, K, alpha="asymmetric", beta=0.01):
"""
Create LDABase instance.
"""
self.corpus = corpus
self.K = K # number of topics
self.W = corpus.get_vocab_len() # number of words in vocabulary
self.D = len(corpus) # number of documents
# initialize count parameters
self.topic_word_count = np.zeros((self.K, self.W))
self.doc_topic_count = np.zeros((self.D, self.K))
self.doc_word_topic_assignment = [[0] * len(doc) for doc in corpus]
self.topic_count = np.zeros(K)
self.doc_len = corpus.get_doc_len()
self.num_tokens = np.sum(corpus.get_doc_len()) # num tokens in corpus
# initialize parameters for estimation phi and theta
self.theta = np.zeros((self.D, self.K))
self.phi = np.zeros((self.K, self.W))
# initialize priors
if isinstance(alpha, (np.ndarray, list)):
# set alpha from parameters
self.alpha = np.array(alpha)
elif alpha == "asymmetric":
# asymmetric prior alpha
self.alpha = np.array([1.0 / (k + np.sqrt(K)) for k in range(K)])
else:
# symmetric prior alpha
self.alpha = np.array([1.0 / K] * K)
# symmetric prior beta
self.beta = beta
self.log_likelihood_trace = []
self.perplexity_trace = []
self.theta_trace = []
self.phi_trace = []
self.marginal_topic_dist_trace = []
self.training = True
def _get_topic_word_idx(self, topn=10):
"""
Returns matrix with word index and shape (K,topn).
"""
topic_word_idx_sorted = np.argpartition(self.phi, kth=range(-topn, 0), axis=-1)[:,-topn:]
topic_word_idx_sorted = np.flip(topic_word_idx_sorted, axis=-1)
return topic_word_idx_sorted
def get_topics(self, topn=10):
"""
Returns topn words from all topics as list of words and list of word probabilities.
"""
topics_words = []
topics_probs = []
for topic_idx, topic in enumerate(self._get_topic_word_idx(topn)):
words = []
probs = []
for word_idx in topic:
word = self.corpus.idx2word(word_idx)
prob = self.phi[topic_idx, word_idx]
words.append(word)
probs.append(prob)
topics_probs.append(probs)
topics_words.append(words)
return topics_words, topics_probs
def print_topics(self, topn=10):
"""
Prints topn words from all topics.
"""
words, probs = self.get_topics(topn)
print("p(w|t)\tword\n")
for topic_idx, (words, probs) in enumerate(zip(words, probs)):
print("Topic #{}".format(topic_idx + 1))
for word, prob in zip(words, probs):
output = "{:.3f}\t{}".format(prob, word)
print(output)
print()
def set_inference_mode(self):
"""
Disable training mode.
"""
self.training = False
def set_training_mode(self):
"""
Enable training mode.
"""
self.training = True
def update(self, doc_idx, word_idx, pos, topic_idx, count):
"""
Increases or decreases all count parameters by given count value (+1 or -1).
"""
if self.training is True: # should not change during inference
self.topic_word_count[topic_idx,word_idx] += count
self.topic_count[topic_idx] += count
self.doc_topic_count[doc_idx,topic_idx] += count
self.doc_word_topic_assignment[doc_idx][pos] = topic_idx
def get_topic_assignment(self, doc_idx, pos):
"""
Returns current topic assignment of word in document doc_idx at given position.
"""
return self.doc_word_topic_assignment[doc_idx][pos]
def get_phi(self):
"""
Returns per-topic word distribution phi.
<NAME> Steyvers (2004): Finding scientific topics
"""
phi = np.zeros((self.K, self.W))
beta_sum = self.beta * self.W
for z in range(self.K):
for w in range(self.W):
phi[z,w] = (self.topic_word_count[z,w] + self.beta) / (self.topic_count[z] + beta_sum)
return phi
def get_theta(self):
"""
Returns per-document topic distribution theta.
<NAME> (2004): Finding scientific topics
"""
theta = np.zeros((self.D, self.K))
alpha_sum = np.sum(self.alpha)
for d in range(self.D):
for z in range(self.K):
theta[d,z] = (self.doc_topic_count[d,z] + self.alpha[z]) / (self.doc_len[d] + alpha_sum)
return theta
def full_conditional_distribution(self, doc_idx, word_idx, topic_idx):
"""
Returns full conditional distribution for given document index, word index and topic index.
<NAME> Steyvers (2004): Finding scientific topics
"""
n_w_t = self.topic_word_count[:,word_idx] # get word topic count for word index
n_t = self.topic_count # get topic count
n_d_t = self.doc_topic_count[doc_idx] # get doc topic count for doc index
n_d = self.doc_len[doc_idx] # get document word count
word_topic_ratio = (n_w_t + self.beta) / (n_t + self.W * self.beta)
topic_doc_ratio = (n_d_t + self.alpha) / (n_d + np.sum(self.alpha))
p_z_cond = word_topic_ratio * topic_doc_ratio
return p_z_cond / p_z_cond.sum()
def get_log_likelihood(self):
"""
Returns joint log likelihood p(w, z) = p(w|z)p(z).
<NAME> Steyvers (2004): Finding scientific topics
"""
log_likelihood = 0.0
for z in range(self.K): # log p(w|z)
log_likelihood += gammaln(self.W * self.beta)
log_likelihood -= self.W * gammaln(self.beta)
log_likelihood += np.sum(gammaln(self.topic_word_count[z] + self.beta))
log_likelihood -= gammaln(np.sum(self.topic_word_count[z] + self.beta))
for doc_idx, _ in enumerate(self.corpus): # log p(z)
log_likelihood += gammaln(np.sum(self.alpha))
log_likelihood -= np.sum(gammaln(self.alpha))
log_likelihood += np.sum(gammaln(self.doc_topic_count[doc_idx] + self.alpha))
log_likelihood -= gammaln(np.sum(self.doc_topic_count[doc_idx] + self.alpha))
return log_likelihood
def trace_metrics(self):
"""
Traces metrics to ensure convergency.
"""
log_likelihood = self.get_log_likelihood()
perplexity = np.exp(-log_likelihood / self.num_tokens)
marginal_topic_dist = self.topic_word_count.sum(axis=-1) / self.topic_word_count.sum()
self.log_likelihood_trace.append(log_likelihood)
self.perplexity_trace.append(perplexity)
self.marginal_topic_dist_trace.append(marginal_topic_dist)
def trace_params(self):
"""
Traces estimates of phi and theta.
"""
self.phi_trace.append(self.get_phi())
self.theta_trace.append(self.get_theta())
def plot_topic_prior_alpha(self):
"""
Plots topic prior alpha.
"""
|
pd.DataFrame(self.alpha)
|
pandas.DataFrame
|
from matplotlib import cm
from tqdm import tqdm
from skimage.filters import threshold_otsu
from keras.models import load_model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os.path as osp
import openslide
from pathlib import Path
from skimage.filters import threshold_otsu
import glob
import math
# before importing HDFStore, make sure 'tables' is installed by pip3 install tables
from pandas import HDFStore
from openslide.deepzoom import DeepZoomGenerator
from sklearn.model_selection import StratifiedShuffleSplit
import cv2
from keras.utils.np_utils import to_categorical
output_dir = Path('/home/wli/Downloads/camelyontestonly')
import os.path as osp
import openslide
from pathlib import Path
from keras.models import Sequential
from keras.layers import Lambda, Dropout
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.models import model_from_json
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
#BASE_TRUTH_DIR = Path('/home/wli/Downloads/camelyontest/mask')
#slide_path = '/home/wli/Downloads/CAMELYON16/training/tumor/'
slide_path = '/Volumes/WZL-NIAID-5/New folder (4)/CAMELYON16/training/normal/'
#slide_path = '/home/wli/Downloads/CAMELYON16/training/normal/'
#slide_path_validation = '/home/wli/Downloads/CAMELYON16/training/tumor/validation/'
#slide_path_validation = '/home/wli/Downloads/CAMELYON16/training/normal/validation/'
#truth_path = str(BASE_TRUTH_DIR / 'tumor_026_Mask.tif')
#slide_paths = list(slide_path)
slide_paths = glob.glob(osp.join(slide_path, '*.tif'))
#slide_paths_validation = glob.glob(osp.join(slide_path_validation, '*.tif'))
#slide_paths = slide_paths + slide_paths_validation
#slide_paths = slide_path
# slide_paths.sort()
#slide = openslide.open_slide(slide_path)
def find_patches_from_slide(slide_path, filter_non_tissue=True):
"""Returns a dataframe of all patches in slide
input: slide_path: path to WSI file
output: samples: dataframe with the following columns:
slide_path: path of slide
is_tissue: sample contains tissue
is_tumor: truth status of sample
tile_loc: coordinates of samples in slide
option: base_truth_dir: directory of truth slides
    option: filter_non_tissue: remove samples where no tissue was detected
"""
#sampletotal = pd.DataFrame([])
#base_truth_dir = Path(BASE_TRUTH_DIR)
#anno_path = Path(anno_path)
#slide_contains_tumor = osp.basename(slide_paths[i]).startswith('tumor_')
print(slide_path)
dimensions = []
with openslide.open_slide(slide_path) as slide:
dtotal = (slide.dimensions[0] / 224, slide.dimensions[1] / 224)
thumbnail = slide.get_thumbnail((dtotal[0], dtotal[1]))
thum = np.array(thumbnail)
ddtotal = thum.shape
dimensions.extend(ddtotal)
hsv_image = cv2.cvtColor(thum, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_image)
hthresh = threshold_otsu(h)
sthresh = threshold_otsu(s)
vthresh = threshold_otsu(v)
        # the min value for v can be changed later
minhsv = np.array([hthresh, sthresh, 70], np.uint8)
maxhsv = np.array([180, 255, vthresh], np.uint8)
thresh = [minhsv, maxhsv]
print(thresh)
        # extract the contours of the tissue
rgbbinary = cv2.inRange(hsv_image, thresh[0], thresh[1])
_, contours, _ = cv2.findContours(
rgbbinary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
bboxtcols = ['xmin', 'xmax', 'ymin', 'ymax']
bboxt = pd.DataFrame(columns=bboxtcols)
for c in contours:
(x, y, w, h) = cv2.boundingRect(c)
bboxt = bboxt.append(
|
pd.Series([x, x+w, y, y+h], index=bboxtcols)
|
pandas.Series
|
import sys
import os
import torch
import numpy as np
import torch_geometric.datasets
import pyximport
from torch_geometric.data import InMemoryDataset, download_url
import pandas as pd
from sklearn import preprocessing
pyximport.install(setup_args={'include_dirs': np.get_include()})
import os.path as osp
from torch_geometric.data import Data
import time
from torch_geometric.utils import add_self_loops, negative_sampling
from torch_geometric.data import Dataset
from functools import lru_cache
import copy
from fairseq.data import (
NestedDictionaryDataset,
NumSamplesDataset,
)
import json
import pathlib
from pathlib import Path
BASE = Path(os.path.realpath(__file__)).parent
GLOBAL_ROOT = str(BASE / 'graphormer_repo' / 'graphormer')
sys.path.insert(1, (GLOBAL_ROOT))
from data.wrapper import preprocess_item
import datetime
def find_part(hour):
if hour < 11:
part = 1
elif (hour > 11) & (hour < 20):
part = 2
else:
part = 3
return part
def prepare_raw_dataset_edge(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv')
all_roads_dataset = pd.DataFrame()
    all_edge_list = [list(edge) for edge in all_roads_graph]
all_roads_dataset['edge_id']= range(0,len(init['edge_id'].unique()))
all_roads_dataset['speed'] = ' 1'
all_roads_dataset['length'] = ' 1'
all_roads_dataset[' start_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset_edges = pd.DataFrame()
all_roads_dataset_edges['source'] = [x[0] for x in all_edge_list]
all_roads_dataset_edges['target'] = [x[1] for x in all_edge_list]
# all_roads_dataset_edges = all_roads_dataset_edges.drop_duplicates().reset_index(drop = True)
trip_part = all_roads_dataset[['edge_id', 'speed', 'length', ' start_point_part', 'finish_point_part']].copy()
source_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'source'}), on = ['source'], how = 'left')
target_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'target'}), on = ['target'], how = 'left')
total_table = pd.DataFrame()
total_table['speed'] = (source_merge['speed'].apply(lambda x: [x]) + target_merge['speed'].apply(lambda x: [x]))
total_table['length'] = (source_merge['length'].apply(lambda x: [x]) + target_merge['length'].apply(lambda x: [x]))
total_table['edges'] = (source_merge['source'].apply(lambda x: [x]) + target_merge['target'].apply(lambda x: [x]))
total_table[' start_point_part'] = source_merge[' start_point_part']
total_table['finish_point_part'] = target_merge['finish_point_part']
    total_table['week_period'] = datetime.datetime.now().weekday()
    total_table['hour'] = datetime.datetime.now().hour
total_table['day_period'] = total_table['hour'].apply(lambda x: find_part(x))
total_table['RTA'] = 1
total_table['clouds'] = 1
total_table['snow'] = 0
total_table['temperature'] = 10
total_table['wind_dir'] = 180
total_table['wind_speed'] = 3
total_table['pressure'] = 747
total_table['source'] = source_merge['source']
total_table['target'] = source_merge['target']
# total_table = total_table.drop_duplicates().reset_index(drop = True)
return total_table
def prepare_raw_dataset_node(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init =
|
pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
|
pandas.read_csv
|
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.components.commons import Commons
from aistac.properties.abstract_properties import AbstractPropertyManager
from ds_discovery.components.discovery import DataDiscovery
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
__author__ = '<NAME>'
class AbstractBuilderIntentModel(AbstractCommonsIntentModel):
_INTENT_PARAMS = ['self', 'save_intent', 'column_name', 'intent_order',
'replace_intent', 'remove_duplicates', 'seed']
def __init__(self, property_manager: AbstractPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, default_intent_order: int=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param default_intent_order: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = default_intent_order if isinstance(default_intent_order, int) else 0
intent_param_exclude = ['size']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any=None, intent_levels: [str, int, list]=None, run_book: str=None,
seed: int=None, simulate: bool=None, **kwargs) -> pd.DataFrame:
"""Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract. The whole run can be seeded though any parameterised seeding in the intent
contracts will take precedence
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param intent_levels: (optional) a single or list of intent_level to run in order given
:param run_book: (optional) a preset runbook of intent_level to run in order
:param seed: (optional) a seed value that will be applied across the run: default to None
:param simulate: (optional) returns a report of the order of run and return the indexed column order of run
:return: a pandas dataframe
"""
simulate = simulate if isinstance(simulate, bool) else False
col_sim = {"column": [], "order": [], "method": []}
# legacy
if 'size' in kwargs.keys():
canonical = kwargs.pop('size')
canonical = self._get_canonical(canonical)
size = canonical.shape[0] if canonical.shape[0] > 0 else 1000
# test if there is any intent to run
if self._pm.has_intent():
# get the list of levels to run
if isinstance(intent_levels, (str, list)):
column_names = Commons.list_formatter(intent_levels)
elif isinstance(run_book, str) and self._pm.has_run_book(book_name=run_book):
column_names = self._pm.get_run_book(book_name=run_book)
else:
# put all the intent in order of model, get, correlate, associate
_model = []
_get = []
_correlate = []
_frame_start = []
_frame_end = []
for column in self._pm.get_intent().keys():
for order in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column), {}):
for method in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column, order), {}).keys():
if str(method).startswith('get_'):
if column in _correlate + _frame_start + _frame_end:
continue
_get.append(column)
elif str(method).startswith('model_'):
_model.append(column)
elif str(method).startswith('correlate_'):
if column in _get:
_get.remove(column)
_correlate.append(column)
elif str(method).startswith('frame_'):
if column in _get:
_get.remove(column)
if str(method).startswith('frame_starter'):
_frame_start.append(column)
else:
_frame_end.append(column)
column_names = Commons.list_unique(_frame_start + _get + _model + _correlate + _frame_end)
for column in column_names:
level_key = self._pm.join(self._pm.KEY.intent_key, column)
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
try:
if method in self.__dir__():
if simulate:
col_sim['column'].append(column)
col_sim['order'].append(order)
col_sim['method'].append(method)
continue
result = []
params.update(params.pop('kwargs', {}))
if isinstance(seed, int):
params.update({'seed': seed})
_ = params.pop('intent_creator', 'Unknown')
if str(method).startswith('get_'):
result = eval(f"self.{method}(size=size, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('correlate_'):
result = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('model_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_starter'):
canonical = self._get_canonical(params.pop('canonical', canonical), deep_copy=False)
size = canonical.shape[0]
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
if 0 < size != len(result):
raise IndexError(f"The index size of '{column}' is '{len(result)}', "
f"should be {size}")
canonical[column] = result
except ValueError as ve:
raise ValueError(f"intent '{column}', order '{order}', method '{method}' failed with: {ve}")
except TypeError as te:
raise TypeError(f"intent '{column}', order '{order}', method '{method}' failed with: {te}")
if simulate:
return pd.DataFrame.from_dict(col_sim)
return canonical
def _get_number(self, from_value: [int, float]=None, to_value: [int, float]=None, relative_freq: list=None,
precision: int=None, ordered: str=None, at_most: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number in the range from_value to to_value. if only to_value given from_value is zero
:param from_value: (signed) integer to start from
        :param to_value: optional, (signed) integer the number sequence goes up to but does not include
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
        :param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param at_most: the most times a selection should be chosen
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
"""
if not isinstance(from_value, (int, float)) and not isinstance(to_value, (int, float)):
raise ValueError(f"either a 'range_value' or a 'range_value' and 'to_value' must be provided")
if not isinstance(from_value, (float, int)):
from_value = 0
if not isinstance(to_value, (float, int)):
(from_value, to_value) = (0, from_value)
if to_value <= from_value:
raise ValueError("The number range must be a positive different, found to_value <= from_value")
at_most = 0 if not isinstance(at_most, int) else at_most
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if not isinstance(precision, int) else precision
if precision == 0:
from_value = int(round(from_value, 0))
to_value = int(round(to_value, 0))
is_int = True if (isinstance(to_value, int) and isinstance(from_value, int)) else False
if is_int:
precision = 0
# build the distribution sizes
if isinstance(relative_freq, list) and len(relative_freq) > 1:
freq_dist_size = self._freq_dist_size(relative_freq=relative_freq, size=size, seed=_seed)
else:
freq_dist_size = [size]
# generate the numbers
rtn_list = []
generator = np.random.default_rng(seed=_seed)
dtype = int if is_int else float
bins = np.linspace(from_value, to_value, len(freq_dist_size) + 1, dtype=dtype)
for idx in np.arange(1, len(bins)):
low = bins[idx - 1]
high = bins[idx]
if low >= high:
continue
elif at_most > 0:
sample = []
for _ in np.arange(at_most, dtype=dtype):
count_size = freq_dist_size[idx - 1] * generator.integers(2, 4, size=1)[0]
sample += list(set(np.linspace(bins[idx - 1], bins[idx], num=count_size, dtype=dtype,
endpoint=False)))
if len(sample) < freq_dist_size[idx - 1]:
raise ValueError(f"The value range has insufficient samples to choose from when using at_most."
f"Try increasing the range of values to sample.")
rtn_list += list(generator.choice(sample, size=freq_dist_size[idx - 1], replace=False))
else:
if dtype == int:
rtn_list += generator.integers(low=low, high=high, size=freq_dist_size[idx - 1]).tolist()
else:
choice = generator.random(size=freq_dist_size[idx - 1], dtype=float)
choice = np.round(choice * (high-low)+low, precision).tolist()
# clamp values so they stay below the upper bound at the given precision
choice = [high - 10**(-precision) if x >= high else x for x in choice]
rtn_list += choice
# order or shuffle the return list
if isinstance(ordered, str) and ordered.lower() in ['asc', 'des']:
rtn_list.sort(reverse=True if ordered.lower() == 'des' else False)
else:
generator.shuffle(rtn_list)
return rtn_list
def _get_category(self, selection: list, relative_freq: list=None, size: int=None, at_most: int=None,
seed: int=None) -> list:
""" returns a category from a list. Of particular not is the at_least parameter that allows you to
control the number of times a selection can be chosen.
:param selection: a list of items to select from
:param relative_freq: a weighting pattern that does not have to add to 1
:param size: an optional size of the return. default to 1
:param at_most: the most times a selection should be chosen
:param seed: a seed value for the random function: default to None
:return: an item or list of items chosen from the list
"""
if not isinstance(selection, list) or len(selection) == 0:
return [None] * (1 if size is None else size)
_seed = self._seed() if seed is None else seed
select_index = self._get_number(len(selection), relative_freq=relative_freq, at_most=at_most, size=size,
seed=_seed)
rtn_list = [selection[i] for i in select_index]
return list(rtn_list)
def _get_datetime(self, start: Any, until: Any, relative_freq: list=None, at_most: int=None, ordered: str=None,
date_format: str=None, as_num: bool=None, ignore_time: bool=None, size: int=None,
seed: int=None, day_first: bool=None, year_first: bool=None) -> list:
""" returns a random date between two date and/or times. weighted patterns can be applied to the overall date
range.
if a signed 'int' type is passed to the start and/or until dates, the inferred date will be the current date
time with the integer being the offset from the current date time in 'days'.
if a dictionary of time delta name values is passed this is treated as a time delta from the start time.
for example if start = 0, until = {days=1, hours=3} the date range will be between now and 1 day and 3 hours from now
Note: If no patterns are set this will return a linearly random number between the range boundaries.
:param start: the start boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp or int
:param until: up until boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp, pd.delta, int
:param relative_freq: (optional) A pattern across the whole date range.
:param at_most: the most times a selection should be chosen
:param ordered: order the data ascending 'asc' or descending 'des'
:param ignore_time: ignore time elements and only select from Year, Month, Day elements. Default is False
:param date_format: the string format of the date to be returned. if not set then pd.Timestamp returned
:param as_num: returns a list of Matplotlib date values as a float. Default is False
:param size: the size of the sample to return. Default to 1
:param seed: a seed value for the random function: default to None
:param year_first: specifies if to parse with the year first
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
If both day_first and year_first are True, year_first takes precedence (same as dateutil).
:param day_first: specifies if to parse with the day first
If True, parses dates with the day first, eg %d-%m-%Y.
If False, defaults to the preferred convention, normally %m-%d-%Y (but not strict)
:return: a date or size of dates in the format given.
"""
# pre check
if start is None or until is None:
raise ValueError("The start or until parameters cannot be of NoneType")
# Code block for intent
as_num = False if not isinstance(as_num, bool) else as_num
ignore_time = False if not isinstance(ignore_time, bool) else ignore_time
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
if isinstance(start, int):
start = (pd.Timestamp.now() + pd.Timedelta(days=start))
if isinstance(until, int):
until = (pd.Timestamp.now() + pd.Timedelta(days=until))
if isinstance(until, dict):
until = (start + pd.Timedelta(**until))
if start == until:
rtn_list = [self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]] * size
else:
_dt_start = self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]
_dt_until = self._convert_date2value(until, day_first=day_first, year_first=year_first)[0]
precision = 15
if ignore_time:
_dt_start = int(_dt_start)
_dt_until = int(_dt_until)
precision = 0
rtn_list = self._get_number(from_value=_dt_start, to_value=_dt_until, relative_freq=relative_freq,
at_most=at_most, ordered=ordered, precision=precision, size=size, seed=_seed)
if not as_num:
rtn_list = mdates.num2date(rtn_list)
if isinstance(date_format, str):
rtn_list = pd.Series(rtn_list).dt.strftime(date_format).to_list()
else:
rtn_list = pd.Series(rtn_list).dt.tz_convert(None).to_list()
return rtn_list
def _get_intervals(self, intervals: list, relative_freq: list=None, precision: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number based on a list selection of tuple(lower, upper) interval
:param intervals: a list of unique tuple pairs representing the interval lower and upper boundaries
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
# Code block for intent
size = 1 if size is None else size
if not isinstance(precision, int):
precision = 0 if all(isinstance(v[0], int) and isinstance(v[1], int) for v in intervals) else 3
_seed = self._seed() if seed is None else seed
if not all(isinstance(value, tuple) for value in intervals):
raise ValueError("The intervals list must be a list of tuples")
interval_list = self._get_category(selection=intervals, relative_freq=relative_freq, size=size, seed=_seed)
interval_counts =
|
pd.Series(interval_list, dtype='object')
|
pandas.Series
|
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @author breeze
import threading
import argparse
import multiprocessing
import time
from multiprocessing import Queue, Pool
import face_recognition
import pandas as pd
import win32com.client
import cv2
import encoding_images
from app_utils import *
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Load a sample picture and learn how to recognize it.
# face_recognition.api.batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128)[source]
# face_recognition.api.compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6)
# face_recognition.api.face_distance(face_encodings, face_to_compare)[source]
# face_recognition.api.face_encodings(face_image, known_face_locations=None, num_jitters=1)[source]
# face_recognition.api.face_landmarks(face_image, face_locations=None)[source]
# face_recognition.api.face_locations(img, number_of_times_to_upsample=1, model='hog')[source]
# face_recognition.api.load_image_file(file, mode='RGB')[source]
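# A rough sketch of the intended per-frame flow (assumed from the notes above; `frame` is a placeholder
# for a webcam image, the real capture loop lives further down in this script):
#   small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)            # 1/4 resolution for speed
#   face_locations = face_recognition.face_locations(small_frame)
#   face_encodings = face_recognition.face_encodings(small_frame, face_locations)
#   for face_encoding in face_encodings:
#       name = vote_class(face_encoding)                                 # top-N vote against the known encodings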
# Speech (voice) module
speaker = win32com.client.Dispatch("SAPI.SpVoice")
name = "Unknown"
current_names = [name]
last_time = time.time()
known_face_names = []
known_face_encodings = []
known_face_encodings, known_face_names = encoding_images.load_encodings()
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
TIME_DIFF = 20  # persistence interval in seconds; when set to 0, each recognition result is saved immediately
name_record = "./dataset/face_record.txt"  # file used to persist the recognized face results
NAME_DF = pd.DataFrame(known_face_names, columns=["name"])
last_ts = time.time()
lock = threading.Lock()
def myprint(log, ts):
global lock, last_ts
if lock.acquire():
diff = ts - last_ts
print(log, '--------', diff)
last_ts = ts
lock.release()
def process_face_records(name):
"""
Handle a single recognition record and persist the accumulated data to file after a set interval.
Note: this touches shared global state from several threads, which can lead to locking issues.
:param name: recognized person's name
:return:
"""
return
print('process_face_records start', time.time())
global current_names, last_time
# myprint("global current_names {}, last_time {}".format(current_names, last_time))
# if the name is not already in the recognized list, greet the person
if name not in current_names:
print("ts ====", last_time, time.time())
current_names.append(name)
myprint("Hello {}, nice to meet you! ".format(name))
# speaker.Speak("Hello {}, nice to meet you! ".format(name))
# after a set interval, flush the recognized people to file and reset the list
if last_time < time.time() - TIME_DIFF:  # periodically clear the detected people
last_time = time.time()
time_format = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
myprint(time_format + " update last_time and clear current_names.")
with open(name_record, 'a') as f:
if len(current_names) > 0:
f.writelines("{}:{} \n".format(time_format, str(current_names)))
print("======", current_names)
current_names = [] # clear()
current_names = [name]
myprint('process_face_records end', time.time())
def vote_class(face_encoding, tolerance=0.3, topN=5):
myprint('vote start ', time.time())
"""
When several comparisons fall below the tolerance, take the topN closest matches and let them vote to
decide the final class; the distances themselves are not weighted here.
:param face_encoding: face encoding
:param tolerance: distance threshold; the smaller, the more similar
:param topN: maximum number of candidates taking part in the vote
:return: detected name
"""
# compute the distances to all known encodings
distance_ = face_recognition.face_distance(known_face_encodings, face_encoding)
df = pd.DataFrame(distance_, columns=["dis"]) # 转换成 DataFrame
topDF = df[df['dis'] <= tolerance].nsmallest(topN, columns=['dis']) # 过滤结果集
namedf = NAME_DF.loc[topDF.index] # 从姓名列表中获取face距离对应的人脸名称
con =
|
pd.concat([topDF, namedf], axis=1)
|
pandas.concat
|
"""
This is a Python implementation of the Denoising Autoencoder approach that we proposed for
the first Multi-target speaker detection and identification Challenge Evaluation (MCE 2018, http://www.mce2018.org)
The basic idea is to train a Denoising Autoencoder to map each individual input ivector
to the mean of all ivectors from that speaker.
The aim of this DAE is to compensate for inter-session variability and increase the discriminative power of the ivectors.
You can find our system description for the MCE 2018 challenge here: http://mce.csail.mit.edu/pdfs/BiometricVox_description.pdf
Part of the code has been adapted from the baseline system at: https://github.com/swshon/multi-speakerID.
Copyright 2018 <NAME>
Biometric Vox S.L.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import numpy as np
from mce_utils import load_ivector, length_norm, make_spkvec, calculate_EER, get_trials_label_with_confusion, calculate_EER_with_confusion
import pandas as pd
from keras.models import Model
from keras.layers import Input, Dense, Activation
from keras import metrics
from keras import optimizers
# Neural network definition: a single hidden layer with 'tanh' activation and a linear output layer
def get_DAE(nu=2000):
iv_dim = 600
inputs = Input(shape=(iv_dim,))
x = Dense(nu)(inputs)
x = Activation('tanh')(x)
x = Dense(iv_dim)(x)
out = Activation('linear')(x)
model = Model(inputs=inputs, outputs=out)
return model
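# Minimal training sketch (assumed setup, not part of the original published recipe; the array names
# trn_ivectors / trn_speaker_mean_ivectors are placeholders):
#   dae = get_DAE(nu=2000)
#   dae.compile(optimizer=optimizers.Adam(), loss='mean_squared_error')
#   dae.fit(trn_ivectors, trn_speaker_mean_ivectors, epochs=10, batch_size=256)
# where trn_speaker_mean_ivectors holds, for each row, the mean ivector of that row's speaker.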
# Making dictionary to find blacklist pair between train and test dataset
bl_match = np.loadtxt('data/bl_matching.csv',dtype='str')
dev2train={}
dev2id={}
train2dev={}
train2id={}
test2train={}
train2test={}
for iter, line in enumerate(bl_match):
line_s = line.split(',')
dev2train[line_s[1].split('_')[-1]]= line_s[3].split('_')[-1]
dev2id[line_s[1].split('_')[-1]]= line_s[0].split('_')[-1]
train2dev[line_s[3].split('_')[-1]]= line_s[1].split('_')[-1]
train2id[line_s[3].split('_')[-1]]= line_s[0].split('_')[-1]
test2train[line_s[2].split('_')[-1]]= line_s[3].split('_')[-1]
train2test[line_s[3].split('_')[-1]]= line_s[2].split('_')[-1]
# load test set information
filename = 'data/tst_evaluation_keys.csv'
tst_info = np.loadtxt(filename,dtype='str',delimiter=',',skiprows=1,usecols=range(0,3))
tst_trials = []
tst_trials_label = []
tst_ground_truth =[]
for iter in range(len(tst_info)):
tst_trials_label.extend([tst_info[iter,0]])
if tst_info[iter,1]=='background':
tst_trials = np.append(tst_trials,0)
else:
tst_trials = np.append(tst_trials,1)
# Set random seed to make results reproducible
seed = 134
np.random.seed(seed)
# Loading i-vector
trn_bl_id, trn_bl_utt, trn_bl_ivector = load_ivector('data/trn_blacklist.csv')
trn_bg_id, trn_bg_utt, trn_bg_ivector = load_ivector('data/trn_background.csv')
dev_bl_id, dev_bl_utt, dev_bl_ivector = load_ivector('data/dev_blacklist.csv')
dev_bg_id, dev_bg_utt, dev_bg_ivector = load_ivector('data/dev_background.csv')
tst_id, test_utt, tst_ivector = load_ivector('data/tst_evaluation.csv')
# length normalization
trn_bl_ivector = length_norm(trn_bl_ivector)
trn_bg_ivector = length_norm(trn_bg_ivector)
dev_bl_ivector = length_norm(dev_bl_ivector)
dev_bg_ivector = length_norm(dev_bg_ivector)
tst_ivector = length_norm(tst_ivector)
# Inputs to DAE are ivectors and targets are the speaker-level mean ivectors
train_spk_ids =
|
pd.DataFrame({'spk_ids': trn_bg_id})
|
pandas.DataFrame
|
from __future__ import division, print_function
from action_detector_diagnosis import ActionDetectorDiagnosis
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import pandas as pd
import os
from collections import OrderedDict
from utils import interpolated_prec_rec
from matplotlib import gridspec, rc
import matplotlib as mpl
mpl.use('Agg')
params = {'font.family': 'serif','font.serif': 'Times',
'text.usetex': True,
'xtick.major.size': 8,
'ytick.major.size': 8,
'xtick.major.width': 3,
'ytick.major.width': 3,
'mathtext.fontset': 'custom',
}
mpl.rcParams.update(params)
import matplotlib.pyplot as plt
def compute_mAP_N(result,this_cls_pred,this_cls_gt):
ap = np.zeros(len(result.tiou_thresholds))
tp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
fp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
for tidx, tiou in enumerate(result.tiou_thresholds):
fp[tidx,pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values] = 1
tp[tidx,~(
|
pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]])
|
pandas.isnull
|
import argparse
import json
import numpy as np
import pandas as pd
GUEST = "guest"
HOST = "host"
def create_conf(name, pairs):
for r, c in pairs:
sub = f"{name}_{r}_{c}"
conf = {
"initiator": {"role": "guest", "party_id": 10000},
"job_parameters": {"work_mode": 0},
"role": {"guest": [10000], "host": [10000]},
"role_parameters": {
role: {
"args": {"data": {"data": [{"name": f"{sub}_{role}", "namespace": "experiment"}]}},
"dataio_0": {"with_label": [False], "output_format": ["dense"]},
"pearson_0": {"column_indexes": [-1]}
} for role in [GUEST, HOST]
}
}
with open(f"{sub}_conf.json", "w") as f:
json.dump(conf, f, indent=2)
def create_test_suite(name, pairs):
def data_pair(sub_name):
return [{
"file": f"examples/federatedml-1.x-examples/hetero_pearson/{sub_name}_{role}.csv",
"head": 1,
"partition": 16,
"work_mode": 0,
"table_name": f"{sub_name}_{role}",
"namespace": "experiment",
"role": f"{role}_0"
} for role in [GUEST, HOST]]
data = []
task = {}
for r, c in pairs:
sub = f"{name}_{r}_{c}"
data.extend(data_pair(sub))
task[f"pearson_{sub}"] = {"conf": f"./{sub}_conf.json",
"dsl": "./test_dsl.json"}
with open(f"{name}_testsuite.json", "w") as f:
json.dump({"data": data, "tasks": task}, f, indent=2)
def create_data(role, name, pairs):
for r, c in pairs:
sub = f"{name}_{r}_{c}"
arr = np.random.rand(r, c)
df =
|
pd.DataFrame(arr)
|
pandas.DataFrame
|
""" Interact with the FRITZ ZTF-II marshal """
import os
import warnings
import pandas
import json
import requests
import numpy as np
from astropy import time
from astropy.io import fits
from .io import LOCALSOURCE, _load_id_
FRITZSOURCE = os.path.join(LOCALSOURCE,"fritz")
if not os.path.isdir(FRITZSOURCE):
os.mkdir(FRITZSOURCE)
FID_TO_NAME = {1:"ztfg", 2:"ztfr", 3:"ztfi"}
ZTFCOLOR = {"ztfr":"tab:red", "ztfg":"tab:green", "ztfi":"tab:orange"}
_BASE_FRITZ_URL = "https://fritz.science/"
FRITZSOURCE = os.path.join(LOCALSOURCE,"fritz")
####################
# #
# GENERIC TOOLS #
# #
####################
# ---------- #
# Downloads #
# ---------- #
def api(method, endpoint, data=None, load=True, token=None, **kwargs):
""" """
if token is None:
token = _load_id_('fritz')
headers = {'Authorization': f"token {token}"}
response = requests.request(method, endpoint, json=data, headers=headers, **kwargs)
if not load:
return response
try:
downloaded = json.loads(response.content)
except:
warnings.warn("cannot load the response.content")
downloaded = None
if downloaded["status"] not in ["success"]:
raise IOError(f"downloading status of '{method} {endpoint}' is not success: {downloaded['status']}")
return downloaded["data"]
def bulk_download(fobject, names, nprocess=4, show_progress=True,
asdict=False, force_dl=False, client=None, store=True):
""" Multiprocessed download of Fritz{fobject}.
This makes use of the Fritz{fobject}.from_name() classmethods
Parameters
----------
fobject: [string]
What you want to download.
- "lightcurve" (or "photometry"), "spectra" (or "spectrum"), "alerts", or "source"
names: [list of string]
list of names for which you want to download data.
nprocess: [int] -optional-
number of parallel download processes.
force_dl: [bool] -optional-
Should this redownload existing data ?
store: [bool] -optional-
Should the downloaded data be stored ?
asdict: [bool] -optional-
Should this return a dictionary or a list
- asdict=True: {name: fritz{fobject}}
- asdict=False: [fritz{fobject}]
Returns
-------
Dictionary {name: fritz{fobject}}
"""
KNOW_OBJECT = ["lightcurve","photometry", "spectra", "spectrum", "alerts","source"]
if fobject not in KNOW_OBJECT:
raise ValueError(f"Unknown fritz object {fobject}")
if fobject == "spectrum":
fobject = "spectra"
if fobject == "photometry":
fobject = "lightcurve"
if client is not None:
from dask import delayed
dl_func = eval(f"_single_download_{fobject}_")
d_download = [delayed(dl_func)([name, force_dl, store]) for name in names]
return client.compute(d_download)
from .utils.tools import is_running_from_notebook
import multiprocessing
nnames = len(names)
#
# - Progress bar or not
if show_progress:
from astropy.utils.console import ProgressBar
bar = ProgressBar( nnames, ipython_widget=is_running_from_notebook())
else:
bar = None
#
# - Input
objects = {}
force_dl = [force_dl]*nnames
store = [store]*nnames
#
# - Multiprocessing
with multiprocessing.Pool(nprocess) as p:
# Da Loop
for j, flc in enumerate( p.imap(eval(f"_single_download_{fobject}_"), zip(names, force_dl, store) ) ):
if bar is not None:
bar.update(j)
objects[names[j]] = flc
if bar is not None:
bar.update(nnames)
return objects if asdict else list(objects.values())
def _single_download_lightcurve_(args):
""" """
name, force_dl, store = args
return FritzPhotometry.from_name(name, force_dl=force_dl, store=store)
def _single_download_spectra_(args):
""" """
name, force_dl, store = args
return FritzSpectrum.from_name(name, force_dl=force_dl, store=store)
def _single_download_alerts_(args):
""" """
name, force_dl, store = args
return FritzAlerts.from_name(name, force_dl=force_dl, store=store)
def _single_download_source_(args):
""" """
name, force_dl, store = args
return FritzSource.from_name(name, force_dl=force_dl, store=store)
# =============== #
# #
# LightCurve #
# #
# =============== #
def download_lightcurve(name, get_object=False,
token=None, clean_groupcolumn=True,
format=None, magsys=None, store=False,
verbose=False,
**kwargs):
"""
Parameters
----------
format: [string] -optional-
= skyportal api option =
flux or mag (None means default)
magsys: [string] -optional-
= skyportal api option =
ab or vega (None means default)
**kwargs are ignored (here for backward compatibilities)
"""
#
# - start: addon
addon = []
if format is not None:
addon.append(f"format={format}")
if magsys is not None:
addon.append(f"magsys={magsys}")
addon = "" if len(addon)==0 else "?"+"&".join(addon)
# - end: addon
#
q_url = _BASE_FRITZ_URL+f'api/sources/{name}/photometry{addon}'
if verbose:
print(f"queried URL: {q_url}")
lcdata = api('get', q_url, load=True, token=token)
lcdata = pandas.DataFrame(lcdata)
if clean_groupcolumn:
lcdata["groups"] = [[i_["id"] for i_ in lcdata["groups"].iloc[i]]
for i in range(len(lcdata))]
# - output
if not store and not get_object:
return lcdata
flcdata = FritzPhotometry( lcdata )
if store:
flcdata.store()
return flcdata if get_object else lcdata
# =============== #
# #
# Spectra #
# #
# =============== #
def download_spectra(name, get_object=False, token=None, store=False, verbose=False):
""" """
q_url = _BASE_FRITZ_URL+f'api/sources/{name}/spectra'
if verbose:
print(f"queried URL: {q_url}")
list_of_dict = api('get', q_url, load=True, token=token)
#
# - Any problem ?
if list_of_dict is None or len(list_of_dict)==0:
warnings.warn(f"no spectra downloaded. {q_url} download is empty")
return None
spectra = list_of_dict["spectra"]
if spectra is None or len(spectra)==0:
warnings.warn(f"no spectra downloaded. {q_url} download is empty")
return None
# - No ? Good
#
if not store and not get_object:
return spectra
if spectra is None or len(spectra)==0:
return None
if len(spectra)==1:
fspectra = FritzSpectrum(spectra[0])
if store:
fspectra.store()
else:
fspectra = [FritzSpectrum(spec_) for spec_ in spectra]
if store:
[fspec_.store() for fspec_ in fspectra]
return fspectra if get_object else spectra
# =============== #
# #
#    Alerts     #
# #
# =============== #
def download_alerts(name, candid=None, allfields=None,
get_object=False, token=None, store=False, verbose=False):
"""
looking for api/alerts/{name}{addon}
Parameters
----------
candid: [int/str]
alert candid like: 1081317100915015025
"""
#
# - start: addon
addon = []
if candid is not None:
addon.append(f"candid={candid}")
if allfields is not None:
addon.append(f"includeAllFields={allfields}")
addon = "" if len(addon)==0 else "?"+"&".join(addon)
# - end: addon
#
q_url = _BASE_FRITZ_URL+f'api/alerts/{name}{addon}'
if verbose:
print(f"queried URL: {q_url}")
alerts = api('get',q_url, load=True, token=token)
# - output
if not store and not get_object:
return alerts
falerts = FritzAlerts.from_alerts(alerts)
if store:
falerts.store()
return falerts if get_object else alerts
# =============== #
# #
# Source #
# #
# =============== #
def download_source(name, get_object=False, token=None, store=False, verbose=False):
""" """
addon=''
q_url = _BASE_FRITZ_URL+f'api/sources/{name}{addon}'
if verbose:
print(f"queried URL: {q_url}")
source = api('get', q_url, load=True, token=token)
if not store and not get_object:
return source
fsource = FritzSource(source)
if store:
fsource.store()
return fsource if get_object else source
# =============== #
# --------------- #
# - Sample - #
# --------------- #
# =============== #
def download_sample( groupid, get_object=False,
savesummary=False,
savedafter=None, savedbefore=None,
name=None,
includephotometry=None,
includerequested=None,
addon=None, token=None,
store=False, verbose=False):
"""
includephotometry: [bool] -optional-
Includes the photometric table inside sources["photometry"]
"""
#
# - start: addon
if addon is None:
addon = []
elif type(addon) is str:
addon = [addon]
if savesummary:
addon.append(f"saveSummary=true")
if store:
warnings.warn("store option not available if savesummary=True.")
store=False
if groupid is not None and groupid not in ["*", "all"]:
addon.append(f"group_ids={groupid}")
if savedafter is not None:
addon.append(f"savedAfter={time.Time(savedafter).isot}")
if savedbefore is not None:
addon.append(f"savedBefore={time.Time(savedbefore).isot}")
if name is not None:
addon.append(f"sourceID={name}")
if includephotometry is not None:
addon.append(f"includePhotometry={includephotometry}")
if includerequested is not None:
addon.append(f"includeRequested={includerequested}")
addon = "" if len(addon)==0 else "?"+"&".join(addon)
# - end: addon
#
q_url = _BASE_FRITZ_URL+f"api/sources{addon}"
if verbose:
print(f"queried URL: {q_url}")
sources = api('get', q_url, load=True, token=token)
if not store and not get_object:
return sources
sample = FritzSample(sources, groupid)
if store:
sample.store()
return sample if get_object else sources
#
# Group
#
def download_groups(get_object=False, token=None, store=True, verbose=False):
""" """
q_url = _BASE_FRITZ_URL+f'api/groups'
if verbose:
print(f"queried URL: {q_url}")
groups = api('get',q_url, load=True, token=token)
if not store and not get_object:
return groups
fgroups = FritzGroups(groups)
if store:
fgroups.store()
return fgroups if get_object else groups
# -------------- #
# Data I/O #
# -------------- #
#
# Spectra
#
def parse_spectrum_filename(filename):
""" """
directory = os.path.dirname(filename)
basename = os.path.basename(filename).split(".")[0]
extension = filename.split(".")[-1]
if not basename.startswith("fritz"):
raise ValueError("Cannot parse the given name. Not a fritz_bla file.")
_, instspectrum, name, *orig = basename.split("_")
originalname = "_".join(orig)
return {"instrument":instspectrum.replace("spectrum",""),
"name":name,
"original_file_filename":originalname,
"extension":extension,
"directory":directory}
####################
# #
# Classes #
# #
####################
# -------------- #
# Photometry/ #
# LightCurve #
# -------------- #
class FritzPhotometry( object ):
""" """
def __init__(self, dataframe=None):
""" """
if dataframe is not None:
self.set_data(dataframe)
@classmethod
def from_fritz(cls, name):
""" """
print("FritzPhotometry.from_fritz(name) is DEPRECATED, use FritzPhotometry.from_name(name)")
return cls.from_name(name)
@classmethod
def from_name(cls, name, force_dl=False, store=False, **kwargs):
""" """
if not force_dl:
filename = cls._build_filename_(name, **kwargs)
if os.path.isfile(filename):
extension = filename.split(".")[-1]
return getattr(cls,f"read_{extension}")(filename)
return cls( download_lightcurve(name, get_object=False, store=store) )
# ============= #
# Method #
# ============= #
# --------- #
# I/O #
# --------- #
def store(self, fileout=None, dirout="default", extension="csv", **kwargs):
""" calls the self.to_{extension} with the default naming convention. """
# can differ to extension if fileout given
if fileout is None:
fileout = self._build_filename_(self.name, dirout=dirout, extension=extension)
if extension in ["csv","json","parquet",]:
return getattr(self,f"to_{extension}")(fileout, **kwargs)
if extension in ["hdf","hd5","hdf5","h5"]:
return self.to_hdf(fileout, **kwargs)
raise ValueError(f"only 'csv','json', 'hdf5' extension implemented ; {extension} given")
# - read file
@classmethod
def read_parquet(cls, filename, **kwargs):
""" """
return cls(pandas.read_parquet(filename, **kwargs))
@classmethod
def read_csv(cls, filename, **kwargs):
""" """
return cls(
|
pandas.read_csv(filename, **kwargs)
|
pandas.read_csv
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/17 13:06
Desc: Futures - China - exchanges - member position data interface
Covers the Dalian Commodity Exchange (DCE), Shanghai Futures Exchange (SHFE),
Zhengzhou Commodity Exchange (CZCE) and China Financial Futures Exchange (CFFEX).
Collects the top-20 member position data;
collecting the current day's data after 16:30 is recommended, as exchange updates can be unstable before then.
CZCE publishes its files in three different formats.
DCE also publishes per-contract position rankings, available via futures_dce_position_rank.
20171228
http://www.czce.com.cn/cn/DFSStaticFiles/Future/2020/20200727/FutureDataHolding.txt
20100825
http://www.czce.com.cn/cn/exchange/2014/datatradeholding/20140515.txt
"""
import datetime
import json
import re
import warnings
import zipfile
from io import BytesIO
from io import StringIO
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.futures import cons
from akshare.futures.requests_fun import (
requests_link
)
from akshare.futures.symbol_var import chinese_to_english, find_chinese
from akshare.futures.symbol_var import (
symbol_varieties
)
calendar = cons.get_calendar()
rank_columns = ['vol_party_name', 'vol', 'vol_chg', 'long_party_name', 'long_open_interest',
'long_open_interest_chg', 'short_party_name', 'short_open_interest', 'short_open_interest_chg']
intColumns = ['vol', 'vol_chg', 'long_open_interest', 'long_open_interest_chg', 'short_open_interest',
'short_open_interest_chg']
def get_rank_sum_daily(start_day="20200721", end_day="20200723", vars_list=cons.contract_symbols):
"""
Collect the top-5/10/15/20 member position ranking data from the four futures exchanges.
Note 1: SHFE and CFFEX only publish rankings per contract within each variety, not an overall variety
ranking, so the per-variety figures returned here are obtained by summing over every contract of the
variety and are not a true variety ranking list.
Note 2: DCE only publishes per-variety rankings, not per-contract rankings.
:param start_day: start date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param end_day: end date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param vars_list: list of contract varieties such as RB, AL; all commodities when empty
:return: pd.DataFrame with the columns:
symbol                         contract                                                string
var                            commodity variety                                       string
vol_top5                       total volume of the top-5 members by volume             int
vol_chg_top5                   total volume change of the top-5 members                int
long_open_interest_top5        total long open interest of the top-5 members           int
long_open_interest_chg_top5    total long open interest change of the top-5 members    int
short_open_interest_top5       total short open interest of the top-5 members          int
short_open_interest_chg_top5   total short open interest change of the top-5 members   int
vol_top10                      total volume of the top-10 members by volume            int
...
date                           date                                                    string YYYYMMDD
"""
start_day = cons.convert_date(start_day) if start_day is not None else datetime.date.today()
end_day = cons.convert_date(end_day) if end_day is not None else cons.convert_date(
cons.get_latest_data_date(datetime.datetime.now()))
records = pd.DataFrame()
while start_day <= end_day:
print(start_day)
if start_day.strftime('%Y%m%d') in calendar:
data = get_rank_sum(start_day, vars_list)
if data is False:
print(f"{start_day.strftime('%Y-%m-%d')}日交易所数据连接失败,已超过20次,您的地址被网站墙了,请保存好返回数据,稍后从该日期起重试")
return records.reset_index(drop=True)
records = records.append(data)
else:
warnings.warn(f"{start_day.strftime('%Y%m%d')}非交易日")
start_day += datetime.timedelta(days=1)
return records.reset_index(drop=True)
def get_rank_sum(date="20200727", vars_list=cons.contract_symbols):
"""
Fetch the top-5/10/15/20 member position ranking data from the four futures exchanges.
Note 1: SHFE and CFFEX only publish rankings per contract within each variety, not an overall variety
ranking, so the per-variety figures returned here are obtained by summing over every contract of the
variety and are not a true variety ranking list.
Note 2: DCE only publishes per-variety rankings, not per-contract rankings.
:param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param vars_list: list of contract varieties such as RB, AL; all commodities when empty
:return: pd.DataFrame with the columns:
symbol                         contract                                                string
var                            commodity variety                                       string
vol_top5                       total volume of the top-5 members by volume             int
vol_chg_top5                   total volume change of the top-5 members                int
long_open_interest_top5        total long open interest of the top-5 members           int
long_open_interest_chg_top5    total long open interest change of the top-5 members    int
short_open_interest_top5       total short open interest of the top-5 members          int
short_open_interest_chg_top5   total short open interest change of the top-5 members   int
vol_top10                      total volume of the top-10 members by volume            int
...
date                           date                                                    string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date.strftime('%Y%m%d') not in calendar:
warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return None
dce_var = [i for i in vars_list if i in cons.market_exchange_symbols['dce']]
shfe_var = [i for i in vars_list if i in cons.market_exchange_symbols['shfe']]
czce_var = [i for i in vars_list if i in cons.market_exchange_symbols['czce']]
cffex_var = [i for i in vars_list if i in cons.market_exchange_symbols['cffex']]
big_dict = {}
if len(dce_var) > 0:
data = get_dce_rank_table(date, dce_var)
if data is False:
return False
big_dict.update(data)
if len(shfe_var) > 0:
data = get_shfe_rank_table(date, shfe_var)
if data is False:
return False
big_dict.update(data)
if len(czce_var) > 0:
data = get_czce_rank_table(date, czce_var)
if data is False:
return False
big_dict.update(data)
if len(cffex_var) > 0:
data = get_cffex_rank_table(date, cffex_var)
if data is False:
return False
big_dict.update(data)
records = pd.DataFrame()
for symbol, table in big_dict.items():
table = table.applymap(lambda x: 0 if x == '' else x)
for symbol_inner in set(table['symbol']):
var = symbol_varieties(symbol_inner)
if var in vars_list:
table_cut = table[table['symbol'] == symbol_inner]
table_cut['rank'] = table_cut['rank'].astype('float')
table_cut_top5 = table_cut[table_cut['rank'] <= 5]
table_cut_top10 = table_cut[table_cut['rank'] <= 10]
table_cut_top15 = table_cut[table_cut['rank'] <= 15]
table_cut_top20 = table_cut[table_cut['rank'] <= 20]
big_dict = {'symbol': symbol_inner, 'variety': var,
'vol_top5': table_cut_top5['vol'].sum(), 'vol_chg_top5': table_cut_top5['vol_chg'].sum(),
'long_open_interest_top5': table_cut_top5['long_open_interest'].sum(),
'long_open_interest_chg_top5': table_cut_top5['long_open_interest_chg'].sum(),
'short_open_interest_top5': table_cut_top5['short_open_interest'].sum(),
'short_open_interest_chg_top5': table_cut_top5['short_open_interest_chg'].sum(),
'vol_top10': table_cut_top10['vol'].sum(),
'vol_chg_top10': table_cut_top10['vol_chg'].sum(),
'long_open_interest_top10': table_cut_top10['long_open_interest'].sum(),
'long_open_interest_chg_top10': table_cut_top10['long_open_interest_chg'].sum(),
'short_open_interest_top10': table_cut_top10['short_open_interest'].sum(),
'short_open_interest_chg_top10': table_cut_top10['short_open_interest_chg'].sum(),
'vol_top15': table_cut_top15['vol'].sum(),
'vol_chg_top15': table_cut_top15['vol_chg'].sum(),
'long_open_interest_top15': table_cut_top15['long_open_interest'].sum(),
'long_open_interest_chg_top15': table_cut_top15['long_open_interest_chg'].sum(),
'short_open_interest_top15': table_cut_top15['short_open_interest'].sum(),
'short_open_interest_chg_top15': table_cut_top15['short_open_interest_chg'].sum(),
'vol_top20': table_cut_top20['vol'].sum(),
'vol_chg_top20': table_cut_top20['vol_chg'].sum(),
'long_open_interest_top20': table_cut_top20['long_open_interest'].sum(),
'long_open_interest_chg_top20': table_cut_top20['long_open_interest_chg'].sum(),
'short_open_interest_top20': table_cut_top20['short_open_interest'].sum(),
'short_open_interest_chg_top20': table_cut_top20['short_open_interest_chg'].sum(),
'date': date.strftime('%Y%m%d')
}
records = records.append(pd.DataFrame(big_dict, index=[0]))
if len(big_dict.items()) > 0:
add_vars = [i for i in cons.market_exchange_symbols['shfe'] + cons.market_exchange_symbols['cffex'] if
i in records['variety'].tolist()]
for var in add_vars:
records_cut = records[records['variety'] == var]
var_record = pd.DataFrame(records_cut.sum()).T
var_record['date'] = date.strftime('%Y%m%d')
var_record.loc[:, ['variety', 'symbol']] = var
records = records.append(var_record)
return records.reset_index(drop=True)
def get_shfe_rank_table(date=None, vars_list=cons.contract_symbols):
"""
Detailed top-20 member position ranking data from the Shanghai Futures Exchange.
Note: this exchange only publishes rankings per contract within each variety, not an overall variety ranking.
Data are available from 20020107 and are updated around 16:30 on each trading day.
:param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param vars_list: list of contract varieties such as RB, AL; all commodities when empty
:return: pd.DataFrame
rank                      rank                                              int
vol_party_name            member at this rank by trading volume             string (Chinese)
vol                       trading volume of this member                     int
vol_chg                   change in trading volume of this member           int
long_party_name           member at this rank by long open interest         string (Chinese)
long_open_interest        long open interest held by this member            int
long_open_interest_chg    change in this member's long open interest        int
short_party_name          member at this rank by short open interest        string (Chinese)
short_open_interest       short open interest held by this member           int
short_open_interest_chg   change in this member's short open interest       int
symbol                    contract                                          string
var                       variety                                           string
date                      date                                              string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2002, 1, 7):
print("shfe数据源开始日期为20020107,跳过")
return {}
if date.strftime('%Y%m%d') not in calendar:
warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
url = cons.SHFE_VOL_RANK_URL % (date.strftime('%Y%m%d'))
r = requests_link(url, 'utf-8')
try:
context = json.loads(r.text)
except:
return {}
df = pd.DataFrame(context['o_cursor'])
df = df.rename(
columns={'CJ1': 'vol', 'CJ1_CHG': 'vol_chg', 'CJ2': 'long_open_interest', 'CJ2_CHG': 'long_open_interest_chg',
'CJ3': 'short_open_interest',
'CJ3_CHG': 'short_open_interest_chg', 'PARTICIPANTABBR1': 'vol_party_name',
'PARTICIPANTABBR2': 'long_party_name',
'PARTICIPANTABBR3': 'short_party_name', 'PRODUCTNAME': 'product1', 'RANK': 'rank',
'INSTRUMENTID': 'symbol', 'PRODUCTSORTNO': 'product2'})
if len(df.columns) < 3:
return {}
df = df.applymap(lambda x: x.strip() if isinstance(x, str) else x)
df = df.applymap(lambda x: None if x == '' else x)
df['variety'] = df['symbol'].apply(lambda x: symbol_varieties(x))
df = df[df['rank'] > 0]
for col in ['PARTICIPANTID1', 'PARTICIPANTID2', 'PARTICIPANTID3', 'product1', 'product2']:
try:
del df[col]
except:
pass
get_vars = [var for var in vars_list if var in df['variety'].tolist()]
big_dict = {}
for var in get_vars:
df_var = df[df['variety'] == var]
for symbol in set(df_var['symbol']):
df_symbol = df_var[df_var['symbol'] == symbol]
big_dict[symbol] = df_symbol.reset_index(drop=True)
return big_dict
def _czce_df_read(url, skip_rows, encoding='utf-8', header=0):
"""
Read tabular web page data from the Zhengzhou Commodity Exchange.
:param header: header row passed through to pd.read_html
:type header: int
:param url: website URL string
:param skip_rows: number of leading rows to skip int
:param encoding: utf-8 or gbk or gb2312
:return: pd.DataFrame
"""
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
"Host": "www.czce.com.cn",
"Cookie": "XquW6dFMPxV380S=CAaD3sMkdXv3fUoaJlICIEv0MVegGq5EoMyBcxkOjCgSjmpuovYFuTLtYFcxTZGw; XquW6dFMPxV380T=5QTTjUlA6f6WiDO7fMGmqNxHBWz.hKIc8lb_tc1o4nHrJM4nsXCAI9VHaKyV_jkHh4cIVvD25kGQAh.MvLL1SHRA20HCG9mVVHPhAzktNdPK3evjm0NYbTg2Gu_XGGtPhecxLvdFQ0.JlAxy_z0C15_KdO8kOI18i4K0rFERNPxjXq5qG1Gs.QiOm976wODY.pe8XCQtAsuLYJ.N4DpTgNfHJp04jhMl0SntHhr.jhh3dFjMXBx.JEHngXBzY6gQAhER7uSKAeSktruxFeuKlebse.vrPghHqWvJm4WPTEvDQ8q",
}
r = requests_link(url, encoding, headers=headers)
data = pd.read_html(r.text, match='.+', flavor=None, header=header, index_col=0, skiprows=skip_rows, attrs=None,
parse_dates=False, thousands=', ', encoding="gbk", decimal='.',
converters=None, na_values=None, keep_default_na=True)
return data
def get_czce_rank_table(date="20200727", vars_list=cons.contract_symbols):
"""
Detailed top-20 member position ranking data from the Zhengzhou Commodity Exchange.
Note: this exchange publishes both per-variety and per-contract rankings.
:param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param vars_list: list of contract varieties such as RB, AL; all commodities when empty. Data are available from 20050509 and are updated around 16:30 on each trading day.
:return: pd.DataFrame
rank                      rank                                              int
vol_party_name            member at this rank by trading volume             string (Chinese)
vol                       trading volume of this member                     int
vol_chg                   change in trading volume of this member           int
long_party_name           member at this rank by long open interest         string (Chinese)
long_open_interest        long open interest held by this member            int
long_open_interest_chg    change in this member's long open interest        int
short_party_name          member at this rank by short open interest        string (Chinese)
short_open_interest       short open interest held by this member           int
short_open_interest_chg   change in this member's short open interest       int
symbol                    contract                                          string
var                       variety                                           string
date                      date                                              string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2015, 10, 8):
print("CZCE可获取的数据源开始日期为 20151008, 请输入合适的日期参数")
return {}
if date.strftime('%Y%m%d') not in calendar:
warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
if date >= datetime.date(2015, 10, 8):
url = f"http://www.czce.com.cn/cn/DFSStaticFiles/Future/{date.year}/{date.isoformat().replace('-', '')}/FutureDataHolding.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_pinzhong_index = [item + 1 for item in temp_df[temp_df.iloc[:, 0].str.contains("合计")].index.to_list()]
temp_pinzhong_index.insert(0, 0)
temp_pinzhong_index.pop()
temp_symbol_index = temp_df.iloc[temp_pinzhong_index, 0].str.split(" ", expand=True).iloc[:, 0]
symbol_list = [re.compile(r"[0-9a-zA-Z_]+").findall(item)[0] for item in temp_symbol_index.values]
temp_symbol_index_list = temp_symbol_index.index.to_list()
big_dict = {}
for i in range(len(temp_symbol_index_list)-1):
inner_temp_df = temp_df[temp_symbol_index_list[i]+2: temp_symbol_index_list[i+1]-1]
inner_temp_df.columns = ["rank",
"vol_party_name",
"vol",
"vol_chg",
"long_party_name",
"long_open_interest",
"long_open_interest_chg",
"short_party_name",
"short_open_interest",
"short_open_interest_chg",
]
inner_temp_df.reset_index(inplace=True, drop=True)
big_dict[symbol_list[i]] = inner_temp_df
inner_temp_df = temp_df[temp_symbol_index_list[i+1]+2:-1]
inner_temp_df.columns = ["rank",
"vol_party_name",
"vol",
"vol_chg",
"long_party_name",
"long_open_interest",
"long_open_interest_chg",
"short_party_name",
"short_open_interest",
"short_open_interest_chg",
]
inner_temp_df.reset_index(inplace=True, drop=True)
big_dict[symbol_list[-1]] = inner_temp_df
new_big_dict = {}
for key, value in big_dict.items():
value["symbol"] = key
value["variety"] = re.compile(r"[a-zA-Z_]+").findall(key)[0]
new_big_dict[key] = value
return new_big_dict
def get_dce_rank_table(date="20200727", vars_list=cons.contract_symbols):
"""
Detailed top-20 member position ranking data from the Dalian Commodity Exchange.
Note: this exchange publishes both per-variety and per-contract rankings.
:param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param vars_list: list of contract varieties such as RB, AL; all commodities when empty. Data are available from 20060104 and are updated around 16:30 on each trading day.
:return: pandas.DataFrame
rank                      rank                                              int
vol_party_name            member at this rank by trading volume             string (Chinese)
vol                       trading volume of this member                     int
vol_chg                   change in trading volume of this member           int
long_party_name           member at this rank by long open interest         string (Chinese)
long_open_interest        long open interest held by this member            int
long_open_interest_chg    change in this member's long open interest        int
short_party_name          member at this rank by short open interest        string (Chinese)
short_open_interest       short open interest held by this member           int
short_open_interest_chg   change in this member's short open interest       int
symbol                    contract                                          string
var                       variety                                           string
date                      date                                              string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2006, 1, 4):
print(Exception("大连商品交易所数据源开始日期为20060104,跳过"))
return {}
if date.strftime('%Y%m%d') not in calendar:
warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
vars_list = [i for i in vars_list if i in cons.market_exchange_symbols['dce']]
big_dict = {}
for var in vars_list:
url = cons.DCE_VOL_RANK_URL % (var.lower(), var.lower(), date.year, date.month - 1, date.day)
list_60_name = []
list_60 = []
list_60_chg = []
rank = []
texts = requests_link(url).content.splitlines()
if not texts:
return False
if len(texts) > 30:
for text in texts:
line = text.decode("utf-8")
string_list = line.split()
try:
if int(string_list[0]) <= 20:
list_60_name.append(string_list[1])
list_60.append(string_list[2])
list_60_chg.append(string_list[3])
rank.append(string_list[0])
except:
pass
table_cut = pd.DataFrame({'rank': rank[0:20],
'vol_party_name': list_60_name[0:20],
'vol': list_60[0:20],
'vol_chg': list_60_chg[0:20],
'long_party_name': list_60_name[20:40],
'long_open_interest': list_60[20:40],
'long_open_interest_chg': list_60_chg[20:40],
'short_party_name': list_60_name[40:60],
'short_open_interest': list_60[40:60],
'short_open_interest_chg': list_60_chg[40:60]
})
table_cut = table_cut.applymap(lambda x: x.replace(',', ''))
table_cut = _table_cut_cal(table_cut, var)
big_dict[var] = table_cut.reset_index(drop=True)
return big_dict
def get_cffex_rank_table(date="20200427", vars_list=cons.contract_symbols):
"""
Detailed top-20 member position ranking data from the China Financial Futures Exchange.
Note: this exchange publishes both per-variety and per-contract rankings.
:param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
:param vars_list: list of contract varieties such as RB, AL; all commodities when empty. Data are available from 20100416 and are updated around 16:30 on each trading day.
:return: pd.DataFrame
rank                      rank                                              int
vol_party_name            member at this rank by trading volume             string (Chinese)
vol                       trading volume of this member                     int
vol_chg                   change in trading volume of this member           int
long_party_name           member at this rank by long open interest         string (Chinese)
long_open_interest        long open interest held by this member            int
long_open_interest_chg    change in this member's long open interest        int
short_party_name          member at this rank by short open interest        string (Chinese)
short_open_interest       short open interest held by this member           int
short_open_interest_chg   change in this member's short open interest       int
symbol                    contract                                          string
var                       variety                                           string
date                      date                                              string YYYYMMDD
"""
vars_list = [i for i in vars_list if i in cons.market_exchange_symbols['cffex']]
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2010, 4, 16):
print(Exception("cffex数据源开始日期为20100416,跳过"))
return {}
if date.strftime('%Y%m%d') not in calendar:
warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
big_dict = {}
for var in vars_list:
# print(var)
# var = "IF"
url = cons.CFFEX_VOL_RANK_URL % (date.strftime('%Y%m'), date.strftime('%d'), var)
r = requests_link(url, encoding='gbk')
if not r:
return False
if '网页错误' not in r.text:
try:
temp_chche = StringIO(r.text.split('\n交易日,')[1])
except:
temp_chche = StringIO(r.text.split('\n交易日,')[0][4:])  # the data layout changed from 20200316 onwards; unify the format
table = pd.read_csv(temp_chche)
table = table.dropna(how='any')
table = table.applymap(lambda x: x.strip() if isinstance(x, str) else x)
for symbol in set(table['合约']):
table_cut = table[table['合约'] == symbol]
table_cut.columns = ['symbol', 'rank'] + rank_columns
table_cut = _table_cut_cal(
|
pd.DataFrame(table_cut)
|
pandas.DataFrame
|
#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
# script: getMutationCounts_overall_and_GOI.py
# author: Lincoln
# date: 10.11.18
#
# This script performs some basic analysis on vcf files, as output by
# my SNP_detection_pipeline. It has 4 separate run modes:
# 1. get raw mutation counts, for every cell
# 2. get mutation counts, after filtering through COSMIC database
# 3. get mutation counts, ' ',
# with the specific LAUD annotation
# 4. for a given GOI, which cells have mutations, and what are those
# mutations, on the amino acid level? This creates the necessary
# input for all of the lolliplot stuff. As well as for
# makeSummaryTable.ipynb
#
#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
import numpy as np
import VCF # comes from <NAME>
import os
import csv
import pandas as pd
import sys
import itertools
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#////////////////////////////////////////////////////////////////////
# getFileNames()
# Get file names based on the specified path
#
#////////////////////////////////////////////////////////////////////
def getFileNames():
files = []
for file in os.listdir("vcf_germline_filter/"):
if file.endswith(".vcf"):
fullPath = (os.path.join("vcf_germline_filter/", file))
files.append(fullPath)
return files
#////////////////////////////////////////////////////////////////////
# getRawCounts()
# Creates dictionary obj with raw counts for GATK hits w/in a given set of vcf files
#
#////////////////////////////////////////////////////////////////////
def getRawCounts(fileNames):
print('getting raw counts...')
cells_dict = {}
for f in fileNames:
cell = f.replace("vcf_germline_filter/", "")
cell = cell.replace(".vcf", "")
df = VCF.dataframe(f)
unique = len(np.unique(df.POS))
cells_dict.update({cell : unique})
print('finished!')
return cells_dict
#////////////////////////////////////////////////////////////////////
# getGenomePos()
# Returns a genome position string that will match against the ones w/in COSMIC db
#
#////////////////////////////////////////////////////////////////////
def getGenomePos(sample):
try:
chr = sample[0]
chr = chr.replace("chr", "")
pos = int(sample[1])
ref = str(sample[3])
alt = str(sample[4])
if (len(ref) == 1) & (len(alt) == 1): # most basic case
secondPos = pos
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
elif (len(ref) > 1) & (len(alt) == 1):
secondPos = pos + len(ref)
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
elif (len(alt) > 1) & (len(ref) == 1):
secondPos = pos + len(alt)
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
else: # BOTH > 1 .... not sure what to do here. does this actually happen?
secondPos = 'dummy'
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
except:
genomePos = 'chr0:0-0'
return(genomePos)
#////////////////////////////////////////////////////////////////////
# getFilterCountsBasic()
# Creates dictionary obj with COSMIC filtered GATK hits w/in a given set of vcfs
#
#////////////////////////////////////////////////////////////////////
def getFilterCountsBasic(fileNames):
print('getting filter counts basic...')
cells_dict_filter = {}
genomePos_db = pd.Series(database['Mutation genome position'])
for f in fileNames:
cell = f.replace("vcf_germline_filter/", "")
cell = cell.replace(".vcf", "")
print(cell)
df = VCF.dataframe(f)
genomePos_query = df.apply(getGenomePos, axis=1)
shared = list(set(genomePos_query) & set(genomePos_db))
cells_dict_filter.update({cell : len(shared)})
print('finished!')
return cells_dict_filter
#////////////////////////////////////////////////////////////////////
# getLAUD_db()
# Return the cosmic database after lung adeno filter
#
#////////////////////////////////////////////////////////////////////
def getLAUD_db():
print('setting up LAUD filtered database...')
pHistList = database.index[database['Primary histology'] == 'carcinoma'].tolist()
pSiteList = database.index[database['Primary site'] == 'lung'].tolist()
shared = list(set(pHistList) & set(pSiteList))
database_filter = database.iloc[shared]
return database_filter
#////////////////////////////////////////////////////////////////////
# getFilterCountsLAUD()
# Creates dictionary obj with COSMIC filtered GATK hits w/in a given set of vcfs
#
#////////////////////////////////////////////////////////////////////
def getFilterCountsLAUD(fileNames):
print('getting filter counts LAUD...')
cells_dict_laud = {}
genomePos_laud_db = pd.Series(database_laud['Mutation genome position'])
for f in fileNames:
cell = f.replace("vcf_germline_filter/", "")
cell = cell.replace(".vcf", "")
df = VCF.dataframe(f)
genomePos_query = df.apply(getGenomePos, axis=1) # apply function for every row in df
shared = list(set(genomePos_query) & set(genomePos_laud_db))
cells_dict_laud.update({cell : len(shared)})
print('finished!')
return cells_dict_laud
#////////////////////////////////////////////////////////////////////
# hitSearchFunc()
# Performs the actual search
# REMEMBER: `match` is just a boolean
#////////////////////////////////////////////////////////////////////
def hitSearchFunc(sample):
match = 0
currChrom = sample.split(':')[0]
if currChrom == queryChrom:
sub0 = sample.split('-')[0] # split on `-`
sub1 = sample.split('-')[1] # this guy is good
sub00 = sub0.split(':')[1] # split on :, need to get rid of chrom
try:
lPosCurr = sub00
rPosCurr = sub1
# rPosQuery and lPosQuery are GLOBALs
if (lPosCurr >= lPosQuery) & (lPosCurr <= rPosQuery): # left position good
if (rPosCurr >= lPosQuery) & (rPosCurr <= rPosQuery): # right position good
match = 1
except IndexError:
print('index error')
return match
#////////////////////////////////////////////////////////////////////
# hitSearchFunc_coords()
# given a list of shared entries between an individual cell's VCF
# and the COSMIC LAUD db, searches for hits to the GOI, as specified
# with cmd line option '4'
#
# REMEMBER: `match` is NOT a bool here
#
# passing in *args so that i can tell what cell im finding indels in!!!
#////////////////////////////////////////////////////////////////////
def hitSearchFunc_coords(sample, *args):
cell_ = args[0]
match = ""
print(sample)
currChrom = sample.split(':')[0]
if currChrom == queryChrom:
print('here i am!')
sub0 = sample.split('-')[0] # split on `-`
sub1 = sample.split('-')[1] # this guy is good
sub00 = sub0.split(':')[1] # split on :, need to get rid of chrom
try:
lPosCurr = sub00
rPosCurr = sub1
# keep in mind rPosQuery and lPosQuery are GLOBALs
if (lPosCurr >= lPosQuery) & (lPosCurr <= rPosQuery): # left pos GOI match
if (rPosCurr >= lPosQuery) & (rPosCurr <= rPosQuery): # right pos GOI match
if lPosCurr == rPosCurr: # SNP
match = lPosCurr
else: # found an indel!!
match = lPosCurr + '-' + rPosCurr
#print(cell_)
except IndexError:
print('index error')
return match
#////////////////////////////////////////////////////////////////////
# getGOIHits()
# Creates dictionary obj with hits to a specific Gene of Interest
#
#////////////////////////////////////////////////////////////////////
def getGOIHits(fileNames, chrom, pos1, pos2):
print('getting hits to GOI')
global queryChrom, lPosQuery, rPosQuery # dont like this
genomePos_laud_db = pd.Series(database_laud['Mutation genome position'])
cells_dict_GOI = {}
queryChrom = chrom
lPosQuery = pos1
rPosQuery = pos2
for f in fileNames:
numMatches = 0
cell = f.replace("vcf_germline_filter/", "")
cell = cell.replace(".vcf", "")
df = VCF.dataframe(f)
genomePos_query = df.apply(getGenomePos, axis=1) # apply function for every row in df
shared = list(set(genomePos_query) & set(genomePos_laud_db)) # get the LAUD filter set
shared1 = pd.Series(shared) # what if i convert this guy to a pandas object?
numMatches = shared1.apply(hitSearchFunc) # another apply call
cells_dict_GOI.update({cell : sum(numMatches)})
return cells_dict_GOI
#////////////////////////////////////////////////////////////////////
# getGOIHit_coords()
# Creates dictionary obj with genome coords for hits to specific GOI
#
#////////////////////////////////////////////////////////////////////
def getGOIHit_coords(fileNames, chrom, pos1, pos2):
print('getting coords to GOI hits')
global queryChrom, lPosQuery, rPosQuery # dont like this
genomePos_laud_db = pd.Series(database_laud['Mutation genome position'])
cells_dict_GOI_coords = {}
queryChrom = chrom
lPosQuery = pos1
rPosQuery = pos2
for f in fileNames:
numMatches = 0
cell = f.replace("vcf_germline_filter/", "")
cell = cell.replace(".vcf", "")
df = VCF.dataframe(f)
genomePos_query = df.apply(getGenomePos, axis=1) # apply function for every row in df
# get the entries shared between curr cells VCF and the LAUD filter set
# remember, these are general, and NOT gene specific
genomePos_query_expand = expandSet(set(genomePos_query))
shared = list(set(genomePos_query_expand) & set(genomePos_laud_db))
shared1 =
|
pd.Series(shared)
|
pandas.Series
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List, Optional
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
import torcharrow.pytorch as tap
from torcharrow._interop import (
from_arrow_table,
from_arrow_array,
from_pandas_dataframe,
from_pandas_series,
)
from torcharrow.scope import Scope
# replicated here since we don't expose it from interop.py
# TO DELETE: New logic, mask illegal data...
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
class TestLegacyInterop(unittest.TestCase):
def setUp(self):
self.ts = Scope({"device": "demo"})
def test_numpy_numerics_no_mask(self):
# numerics...
for np_type, ta_type in zip(
[np.int8, np.int16, np.int32, np.int64, np.float32, np.float64],
[dt.int8, dt.int16, dt.int32, dt.int64, dt.Float32(True), dt.Float64(True)],
):
self.assertEqual(dt.typeof_np_dtype(np_type), ta_type)
arr = np.ones((20,), dtype=np_type)
# type preserving
self.assertEqual(dt.typeof_np_dtype(arr.dtype), ta_type)
col = self.ts._FullColumn(arr, dtype=ta_type)
self.assertTrue(col.valid(1))
arr[1] = 99
self.assertEqual(arr[1], 99)
self.assertEqual(col[1], 99)
def test_numpy_numerics_with_mask(self):
for np_type, ta_type in zip(
[np.int8, np.int16, np.int32, np.int64, np.float32, np.float64],
[dt.int8, dt.int16, dt.int32, dt.int64, dt.Float32(True), dt.Float64(True)],
):
data = np.ones((20,), dtype=np_type)
mask = np.full((len(data),), False, dtype=np.bool8)
mask[1] = True
arr = ma.array(data, mask=mask)
col = self.ts._FullColumn(data, dtype=ta_type, mask=mask)
# all defined, except...
self.assertFalse(col.valid(1))
self.assertTrue(col.valid(2))
data[1] = 99
self.assertTrue(ma.is_masked(arr[1]))
self.assertEqual(col[1], None)
def test_strings_no_mask(self):
# dt.strings (with np.str_ representation)
arr = np.array(["a", "b", "cde"], dtype=np.str_)
self.assertEqual(dt.typeof_np_dtype(arr.dtype), dt.string)
col = self.ts._FullColumn(arr, dtype=dt.string)
arr[1] = "kkkk"
self.assertEqual(arr[1], "kkk")
self.assertEqual(col[1], "kkk")
# dt.strings (with object representation)
arr = np.array(["a", "b", "cde"], dtype=object)
self.assertEqual(dt.typeof_np_dtype(arr.dtype), dt.String(True))
col = self.ts._FullColumn(arr, dtype=dt.String(True))
self.assertTrue(col.valid(1))
arr[1] = "kkkk"
self.assertEqual(arr[1], "kkkk")
self.assertEqual(col[1], "kkkk")
def test_strings_with_mask(self):
def is_not_str(s):
return not isinstance(s, str)
# dt.strings (with object representation)
arr = np.array(["a", None, "cde"], dtype=object)
self.assertEqual(dt.typeof_np_dtype(arr.dtype), dt.String(True))
mask = np.vectorize(is_not_str)(arr)
col = self.ts._FullColumn(arr, dtype=dt.String(True), mask=mask)
self.assertTrue(col.valid(0))
self.assertFalse(col.valid(1))
arr[1] = "kkkk"
self.assertEqual(arr[1], "kkkk")
self.assertEqual(col._data[1], "kkkk")
self.assertEqual(col[1], None)
def test_panda_series(self):
s = pd.Series([1, 2, 3])
self.assertEqual(list(s), list(from_pandas_series(s)))
s = pd.Series([1.0, np.nan, 3])
self.assertEqual([1.0, None, 3], list(from_pandas_series(s)))
s = pd.Series([1, 2, 3])
self.assertEqual(list(s), list(from_pandas_series(s, dt.Int16(False))))
s = pd.Series([1, 2, 3])
t = from_pandas_series(s)
self.assertEqual(t.dtype, dt.Int64(False))
self.assertEqual(list(s), list(from_pandas_series(s)))
s = pd.Series([True, False, True])
t = from_pandas_series(s)
self.assertEqual(t.dtype, dt.Boolean(False))
self.assertEqual(list(s), list(from_pandas_series(s)))
s = pd.Series(["a", "b", "c", "d", "e", "f", "g"])
t = from_pandas_series(s)
# TODO Check following assert
# self.assertEqual(t.dtype, dt.String(False))
self.assertEqual(list(s), list(t))
def test_panda_dataframes(self):
s = pd.DataFrame({"a": [1, 2, 3]})
self.assertEqual(
[(i,) for i in s["a"]], list(from_pandas_dataframe(s, scope=self.ts))
)
s = pd.DataFrame({"a": [1.0, np.nan, 3]})
t = from_pandas_dataframe(s, scope=self.ts)
self.assertEqual(list(t), [(i,) for i in [1.0, None, 3]])
# [(i,) for i in list(_column_without_nan(s["a"], dt.Float64(True)))], list(t)
s = pd.DataFrame({"a": [1, 2, 3]})
t = from_pandas_dataframe(
s, dt.Struct([dt.Field("a", dt.Int16(False))]), scope=self.ts
)
self.assertEqual([(i,) for i in s["a"]], list(t))
s = pd.DataFrame({"a": [1, 2, 3]})
t = from_pandas_dataframe(s, scope=self.ts)
self.assertEqual(t.dtype, dt.Struct([dt.Field("a", dt.Int64(False))]))
self.assertEqual([(i,) for i in s["a"]], list(t))
s = pd.DataFrame({"a": [True, False, True]})
t = from_pandas_dataframe(s, scope=self.ts)
self.assertEqual(t.dtype, dt.Struct([dt.Field("a", dt.Boolean(False))]))
self.assertEqual([(i,) for i in s["a"]], list(t))
s = pd.DataFrame({"a": ["a", "b", "c", "d", "e", "f", "g"]})
t = from_pandas_dataframe(s, scope=self.ts)
self.assertEqual(t.dtype, dt.Struct([dt.Field("a", dt.String(True))]))
self.assertEqual([(i,) for i in s["a"]], list(t))
# TODO Check why error is not raised...
# with self.assertRaises(KeyError):
# # KeyError: 'no matching test found for Void(nullable=True)', i.e.
# # NULL Columns are not supported
# s = pd.DataFrame({"a": ["a"], "b": [1], "c": [None], "d": [1.0]})
# t = from_pandas_dataframe(s)
s =
|
pd.DataFrame({"a": ["a"], "b": [1], "c": [True], "d": [1.0]})
|
pandas.DataFrame
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import json
import import_db_assessment
def createTransformersVariable(transformerRule):
# Convert the JSON fields into Python values (dictionaries, lists, strings, numbers) and return them
if str(transformerRule['action_details']['datatype']).upper() == 'DICTIONARY':
# For dictionaries
return json.loads(str(transformerRule['action_details']['value']).strip())
elif str(transformerRule['action_details']['datatype']).upper() == 'LIST':
# For Lists it is expected to be separated by comma
return str(transformerRule['action_details']['value']).split(',')
elif str(transformerRule['action_details']['datatype']).upper() == 'STRING':
# For strings we just need to carry out the content
return str(transformerRule['action_details']['value'])
elif str(transformerRule['action_details']['datatype']).upper() == 'NUMBER':
# For number we are casting it to float
return float(transformerRule['action_details']['value'])
else:
# If the JSON file has any value not expected
return None
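# --- illustrative sketch (not from the original repo) -----------------------------
# A minimal transformerRule dict holding only the keys createTransformersVariable
# actually reads ('action_details' with 'datatype' and 'value'); values are made up.
_example_rule = {'action_details': {'datatype': 'LIST', 'value': 'db1,db2,db3'}}
# createTransformersVariable(_example_rule) would return ['db1', 'db2', 'db3']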
def runRules(transformerRules, dataFrames, singleRule, args, collectionKey, transformersTablesSchema, fileList, rulesAlreadyExecuted, transformersParameters):
# Variable to keep track of rules executed and its results and status
transformerResults = {}
# Variable to keep track and make available all the variables from the JSON file
transformersRulesVariables = {}
# Standardize Statuses
# Executed
EXECUTEDSTATUS = 'EXECUTED'
FAILEDSTATUS = 'FAILED'
if singleRule:
# If parameter is set then we will run only 1 rule
sorted_keys = []
sorted_keys.append(singleRule)
else:
# Getting ordered list of keys by priority to iterate over the dictionary
sorted_keys = sorted(transformerRules, key=lambda x: (transformerRules[x]['priority']))
# Looping on ALL rules from transformers.json
for ruleItem in sorted_keys:
stringExpression = getParsedRuleExpr(transformerRules[ruleItem]['expr1'])
iferrorExpression = getParsedRuleExpr(transformerRules[ruleItem]['iferror'])
if str(transformerRules[ruleItem]['status']).upper() == "ENABLED":
if ruleItem not in rulesAlreadyExecuted:
print('Processing rule item: "{}"\nPriority: "{}"'.format(ruleItem,transformerRules[ruleItem]['priority']))
if str(transformerRules[ruleItem]['type']).upper() == "VARIABLE" and str(transformerRules[ruleItem]['action']).upper() == "CREATE":
# transformers.json asking to create a variable which is a dictionary
try:
transformerResults[ruleItem] = {'Status': EXECUTEDSTATUS, 'Result Value': createTransformersVariable(transformerRules[ruleItem])}
transformersRulesVariables[transformerRules[ruleItem]['action_details']['varname']] = transformerResults[ruleItem]['Result Value']
except:
# In case of any issue the rule will be marked as FAILEDSTATUS
transformerResults[ruleItem] = {'Status': FAILEDSTATUS, 'Result Value': None}
transformersRulesVariables[transformerRules[ruleItem]['action_details']['varname']] = None
elif str(transformerRules[ruleItem]['type']).upper() in ("NUMBER","FREESTYLE") and str(transformerRules[ruleItem]['action']).upper() == "ADD_OR_UPDATE_COLUMN":
# transformers.json asking to add a column that is type number meaning it can be a calculation and the column to be added is NUMBER too
# Where the result of expr1 will be saved initially
dfTargetName = transformerRules[ruleItem]['action_details']['dataframe_name']
columnTargetName = transformerRules[ruleItem]['action_details']['column_name']
ruleCondition = True
try:
ruleConditionString = str(transformerRules[ruleItem]['ifcondition1'])
except KeyError:
ruleConditionString = None
# In case ifcondition1 (transformers.json) is set for the rule
if ruleConditionString is not None and ruleConditionString != "":
try:
ruleCondition = eval (ruleConditionString)
print ('ruleCondition = {}'.format(ruleCondition))
except:
print ('\n Error processing ifcondition1 "{}" for rule "{}". So, this rule will be skipped.\n'.format(ruleConditionString,ruleItem))
continue
if not ruleCondition:
print ('WARNING: This rule "{}" will be skipped because of "ifcondition1" from transformers.json is FALSE.'.format(ruleItem))
continue
try:
dataFrames[str(dfTargetName).upper()][str(columnTargetName).upper()] = execStringExpression(stringExpression,iferrorExpression, dataFrames)
df = dataFrames[str(dfTargetName).upper()]
except KeyError:
print ('\n WARNING: The rule "{}" could not be executed because the variable "{}" used in the transformers.json could not be found.\n'.format(ruleItem, str(dfTargetName).upper()))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['target_dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(newTableName).upper()] = df
if resCSVCreation:
# If CSV creation was successful, add this file to the list of files to be imported
fileList.append(fileName)
elif str(transformerRules[ruleItem]['type']).upper() == "FREESTYLE" and str(transformerRules[ruleItem]['action']).upper() == "CREATE_OR_REPLACE_DATAFRAME":
#
df = execStringExpression(stringExpression,iferrorExpression,dataFrames)
if df is None:
print('\n WARNING: The rule "{}" could not be executed because the expression "{}" used in the transformers.json could not be executed.\n'.format(ruleItem,stringExpression))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(transformerRules[ruleItem]['action_details']['dataframe_name']).upper()] = df
if resCSVCreation:
# If CSV creation was successful, add this file to the list of files to be imported
fileList.append(fileName)
elif str(transformerRules[ruleItem]['type']).upper() == "FREESTYLE" and str(transformerRules[ruleItem]['action']).upper() == "FREESTYLE":
try:
eval (stringExpression)
except KeyError:
print ('\n WARNING: The rule "{}" could not be executed because the expr1 "{}" used in the transformers.json could not be executed.\n'.format(ruleItem, stringExpression))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['target_dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(newTableName).upper()] = df
if resCSVCreation:
# If CSV creation was successful, add this file to the list of files to be imported
fileList.append(fileName)
return transformerResults, transformersRulesVariables, fileList, dataFrames
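# --- illustrative sketch (not from the original transformers.json) ----------------
# The smallest rule shape the loop above would accept as a VARIABLE/CREATE action.
# Key names mirror what runRules reads; the rule name and values are invented.
_example_transformer_rules = {
    "create_threshold_variable": {
        "priority": 1,
        "status": "ENABLED",
        "type": "VARIABLE",
        "action": "CREATE",
        "expr1": "",
        "iferror": "",
        "action_details": {"varname": "threshold", "datatype": "NUMBER", "value": "0.75"},
    }
}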
def execStringExpression(stringExpression,iferrorExpression, dataFrames):
try:
res = eval (stringExpression)
except:
try:
res = eval (iferrorExpression)
except:
res = None
return res
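# Illustrative behaviour (examples not in the original file): expr1 is tried first and
# the iferror expression is only evaluated when expr1 raises.
#   execStringExpression("1/0", "None", {})  -> None  (expr1 raises, fallback yields None)
#   execStringExpression("2+2", "None", {})  -> 4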
def getParsedRuleExpr(ruleExpr):
# Function to get a clean string to be executed in eval function. The input is a string with many components separated by ; coming from transformers.json
ruleComponents = []
ruleComponents = str(ruleExpr).split(';')
finalExpression = ''
for ruleItem in ruleComponents:
ruleItem = ruleItem.strip()
finalExpression = str(finalExpression) + str(ruleItem) + ' '
return finalExpression
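# Illustrative example (not in the original file): components separated by ';' are
# stripped and re-joined with single spaces before being handed to eval().
#   getParsedRuleExpr("colA ; + ; colB")  ->  "colA + colB "   (note the trailing space)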
def getRulesFromJSON(jsonFileName):
# Read JSON file from the OS and turn it into a hash table
with open(jsonFileName) as f:
transformerRules = json.load(f)
return transformerRules
def getDataFrameFromCSV(csvFileName,tableName,skipRows,separatorString,transformersTablesSchema):
# Read CSV files from OS and turn it into a dataframe
paramCleanDFHeaders = False
paramGetHeadersFromConfig = True
try:
if paramGetHeadersFromConfig:
if transformersTablesSchema.get(tableName):
try:
tableHeaders = getDFHeadersFromTransformers(tableName,transformersTablesSchema)
tableHeaders = [header.upper() for header in tableHeaders]
df =
|
pd.read_csv(csvFileName, skiprows=skipRows+1, header=None, names=tableHeaders)
|
pandas.read_csv
|
from datetime import datetime
import pandas as pd
import os
import re
from .transformers_map import transformers_map
def build_data_frame(backtest: dict, csv_path: str):
"""Creates a Pandas DataFame with the provided backtest. Used when providing a CSV as the datafile
Parameters
----------
backtest: dict, provides instructions on how to build the dataframe
csv_path: string, absolute path of where to find the data file
Returns
-------
object, A Pandas DataFrame indexed by date
"""
df = load_basic_df_from_csv(csv_path)
if df.empty:
raise Exception("Dataframe is empty. Check the start and end dates")
df = prepare_df(df, backtest)
return df
def load_basic_df_from_csv(csv_path: str):
"""Loads a dataframe from a csv
Parameters
----------
csv_path: string, path to the csv so it can be read
Returns
df, A basic dataframe with the data from the csv
"""
if not os.path.isfile(csv_path):
raise Exception(f"File not found: {csv_path}")
df = pd.read_csv(csv_path, header=0)
df = standardize_df(df)
return df
def prepare_df(df: pd.DataFrame, backtest: dict):
"""Prepares the provided dataframe for a backtest by applying the datapoints and splicing based on the given backtest.
Useful when loading an existing dataframe (ex. from a cache).
Parameters
----------
df: DataFrame, should have all the open, high, low, close, volume data set as headers and indexed by date
backtest: dict, provides instructions on how to build the dataframe
Returns
------
df: DataFrame, with all the datapoints as column headers and trimmed to the provided time frames
"""
datapoints = backtest.get("datapoints", [])
df = apply_transformers_to_dataframe(df, datapoints)
trailing_stop_loss = backtest.get("trailing_stop_loss", 0)
if trailing_stop_loss:
df["trailing_stop_loss"] = df["close"].cummax() * (
1 - float(trailing_stop_loss)
)
chart_period = backtest.get("chart_period", "1Min")
start_time = backtest.get("start")
stop_time = backtest.get("stop")
df = apply_charting_to_df(df, chart_period, start_time, stop_time)
return df
def apply_charting_to_df(
df: pd.DataFrame, chart_period: str, start_time: str, stop_time: str
):
"""Modifies the dataframe based on the chart_period, start dates and end dates
Parameters
----------
df: dataframe with data loaded
chart_period: string, describes how often to sample data, default is '1Min' (1 minute)
see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
start_time: datestring in YYYY-MM-DD HH:MM (ex. 2020-08-31 04:00) of when to begin the backtest
stop_time: datestring of YYYY-MM-DD HH:MM when to stop the backtest
Returns
DataFrame, a sorted dataframe ready for consumption by run_backtest
"""
if df.index.dtype != "datetime64[ns]":
headers = df.columns.values.tolist()
headers.extend([df.index.name])
if "date" not in headers:
raise Exception(
"Data does not have a date column. Headers must include date, open, high, low, close, volume."
)
time_unit = detect_time_unit(df.date[1])
df.date = pd.to_datetime(df.date, unit=time_unit)
df.set_index("date", inplace=True)
if start_time:
if isinstance(start_time, datetime) or type(start_time) is int:
time_unit = detect_time_unit(start_time)
start_time = pd.to_datetime(start_time, unit=time_unit)
start_time = start_time.strftime("%Y-%m-%d %H:%M:%S")
if stop_time:
if isinstance(stop_time, datetime) or type(stop_time) is int:
time_unit = detect_time_unit(stop_time)
stop_time = pd.to_datetime(stop_time, unit=time_unit)
stop_time = stop_time.strftime("%Y-%m-%d %H:%M:%S")
df = df.resample(chart_period).first()
if start_time and stop_time:
df = df[start_time:stop_time] # noqa
elif start_time and not stop_time:
df = df[start_time:] # noqa
elif not start_time and stop_time:
df = df[:stop_time]
return df
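# Hedged, self-contained sketch (synthetic rows, not part of this module): shows the
# resampling path above turning epoch-second rows into 2-minute bars.
def _example_apply_charting():
    demo = pd.DataFrame(
        {
            "date": [1598918400, 1598918460, 1598918520],  # epoch seconds
            "open": [1.0, 2.0, 3.0],
            "high": [1.0, 2.0, 3.0],
            "low": [1.0, 2.0, 3.0],
            "close": [1.0, 2.0, 3.0],
            "volume": [10, 10, 10],
        }
    )
    # None start/stop keeps the full range; "2Min" is a pandas offset alias
    return apply_charting_to_df(demo, "2Min", None, None)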
def apply_transformers_to_dataframe(df: pd.DataFrame, datapoints: list):
"""Applies indications from the backtest to the dataframe
Parameters
----------
df: dataframe loaded with data
datapoints: list of indicators as dictionary objects
transformer detail:
{
"transformer": "", string, actual function to be called; MUST be a key in transformers_map
"name": "", string, name of the transformer, becomes a column on the dataframe
"args": [], list of arguments to pass to the function
}
Returns
-------
df, a modified dataframe with all the datapoints calculated as columns
"""
for ind in datapoints:
transformer = ind.get("transformer")
field_name = ind.get("name")
if len(ind.get("args", [])):
args = ind.get("args")
# df[field_name] = datapoints[transformer](df, *args)
trans_res = transformers_map[transformer](df, *args)
else:
trans_res = transformers_map[transformer](df)
if isinstance(trans_res, pd.DataFrame):
df = process_res_df(df, ind, trans_res)
if isinstance(trans_res, pd.Series):
df[field_name] = trans_res
return df
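# Hedged example of the expected `datapoints` shape (the "sma" transformer name is a
# placeholder -- whatever is used must exist as a key in transformers_map):
_example_datapoints = [
    {"transformer": "sma", "name": "sma_short", "args": [9]},
    {"transformer": "sma", "name": "sma_long", "args": [21]},
]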
def process_res_df(df, ind, trans_res):
"""handle if a transformer returns multiple columns
To manage this, we just add the name of column in a clean
way, removing periods and lowercasing it.
Parameters
----------
df, dataframe, current dataframe
ind, indicator object
trans_res, result from the transformer function
Returns
-------
df, dataframe, updated dataframe with the new columns
"""
for key in trans_res.keys().values:
i_name = ind.get("name")
clean_key = key.lower()
clean_key = clean_key.replace(".", "")
clean_key = clean_key.replace(" ", "_")
df_key = f"{i_name}_{clean_key}"
df[df_key] = trans_res[key]
return df
def detect_time_unit(str_or_int: str or int):
"""Determines a if a timestamp is really a timestamp and if it
matches is in seconds or milliseconds
Parameters
----------
str_or_int: string or int of the timestamp to detect against
Returns
-------
string of "s" or "ms", or None if nothing detected
"""
str_or_int = str(str_or_int)
regex1 = r"^(\d{10})$"
regex2 = r"^(\d{13})$"
if re.match(regex1, str_or_int):
return "s"
if re.match(regex2, str_or_int):
return "ms"
def standardize_df(df: pd.DataFrame):
"""Standardizes a dataframe with the basic features used
throughout the project.
Parameters
----------
df: A pandas dataframe (probably one just created) with
at least the required columns of: date, open, close, high, low, volume.
Returns
-------
A new pandas dataframe of with all the data in the expected types.
"""
new_df = df.copy()
if "date" in new_df.columns:
new_df = new_df.set_index("date")
ts = str(new_df.index[0])
time_unit = detect_time_unit(ts)
new_df.index = pd.to_datetime(new_df.index, unit=time_unit)
new_df = new_df[~new_df.index.duplicated(keep="first")]
new_df = new_df.sort_index()
columns_to_drop = ["ignore", "date"]
new_df = new_df.drop(columns=columns_to_drop, errors="ignore")  # assign back; drop is not in-place
new_df.open = pd.to_numeric(new_df.open)
new_df.close = pd.to_numeric(new_df.close)
new_df.high =
|
pd.to_numeric(new_df.high)
|
pandas.to_numeric
|
# coding: utf-8
###################################################################
# Step 1-3: find synonyms for mesh keywords
###################################################################
# library
import pandas as pd
import numpy as np
# function
# importing the mesh public health terms and normalizing the dataset
def mesh_term_data(file1):
inpfile = file1
mesh_term_df = pd.read_excel(inpfile)
# note: the column header in the source spreadsheet contains a trailing space
return mesh_term_df['Mesh Terms ']
# identifying the synonyms of the imported Mesh terms
def mesh_synonyms(file1, cui_mesh):
mesh_term = mesh_term_data(file1)
out_df = pd.read_csv(cui_mesh, sep='\t', encoding='utf-8')
cui_mesh = out_df[out_df['name'].isin(mesh_term)]
cui_mesh = cui_mesh['cui'].drop_duplicates()
# identifying the synonyms of the imported Mesh terms in the selected cui_synonym
cui_synonym = out_df[out_df['cui'].isin(cui_mesh)].drop_duplicates()
# report MeSH terms with no CUI match and count the matched ones
i = 0
for term in mesh_term:
temp_term = out_df.loc[out_df['name'] == term]
if len(temp_term) == 0:
print(term)
else:
i = i + 1
print(i)
return cui_synonym
# exporting the synonym for each mesh term to a dataframe
def df_synonyms(file1, cui_mesh):
df_export =
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import datetime
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from pytz import utc
from feast import utils
from feast.errors import (
FeatureNameCollisionError,
RequestDataNotFoundInEntityDfException,
)
from feast.feature_service import FeatureService
from feast.feature_view import FeatureView
from feast.infra.offline_stores.offline_utils import (
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL,
)
from tests.integration.feature_repos.repo_configuration import (
construct_universal_feature_views,
table_name_from_data_source,
)
from tests.integration.feature_repos.universal.entities import (
customer,
driver,
location,
)
np.random.seed(0)
def convert_timestamp_records_to_utc(
records: List[Dict[str, Any]], column: str
) -> List[Dict[str, Any]]:
for record in records:
record[column] = utils.make_tzaware(record[column]).astimezone(utc)
return records
# Find the latest record in the given time range and filter
def find_asof_record(
records: List[Dict[str, Any]],
ts_key: str,
ts_start: datetime,
ts_end: datetime,
filter_keys: Optional[List[str]] = None,
filter_values: Optional[List[Any]] = None,
) -> Dict[str, Any]:
filter_keys = filter_keys or []
filter_values = filter_values or []
assert len(filter_keys) == len(filter_values)
found_record = {}
for record in records:
if (
all(
[
record[filter_key] == filter_value
for filter_key, filter_value in zip(filter_keys, filter_values)
]
)
and ts_start <= record[ts_key] <= ts_end
):
if not found_record or found_record[ts_key] < record[ts_key]:
found_record = record
return found_record
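# Hedged illustration with synthetic records (not part of the test fixtures): picks the
# newest record whose timestamp falls inside [ts_start, ts_end] and whose filter keys match.
def _example_find_asof_record():
    records = [
        {"driver_id": 1, "event_timestamp": datetime(2021, 1, 1, tzinfo=utc)},
        {"driver_id": 1, "event_timestamp": datetime(2021, 1, 3, tzinfo=utc)},
        {"driver_id": 2, "event_timestamp": datetime(2021, 1, 2, tzinfo=utc)},
    ]
    return find_asof_record(
        records,
        ts_key="event_timestamp",
        ts_start=datetime(2021, 1, 1, tzinfo=utc),
        ts_end=datetime(2021, 1, 4, tzinfo=utc),
        filter_keys=["driver_id"],
        filter_values=[1],
    )  # returns the 2021-01-03 record for driver 1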
def get_expected_training_df(
customer_df: pd.DataFrame,
customer_fv: FeatureView,
driver_df: pd.DataFrame,
driver_fv: FeatureView,
orders_df: pd.DataFrame,
order_fv: FeatureView,
location_df: pd.DataFrame,
location_fv: FeatureView,
global_df: pd.DataFrame,
global_fv: FeatureView,
entity_df: pd.DataFrame,
event_timestamp: str,
full_feature_names: bool = False,
):
# Convert all pandas dataframes into records with UTC timestamps
customer_records = convert_timestamp_records_to_utc(
customer_df.to_dict("records"), customer_fv.batch_source.event_timestamp_column
)
driver_records = convert_timestamp_records_to_utc(
driver_df.to_dict("records"), driver_fv.batch_source.event_timestamp_column
)
order_records = convert_timestamp_records_to_utc(
orders_df.to_dict("records"), event_timestamp
)
location_records = convert_timestamp_records_to_utc(
location_df.to_dict("records"), location_fv.batch_source.event_timestamp_column
)
global_records = convert_timestamp_records_to_utc(
global_df.to_dict("records"), global_fv.batch_source.event_timestamp_column
)
entity_rows = convert_timestamp_records_to_utc(
entity_df.to_dict("records"), event_timestamp
)
# Manually do point-in-time join of driver, customer, and order records against
# the entity df
for entity_row in entity_rows:
customer_record = find_asof_record(
customer_records,
ts_key=customer_fv.batch_source.event_timestamp_column,
ts_start=entity_row[event_timestamp] - customer_fv.ttl,
ts_end=entity_row[event_timestamp],
filter_keys=["customer_id"],
filter_values=[entity_row["customer_id"]],
)
driver_record = find_asof_record(
driver_records,
ts_key=driver_fv.batch_source.event_timestamp_column,
ts_start=entity_row[event_timestamp] - driver_fv.ttl,
ts_end=entity_row[event_timestamp],
filter_keys=["driver_id"],
filter_values=[entity_row["driver_id"]],
)
order_record = find_asof_record(
order_records,
ts_key=customer_fv.batch_source.event_timestamp_column,
ts_start=entity_row[event_timestamp] - order_fv.ttl,
ts_end=entity_row[event_timestamp],
filter_keys=["customer_id", "driver_id"],
filter_values=[entity_row["customer_id"], entity_row["driver_id"]],
)
origin_record = find_asof_record(
location_records,
ts_key=location_fv.batch_source.event_timestamp_column,
ts_start=order_record[event_timestamp] - location_fv.ttl,
ts_end=order_record[event_timestamp],
filter_keys=["location_id"],
filter_values=[order_record["origin_id"]],
)
destination_record = find_asof_record(
location_records,
ts_key=location_fv.batch_source.event_timestamp_column,
ts_start=order_record[event_timestamp] - location_fv.ttl,
ts_end=order_record[event_timestamp],
filter_keys=["location_id"],
filter_values=[order_record["destination_id"]],
)
global_record = find_asof_record(
global_records,
ts_key=global_fv.batch_source.event_timestamp_column,
ts_start=order_record[event_timestamp] - global_fv.ttl,
ts_end=order_record[event_timestamp],
)
entity_row.update(
{
(
f"customer_profile__{k}" if full_feature_names else k
): customer_record.get(k, None)
for k in (
"current_balance",
"avg_passenger_count",
"lifetime_trip_count",
)
}
)
entity_row.update(
{
(f"driver_stats__{k}" if full_feature_names else k): driver_record.get(
k, None
)
for k in ("conv_rate", "avg_daily_trips")
}
)
entity_row.update(
{
(f"order__{k}" if full_feature_names else k): order_record.get(k, None)
for k in ("order_is_success",)
}
)
entity_row.update(
{
"origin__temperature": origin_record.get("temperature", None),
"destination__temperature": destination_record.get("temperature", None),
}
)
entity_row.update(
{
(f"global_stats__{k}" if full_feature_names else k): global_record.get(
k, None
)
for k in ("num_rides", "avg_ride_length",)
}
)
# Convert records back to pandas dataframe
expected_df = pd.DataFrame(entity_rows)
# Move "event_timestamp" column to front
current_cols = expected_df.columns.tolist()
current_cols.remove(event_timestamp)
expected_df = expected_df[[event_timestamp] + current_cols]
# Cast some columns to expected types, since we lose information when converting pandas DFs into Python objects.
if full_feature_names:
expected_column_types = {
"order__order_is_success": "int32",
"driver_stats__conv_rate": "float32",
"customer_profile__current_balance": "float32",
"customer_profile__avg_passenger_count": "float32",
"global_stats__avg_ride_length": "float32",
}
else:
expected_column_types = {
"order_is_success": "int32",
"conv_rate": "float32",
"current_balance": "float32",
"avg_passenger_count": "float32",
"avg_ride_length": "float32",
}
for col, typ in expected_column_types.items():
expected_df[col] = expected_df[col].astype(typ)
conv_feature_name = "driver_stats__conv_rate" if full_feature_names else "conv_rate"
expected_df["conv_rate_plus_100"] = expected_df[conv_feature_name] + 100
expected_df["conv_rate_plus_val_to_add"] = (
expected_df[conv_feature_name] + expected_df["val_to_add"]
)
return expected_df
@pytest.mark.integration
@pytest.mark.parametrize("full_feature_names", [True, False], ids=lambda v: str(v))
def test_historical_features(environment, universal_data_sources, full_feature_names):
store = environment.feature_store
(entities, datasets, data_sources) = universal_data_sources
feature_views = construct_universal_feature_views(data_sources)
customer_df, driver_df, location_df, orders_df, global_df, entity_df = (
datasets["customer"],
datasets["driver"],
datasets["location"],
datasets["orders"],
datasets["global"],
datasets["entity"],
)
entity_df_with_request_data = entity_df.copy(deep=True)
entity_df_with_request_data["val_to_add"] = [
i for i in range(len(entity_df_with_request_data))
]
customer_fv, driver_fv, driver_odfv, location_fv, order_fv, global_fv = (
feature_views["customer"],
feature_views["driver"],
feature_views["driver_odfv"],
feature_views["location"],
feature_views["order"],
feature_views["global"],
)
feature_service = FeatureService(
name="convrate_plus100",
features=[feature_views["driver"][["conv_rate"]], feature_views["driver_odfv"]],
)
feature_service_entity_mapping = FeatureService(
name="entity_mapping",
features=[
location_fv.with_name("origin").with_join_key_map(
{"location_id": "origin_id"}
),
location_fv.with_name("destination").with_join_key_map(
{"location_id": "destination_id"}
),
],
)
feast_objects = []
feast_objects.extend(
[
customer_fv,
driver_fv,
driver_odfv,
location_fv,
order_fv,
global_fv,
driver(),
customer(),
location(),
feature_service,
feature_service_entity_mapping,
]
)
store.apply(feast_objects)
entity_df_query = None
orders_table = table_name_from_data_source(data_sources["orders"])
if orders_table:
entity_df_query = f"SELECT customer_id, driver_id, order_id, origin_id, destination_id, event_timestamp FROM {orders_table}"
event_timestamp = (
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL
if DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL in orders_df.columns
else "e_ts"
)
full_expected_df = get_expected_training_df(
customer_df,
customer_fv,
driver_df,
driver_fv,
orders_df,
order_fv,
location_df,
location_fv,
global_df,
global_fv,
entity_df_with_request_data,
event_timestamp,
full_feature_names,
)
# Only need the shadow entities features in the FeatureService test
expected_df = full_expected_df.drop(
columns=["origin__temperature", "destination__temperature"],
)
if entity_df_query:
job_from_sql = store.get_historical_features(
entity_df=entity_df_query,
features=[
"driver_stats:conv_rate",
"driver_stats:avg_daily_trips",
"customer_profile:current_balance",
"customer_profile:avg_passenger_count",
"customer_profile:lifetime_trip_count",
"order:order_is_success",
"global_stats:num_rides",
"global_stats:avg_ride_length",
],
full_feature_names=full_feature_names,
)
start_time = datetime.utcnow()
actual_df_from_sql_entities = job_from_sql.to_df()
end_time = datetime.utcnow()
print(
str(f"\nTime to execute job_from_sql.to_df() = '{(end_time - start_time)}'")
)
# Not requesting the on demand transform with an entity_df query (can't add request data in them)
expected_df_query = expected_df.drop(
columns=["conv_rate_plus_100", "val_to_add", "conv_rate_plus_val_to_add"]
)
assert sorted(expected_df_query.columns) == sorted(
actual_df_from_sql_entities.columns
)
actual_df_from_sql_entities = (
actual_df_from_sql_entities[expected_df_query.columns]
.sort_values(by=[event_timestamp, "order_id", "driver_id", "customer_id"])
.drop_duplicates()
.reset_index(drop=True)
)
expected_df_query = (
expected_df_query.sort_values(
by=[event_timestamp, "order_id", "driver_id", "customer_id"]
)
.drop_duplicates()
.reset_index(drop=True)
)
assert_frame_equal(
actual_df_from_sql_entities, expected_df_query, check_dtype=False,
)
table_from_sql_entities = job_from_sql.to_arrow()
df_from_sql_entities = (
table_from_sql_entities.to_pandas()[expected_df_query.columns]
.sort_values(by=[event_timestamp, "order_id", "driver_id", "customer_id"])
.drop_duplicates()
.reset_index(drop=True)
)
for col in df_from_sql_entities.columns:
expected_df_query[col] = expected_df_query[col].astype(
df_from_sql_entities[col].dtype
)
|
assert_frame_equal(expected_df_query, df_from_sql_entities)
|
pandas.testing.assert_frame_equal
|
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from src.preprocess import create_ship_dataframe
tc_mapper = pd.read_csv(
os.path.join("emission_model", "ship_weightclass_mapper.csv"), index_col=0
)
ships = create_ship_dataframe()
# remove ships built after 2015
ships = ships[(ships["BUILT"] <= 2015) & (ships["BUILT"] > 1900)]
mapping = {
"Diverse": "Diverse",
"Cargo": "MPV",
"RoRo Passenger": "Ro-Pax",
"Cruise Liner": "Cruise",
"Tanker": "Tanker",
"Bulker": "Bulker",
"RoRo Ship": "Ro-Ro",
"Container": "Container",
"RoRo Car": "Car Carrier",
}
ships["NEWCLASS"] = ships["Class"].map(mapping)
# tc_mapper.loc[[i for i in tc_mapper.index if "Tier II" in i]]
# Analysis --------------------------------------------------------------------
gt_classes = {
"Ro-Ro": [(0, 25e3), (25e3, float("+inf"))],
"Ro-Pax": [(0, 25e3), (25e3, float("+inf"))],
"Cruise": [(0, 25e3), (25e3, float("+inf"))],
"Diverse": [(0, 2e3), (2e3, float("+inf"))],
"Container": [
(0, 17.5e3),
(17.5e3, 55e3),
(55e3, 145e3),
(145e3, float("+inf")),
],
"Car Carrier": [(0, 40e3), (40e3, float("+inf"))],
}
dwt_classes = {
"Tanker": [(0, 35e3), (35e3, 45e3), (45e3, 120e3), (120e3, float("+inf"))],
"Bulker": [(0, 35e3), (35e3, 45e3), (45e3, 120e3), (120e3, float("+inf"))],
"MPV": [(0, 120e3), (120e3, float("+inf"))],
}
# shiptype and weigth plot --------------------------------------------------
ships[["NEWCLASS", "GT", "BUILT"]]
cutter = [
(0, 2000),
(5000, 10e3),
(10e3, 30e3),
(30e3, 60e3),
(60e3, 145e3),
(145e3, 10000e3),
]
ships["GT Class"] = pd.cut(
ships["GT"], pd.IntervalIndex.from_tuples(cutter), precision=0,
)
df = ships.groupby(["NEWCLASS", "GT Class"])["IMO"].count().reset_index()
df = df.rename(
columns={
"IMO": "Count",
"NEWCLASS": "Shipclass",
"GT Class": "Gross tonnage",
}
)
plt.figure(figsize=(10, 6))
ax = sns.barplot(x="Gross tonnage", y="Count", hue="Shipclass", data=df)
ax.set_ylabel("Number of Ships")
# ax.set_xlabel("Gross tonnage", size=14)
ax.set_xticklabels(
["<" + str((int(i[1]))) for i in cutter[0:-1]] + [">145000"]
)
ax.legend(title="Type")
plt.savefig("figures/shiptype_by_gt.pdf")
df.to_latex(
"tables/shiptype_by_gt.tex",
caption="All ships by class and gross tonnage weigth as of 2015",
label="fig:shiptype_by_gt",
)
# Weight distribution---------------------------------------------------------
# for class
sns.histplot(
data=ships[ships["NEWCLASS"].isin(["Tanker"])],
x="DWT",
hue="NEWCLASS",
multiple="stack",
bins=100,
)
["GT"].hist(bins=100)
# Age structure --------------------------------------------------------------
# cutter = [(0, 5), (5, 10), (10, 15), (15, 20), (20, 25), (25, 100)]
# cutter_years = [(2015-i[1], 2015-i[0]) for i in cutter]
ships = ships[(ships["BUILT"] <= 2015) & (ships["BUILT"] > 1950)]
plt.figure(figsize=(10, 6))
ax = sns.histplot(
data=ships[["NEWCLASS", "BUILT"]],
x="BUILT",
hue="NEWCLASS",
multiple="stack",
)
leg = ax.get_legend()
leg.set_title("Type")
ax.set_ylabel("Number of Ships", size=14)
ax.set_xlabel("Built year", size=14)
plt.savefig("figures/age_structure_by_shiptype.pdf")
# GWT mean
gt_d = {}
for k, cutter in gt_classes.items():
ships_by_class = ships[ships["NEWCLASS"] == k]
vals = pd.cut(
ships_by_class["GT"],
pd.IntervalIndex.from_tuples(cutter),
precision=0,
)
gt_d[k] = ships_by_class.groupby(vals).count()["IMO"]
df_gt = pd.DataFrame(gt_d)
df_gt["Weighttype"] = "GT"
df_gt.set_index("Weighttype", append=True, drop=True, inplace=True)
dwt_d = {}
for k, cutter in dwt_classes.items():
ships_by_class = ships[ships["NEWCLASS"] == k]
vals = pd.cut(
ships_by_class["DWT"],
pd.IntervalIndex.from_tuples(cutter),
precision=0,
)
dwt_d[k] = ships_by_class.groupby(vals).count()["IMO"]
df_dwt = pd.DataFrame(dwt_d)
df_dwt["Weighttype"] = "DWT"
df_dwt.set_index("Weighttype", append=True, drop=True, inplace=True)
combined =
|
pd.concat([df_dwt, df_gt])
|
pandas.concat
|
import numpy as np
import pandas as pd
import torch
from physics.protein_os import Protein
import options
import os
from tqdm import tqdm
import mdtraj as md
from utils import test_setup, load_protein_bead
#################################################
parser = options.get_decoy_parser()
args = options.parse_args_and_arch(parser)
device, model, energy_fn, ProteinBase = test_setup(args)
torch.set_grad_enabled(False)
#################################################
amino_acids = pd.read_csv('data/amino_acids.csv')
vocab = {x.upper(): y - 1 for x, y in zip(amino_acids.AA3C, amino_acids.idx)}
def get_cb_index(topology):
ca_gly = topology.select('(name == CA) and (resname == GLY)')
cb = topology.select('name == CB')
beads_idx = np.append(ca_gly, cb)
beads_idx = np.sort(beads_idx)
print(beads_idx)
return beads_idx
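# Hedged usage sketch (the PDB path is a placeholder, not a file from this project):
#   structure = md.load('some_structure.pdb')
#   bead_idx = get_cb_index(structure.topology)   # CB atoms, plus CA for glycine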
# md_data_list = ['BPTI', 'Fip35', 'val_deep']
md_data_list = ['val_deep']
if 'Fip35' in md_data_list:
root_dir = '/home/hyang/bio/erf/data/decoys/msm'
trj_dir1 = f'{root_dir}/deshaw/DESRES-Trajectory-ww_1-protein/ww_1-protein/'
trj_dir2 = f'{root_dir}/deshaw/DESRES-Trajectory-ww_2-protein/ww_2-protein/'
seq_native, coords_native, profile_native = load_protein_bead(f'{root_dir}/fip35_bead.csv', 'CB', device)
protein_native = Protein(seq_native, coords_native, profile_native)
energy_native = protein_native.get_energy(energy_fn).item()
print('native', energy_native)
for trj_dir in [trj_dir1, trj_dir2]:
structure = md.load(f'{trj_dir}/ww-protein.pdb')
top = structure.topology
df = pd.read_csv(f'{trj_dir}/ww-protein-beads.csv')
cb_idx = df['beads_cb_index'].values
seq = df['group_name'].values
seq_id = df['group_name'].apply(lambda x: vocab[x]).values
profile = torch.tensor(seq_id, dtype=torch.long, device=device)
score_list = []
flist = pd.read_csv(f'{trj_dir}/flist.txt')['fname']
for k, fname in enumerate(flist):
trj = md.load(f'{trj_dir}/{fname}', top=top)
coords_all = trj.xyz * 10
coords_cb_all = coords_all[:, cb_idx, :]
for i in tqdm(range(coords_cb_all.shape[0])):
coords = torch.tensor(coords_cb_all[i], dtype=torch.float, device=device)
protein = Protein(seq, coords, profile)
energy = protein.get_energy(energy_fn).item()
score_list.append(energy)
df_i =
|
pd.DataFrame({'energy': score_list})
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import math
test_prices = map(math.log, [100, 150, 125, 200, 175, 225, 300, 350])
x = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
d = {
'prices' :
|
pd.Series(test_prices, index=x)
|
pandas.Series
|
# -*- coding: utf-8 -*-
# """@author: Elie"""
# run locally on python 3.8.5('dec1st_py38_xgboostetal':conda)
# =============================================================================
# %% Libraries
# =============================================================================
import pandas as pd
import numpy as np
import datetime
from functools import partial, reduce
from joblib import load, dump
import os
import sys
#plotting
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
plt.rcParams["font.size"] = "4"
import seaborn as sns
import matplotlib as mpl
#ML/Stats
from sklearn.model_selection import train_test_split, GridSearchCV, KFold, cross_val_score, StratifiedKFold
from sklearn.metrics import roc_curve, auc, precision_recall_curve, f1_score
from sklearn.metrics import make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix
import shap
import xgboost
from xgboost import XGBClassifier
pd.options.mode.chained_assignment = None
# import matplotlib as mpl
# mpl.matplotlib_fname()
# plt.matplotlib_fname()
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
# =============================================================================
# %% define these feature/headers here in case the headers
# are out of order in input files (often the case)
# =============================================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
### ==========================================================
# make concat sig dataframe
# ============================================================
"""load the 3 data frames and merge to one df"""
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
df_cnv = df_cnv[cnv_categories]
df_cnv["sample"] = df_cnv["sample"].astype(str)
df_sigs = pd.merge(df_snv, df_indel, on="sample", how='left').fillna(0)
df_sigs = pd.merge(df_sigs, df_cnv, on="sample", how='left').reset_index(drop=True)
return df_sigs
def get_data_and_labels_from_df(df, gene_name):
#first encode gene lable as binary
combined_matrix_for_gene = df.copy(deep=True)
gene_name = str(gene_name)
combined_matrix_for_gene.loc[(combined_matrix_for_gene["primary_label"] == gene_name), 'primary_label'] = 1
combined_matrix_for_gene.loc[(combined_matrix_for_gene["primary_label"] != 1), 'primary_label'] = 0
# note: without an explicit astype(int), the 1/0 labels stay dtype object and break GridSearchCV
combined_matrix_for_gene["primary_label"] = combined_matrix_for_gene["primary_label"].astype('int')
#now extract 2d matrix of feature values and 1d matrix of labels
features_list = snv_categories[1:] + indel_categories[1:] + cnv_categories[1:]
X_data = combined_matrix_for_gene[features_list]
X_data.columns = X_data.columns.str.replace("[", "mm").str.replace("]", "nn").str.replace(">", "rr")
Y_labels = combined_matrix_for_gene["primary_label"]
return X_data, Y_labels
"""Can use this function on the server with many cores, takes long time without many cores"""
def do_grid_search_for_best_params(xtrain, ytrain, xtest, ytest, paramgrid):
estimator = XGBClassifier(objective='binary:logistic', nthread=1, seed=42)
grid_search = GridSearchCV(estimator=estimator, param_grid=paramgrid, scoring = 'roc_auc', n_jobs = 60, cv = 10, verbose=True)
fit_params={"eval_metric" : ['auc', 'error', 'logloss'], "eval_set" : [[xtest, ytest]]}
fitted_model = grid_search.fit(xtrain, ytrain, **fit_params)
cv_results = pd.DataFrame(fitted_model.cv_results_)
return fitted_model.best_score_, fitted_model.best_params_, fitted_model.best_estimator_, cv_results
def model_with_params(trainX, trainY, testX, testY, params, max_rounds):
estimator = XGBClassifier(n_estimators=max_rounds, nthread=10, **params)
fitted_model = estimator.fit(trainX, trainY, verbose=True)
prediction_binary_test = fitted_model.predict(testX, ntree_limit=max_rounds)
prediction_probability_test = fitted_model.predict_proba(testX, ntree_limit=max_rounds)
prediction_prob_of_true_test = prediction_probability_test[:,1]
prediction_binary_train = fitted_model.predict(trainX, ntree_limit=max_rounds)
prediction_probability_train = fitted_model.predict_proba(trainX, ntree_limit=max_rounds)
prediction_prob_of_true_train = prediction_probability_train[:,1]
return fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train
def kfold_cv(Knumber, Xdata, Ylabels, model):
kfold = KFold(n_splits=Knumber)
results = cross_val_score(model, Xdata, Ylabels, cv=kfold)
return results
def shapely_values(model, Xdata, Nvalues):
import inspect
print(os.path.abspath(inspect.getfile(shap.summary_plot)))
X = Xdata.copy(deep=True)
shap_values = shap.TreeExplainer(model, feature_perturbation='tree_path_dependent').shap_values(X, check_additivity=False)
X.columns = X.columns.str.replace("mm", "[").str.replace("nn", "]").str.replace("rr", ">")
fig, ax = plt.subplots(figsize=(7,4))
shap.summary_plot(shap_values, X, plot_type="dot", max_display=Nvalues, show=False, plot_size=(6,3), alpha=0.7)
plt.subplots_adjust(left=0.3, right=0.94, top=0.9, bottom=0.1)
ax = plt.gca()
fig = plt.gcf()
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
return fig, ax
def my_roc(data, prob_of_true):
fpr, tpr, thresholds = roc_curve(data, prob_of_true)
roc_auc = auc(fpr, tpr)
fig, ax = plt.subplots(figsize=(1.3,1.4))
lw = 1
ax.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.0, 1.02])
ax.set_xlabel('False Positive Rate', fontsize=4, labelpad=0.75)
ax.set_ylabel('True Positive Rate', fontsize=4, labelpad=0.75)
#ax.set_title('ROC curve', fontsize=6, pad=1)
ax.legend(loc="lower right", fontsize=4)
tick_numbers = [round(x,1) for x in np.arange(0, 1.1, 0.2)]
ax.set_xticks(tick_numbers)
ax.tick_params(axis='both', which="major", length=2, labelsize=4, pad=0.5, reset=False)
fig.subplots_adjust(left=0.15, right=0.965, top=0.98, bottom=0.12)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return fig, ax
def precision_recall(data, prob_of_true):
precision, recall, thresholds = precision_recall_curve(data, prob_of_true)
fig, ax = plt.subplots(figsize=(1.3,1.4))
lw = 1
# ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.plot(recall, precision, color='darkorange', lw=lw, label='PR curve')
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.5, 1.05])
# axis labels
ax.set_xlabel('Recall', fontsize=4, labelpad=0.75)
ax.set_ylabel('Precision', fontsize=4, labelpad=0.75)
ax.legend(loc="lower left", fontsize=4)
tick_numbers = [round(x,1) for x in np.arange(0, 1.1, 0.2)]
ax.set_xticks(tick_numbers)
ax.tick_params(axis='both', which="major", length=2, labelsize=4, pad=0.5, reset=False)
fig.subplots_adjust(left=0.15, right=0.965, top=0.98, bottom=0.12)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return fig, ax
def plot_precision_recall_vs_threshold(data, prob_of_true):
"""Modified from: Hands-On Machine learning with Scikit-Learn
and TensorFlow; p.89
"""
#first generate and find fscores for all possible thresholds:
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
#evaluate each threshold
thresholds = np.arange(0, 1, 0.001)
scores = [f1_score(data, to_labels(prob_of_true, t)) for t in thresholds]
ix = np.argmax(scores)
print('Threshold=%.3f, F-Score=%.5f' % (thresholds[ix], scores[ix]))
best_threshold = thresholds[ix]
Fscore = scores[ix]
#now plot precision recall as a function of threshold
precisions, recalls, thresholds = precision_recall_curve(data, prob_of_true)
fig, ax = plt.subplots(figsize=(1.3,1.4))
lw = 1
#plt.title("Precision and Recall Scores as a function of the decision threshold")
ax.plot(thresholds, precisions[:-1], color="#CD5C5C", label="Precision", lw=lw)
ax.plot(thresholds, recalls[:-1], "#197419", label="Recall", lw=lw)
ax.axvline(x=best_threshold, color="b",linestyle="--", label=f'Threshold={best_threshold:.2f},\nF-Score={Fscore:.2f}')
ax.set_ylabel("Score", fontsize=4, labelpad=0.75)
ax.set_xlabel("Decision Threshold", fontsize=4, labelpad=0.75)
ax.legend(loc="lower center", fontsize=4)
tick_numbers = [round(x,1) for x in np.arange(0, 1.1, 0.2)]
ax.set_xticks(tick_numbers)
ax.tick_params(axis='both', which="major", length=2, labelsize=4, pad=0.5, reset=False)
fig.subplots_adjust(left=0.15, right=0.965, top=0.98, bottom=0.12)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return fig, ax, best_threshold, Fscore
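# Tiny numeric illustration (synthetic labels/probabilities, not from the cohort data):
# the same threshold sweep as above, returning the cutoff that maximizes F1.
def _example_best_f1_threshold():
    y_true = np.array([0, 0, 1, 1])
    y_prob = np.array([0.1, 0.6, 0.4, 0.9])
    thresholds = np.arange(0, 1, 0.001)
    scores = [f1_score(y_true, (y_prob >= t).astype('int')) for t in thresholds]
    ix = np.argmax(scores)
    return thresholds[ix], scores[ix]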
def makepredictions(loadedmodel, dfgood, xdata, ylabels):
prediction_probability = loadedmodel.predict_proba(xdata)
pred_prob = prediction_probability[:,1]
allpredprob_df = pd.DataFrame(data={"labels":ylabels.values, "prob_of_true": pred_prob})
all_data_with_preds = pd.merge(dfgood, allpredprob_df, left_index=True, right_index=True)
pred_data = all_data_with_preds[["sample", "primary_label", "prob_of_true"]]
pred_data["primary_label"] = pred_data["primary_label"].fillna("DRp")
all_data_with_preds = all_data_with_preds.drop(columns=snv_categories[1:]).drop(columns=indel_categories[1:]).drop(columns=cnv_categories[1:])
return pred_prob, pred_data
def least_sub_rank1_model_params(cv_results_path):
rank1_cv_results = pd.read_csv(cv_results_path, sep="\t").query('(rank_test_score < 2)').query('(param_colsample_bylevel > 0.3) and (param_colsample_bynode > 0.3) and (param_colsample_bytree > 0.3) and (param_subsample > 0.3)')
rank1_cv_results["total_subsample"] = rank1_cv_results['param_colsample_bylevel'] * rank1_cv_results['param_colsample_bynode'] * rank1_cv_results['param_colsample_bytree'] * rank1_cv_results['param_subsample']
rank1_cv_results = rank1_cv_results.sort_values(by="total_subsample", ascending=False).head(n=1)
params = rank1_cv_results["params"].iloc[0]
params_dict = eval(params)
return params_dict
def probability_bar_graph(gene_oi, pos_color, neg_color, legend_d, legend_p, all_data_with_preds):
all_prob_table = all_data_with_preds.copy(deep=True)
pos = all_prob_table.query('(primary_label == @gene_oi)').sort_values(f"{gene_oi}_prob_of_true", ascending=False)
pos["color"] = pos_color
neg = all_prob_table.query('(primary_label != @gene_oi)').sort_values(f"{gene_oi}_prob_of_true", ascending=False)
neg["color"] = neg_color
bargraph = pd.concat([pos, neg]).reset_index(drop=True)
def fig_aesthetic(ax, df):
ax.set_ylim(0,1)
ax.set_xlim(df.index[0]-0.5,df.index[-1]+0.5)
ax.grid(b=False, which='both', axis='y', color='0.4', linewidth=0.9, linestyle='dotted', zorder=0)
ax.tick_params(axis='both', which="major", length=3, labelsize=5, pad=1, reset=False)
ax.set_xticks([])
# ax[0].set_ylabel("Signature Weights", fontsize=8, horizontalalignment="center", labelpad=0.5)
ax.set_yticks([0.25, 0.50, 0.75])
ax.set_xlabel("")
ax.set_ylabel("Probability", fontsize=5, horizontalalignment="center", labelpad=0.6)
ax.yaxis.set_label_coords(-0.08, 0.5)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return ax
fig, ax = plt.subplots(figsize=(3.2,1.5))
ax.bar(x=bargraph.index, height=bargraph[f"{gene_oi}_prob_of_true"], width=0.8, edgecolor=None, linewidth=0, color=bargraph["color"], zorder=10)
ax = fig_aesthetic(ax, bargraph)
handles = []
handles.append(mlines.Line2D([], [], color=pos_color, markeredgecolor=pos_color, marker='s', lw=0, markersize=8, label=legend_d))
handles.append(mlines.Line2D([], [], color=neg_color, markeredgecolor=neg_color, marker='s', lw=0, markersize=8, label=legend_p))
ax.legend(handles=handles,loc='upper left', edgecolor='0.5', frameon=False, ncol=2, fontsize=5, handletextpad=0.001, bbox_to_anchor=(0.45, 0.72), borderpad=0, columnspacing=0.9)
fig.subplots_adjust(left=0.1, right=0.995, top=0.99, bottom=0.03)
return fig, ax
def conf_matrix(df, label, threshold):
table = df.copy(deep=True) #df is all data_with_preds
label = str(label) #label is primary label column
threshold = float(threshold)
prob_column = f"{label}_prob_of_true"
table["TP"] = 0
table.loc[(table['primary_label'] == label) & (table[prob_column] >= threshold), 'TP'] = 1
table["FP"] = 0
table.loc[(table['primary_label'] != label) & (table[prob_column] >= threshold), 'FP'] = 1
table["FN"] = 0
table.loc[(table['primary_label'] == label) & (table[prob_column] <= threshold), 'FN'] = 1
table["TN"] = 0
table.loc[(table['primary_label'] != label) & (table[prob_column] <= threshold), 'TN'] = 1
TP = table["TP"].sum()
FP = table["FP"].sum()
FN = table["FN"].sum()
TN = table["TN"].sum()
return np.array([[TP, FP], [FN, TN]])
def accuracy(TP, TN, FP, FN):
return ((TP+TN)/(TP + TN + FP + FN))
def precision(TP, TN, FP, FN):
return ((TP)/(TP + FP))
def recall(TP, TN, FP, FN):
return ((TP)/(TP + FN))
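# Worked numeric example (illustrative only), using the argument order (TP, TN, FP, FN):
#   accuracy(40, 45, 5, 10)  -> 0.85       # (40+45)/100
#   precision(40, 45, 5, 10) -> 0.888...   # 40/45
#   recall(40, 45, 5, 10)    -> 0.8        # 40/50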
def plot_matrix(cm_array):
fig, ax = plt.subplots(figsize=(3, 3))
group_names = ['True Pos', 'False Pos', 'False Neg', 'True Neg']
group_counts = cm_array.flatten()
labels = [f"{name}\n{count}" for name, count in zip(group_names,group_counts)]
labels = np.asarray(labels).reshape(2,2)
sns.heatmap(cm_array, annot=labels, annot_kws={"size":8}, fmt='', cmap='Blues', ax=ax)
ax.set_xlabel("Published labels", fontsize=8)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.set_ylabel("Predicted labels", fontsize=8)
ax.set_xticklabels(["yes", "no"])
ax.set_yticklabels(["yes", "no"])
ax.tick_params(axis = 'both', which="major", length=0, pad=0, labelsize=8, reset=False)
cbar = ax.collections[0].colorbar
# shrink the colorbar tick labels to match the rest of the figure
cbar.ax.tick_params(labelsize=8)
return fig, ax
### ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
#files from paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
outputdir = os.path.dirname(__file__)
cv_results_dir = os.path.dirname(__file__)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
df = pd.merge(sample_labels, sigs, how='left', on='sample').query('(cancer == "PC")').reset_index(drop=True)
print('Finished loading data at '+str(datetime.datetime.now()))
all_probabilites_list = []
# color list for bargraphs
color_list = list(sns.color_palette().as_hex())
blue = color_list[0] #drp
orange = color_list[1] #atm
green = color_list[2] #cdk12
red = color_list[3] #brca2
purple = color_list[4] #mmr
# %%
# model BRCA2
# =============================================================================
goi = "BRCA2d"
goi = str(goi)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
df = pd.merge(sample_labels, sigs, how='left', on='sample').query('(cancer == "PC")').reset_index(drop=True)
print('Finished loading data at '+str(datetime.datetime.now()))
print(f"start splitting data for {goi} at {str(datetime.datetime.now())}")
X_data, Y_labels = get_data_and_labels_from_df(df, goi)
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_labels, test_size=0.4, random_state=42, stratify=Y_labels)
# model_path = "c:/Users/ElieRitch/Desktop/signatures_aug2021/gridsearch_models6/BRCA2_gridparams_refitmodel.joblib.model.dat"
# modelpath = os.path.expanduser(model_path)
# model = load(modelpath)
# PredProbs, PredData = makepredictions(model, df_good, X_data, Y_labels)
print(f"start making model for {goi} at {str(datetime.datetime.now())}")
max_rounds = 1000000
# cv_grid_path = f"{cv_results_dir}/{goi}_cv_results.tsv"
# best_params_ = least_sub_rank1_model_params(cv_grid_path)
best_params_ = {'colsample_bylevel': 0.3, 'colsample_bynode': 0.3, 'colsample_bytree': 0.3, 'eta': 0.001, 'max_depth': 3, 'seed': 32, 'subsample': 0.4}
fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train = model_with_params(X_train, Y_train, X_test, Y_test, best_params_, max_rounds)
test_df = pd.DataFrame(data={"labels":Y_test.values, "prob_of_true": prediction_prob_of_true_test, "pred_binary":prediction_binary_test})
test_df.index = Y_test.index
train_df = pd.DataFrame(data={"labels":Y_train.values, "prob_of_true": prediction_prob_of_true_train, "pred_binary":prediction_binary_train})
train_df.index = Y_train.index
all_preds_df = pd.concat([test_df, train_df])
all_data_with_preds = pd.merge(df_good, all_preds_df, left_index=True, right_index=True)
all_data_with_preds = all_data_with_preds.drop(columns=snv_categories[1:]).drop(columns=indel_categories[1:]).drop(columns=cnv_categories[1:])
all_data_with_preds = all_data_with_preds.drop(columns="labels").rename(columns={"prob_of_true": goi+"_prob_of_true", "pred_binary": goi+"_pred_binary"})
all_probabilites_list.append(all_data_with_preds)
all_data_with_preds.to_csv(outputdir+"/"+goi+"_predictions.tsv",sep='\t', index=False)
fitted_model.save_model(fr"{model_output_dir}\{goi}.xgb_py37_xgboost_ml.model.txt")
all_data = pd.concat([Y_test, Y_train])
all_prob_of_true = np.concatenate([prediction_prob_of_true_test, prediction_prob_of_true_train])
print(f"finished making model for {goi} at {str(datetime.datetime.now())}")
#####ROC for all data and for test ##############
print(f"start graphing model for {goi} at {str(datetime.datetime.now())}")
fig, ax = my_roc(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_ROC.png", dpi=500)
plt.close()
fig, ax = my_roc(Y_test, prediction_prob_of_true_test)
plt.savefig(outputdir+"/"+goi+"_test_ROC.png", dpi=500)
plt.close()
fig, ax = precision_recall(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_PreRec.png", dpi=500)
# plt.savefig(outputdir+"/"+goi+"_PreRec.pdf", dpi=500)
plt.close()
fig, ax, best_threshold, Fscore = plot_precision_recall_vs_threshold(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_PreRec_vs_Thresh.png", dpi=500)
# plt.savefig(outputdir+"/"+goi+"_PreRec_vs_Thresh.pdf", dpi=500)
plt.close()
print(f"start graphing shap for {goi} at {str(datetime.datetime.now())}")
fig, ax = shapely_values(fitted_model, X_data, 15)
ax.set_xticks([-0.5, 0,0.5,1])
plt.savefig(outputdir+"/"+goi+"_shap15.png", dpi=500)
# plt.savefig(outputdir+"/"+goi+"_shap15.pdf", dpi=500)
plt.close()
print(f"start graphing bars for {goi} at {str(datetime.datetime.now())}")
fig, ax = probability_bar_graph(goi, red, blue, f"{goi}d", f"{goi}p", all_data_with_preds)
plt.savefig(f"{outputdir}/{goi}_prob_of_class.png", dpi=500, transparent=False, facecolor="w")
plt.close()
print(f"finished graphing model for {goi} at {str(datetime.datetime.now())}")
print(f"Confusion metric and graph for {goi} at {str(datetime.datetime.now())}")
confusion_matrix = conf_matrix(all_data_with_preds, goi, best_threshold)
TruePos = confusion_matrix.flatten()[0]
FalsePos = confusion_matrix.flatten()[1]
FalseNeg = confusion_matrix.flatten()[2]
TrueNeg = confusion_matrix.flatten()[3]
accuracy_of_model = accuracy(TruePos, TrueNeg, FalsePos, FalseNeg)
precision_of_model = precision(TruePos, TrueNeg, FalsePos, FalseNeg)
recall_of_model = recall(TruePos, TrueNeg, FalsePos, FalseNeg)
print(confusion_matrix)
print(f"{goi} model accuracy = {accuracy_of_model}")
print(f"{goi} model precision = {precision_of_model}")
print(f"{goi} model recall = {recall_of_model}")
fig, ax = plot_matrix(confusion_matrix)
plt.savefig(f"{outputdir}/{goi}_confusion_matrix.png", dpi=500, transparent=False, facecolor="w")
plt.close()
# %%
# model CDK12
# =============================================================================
goi = "CDK12"
goi = str(goi)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
df = pd.merge(sample_labels, sigs, how='left', on='sample').query('(cancer == "PC")').reset_index(drop=True)
print('Finished loading data at '+str(datetime.datetime.now()))
print('Start '+ goi + ' at '+str(datetime.datetime.now()))
X_data, Y_labels = get_data_and_labels_from_df(df_good, goi)
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_labels, test_size=0.4, random_state=42, stratify=Y_labels)
print(f"start making model for {goi} at {str(datetime.datetime.now())}")
max_rounds = 1000000
# cv_grid_path = f"{cv_results_dir}/{goi}_cv_results.tsv"
# best_params_ = least_sub_rank1_model_params(cv_grid_path)
best_params_ = {'colsample_bylevel': 0.6, 'colsample_bynode': 0.9, 'colsample_bytree': 0.7, 'eta': 0.001, 'max_depth': 3, 'seed': 47, 'subsample': 0.7}
fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train = model_with_params(X_train, Y_train, X_test, Y_test, best_params_, max_rounds)
test_df = pd.DataFrame(data={"labels":Y_test.values, "prob_of_true": prediction_prob_of_true_test, "pred_binary":prediction_binary_test})
test_df.index = Y_test.index
train_df =
|
pd.DataFrame(data={"labels":Y_train.values, "prob_of_true": prediction_prob_of_true_train, "pred_binary":prediction_binary_train})
|
pandas.DataFrame
|
#Imports
import pandas as pd
import numpy as np
#import h5py
import itertools
from numpy import isnan
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import math
from numpy import linalg as LA
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
import joblib
import matplotlib.backends.backend_pdf
#The Functions
#activation function
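# Clips the singular values of Din at eta = 0.99 via an SVD round-trip, so the
# re-estimated transition matrix stays (near-)contractive and the filter
# cannot diverge.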
def Activation_function(Din):
m,n =Din.shape
eta=0.99
u,s,v=np.linalg.svd(Din)
B = np.zeros(Din.shape)
for i in range(m) :
for j in range(n) :
if i == j : B[i,j] = s[j]
for i in range(m):
for j in range(n):
if i==j:B[i,j]=min(B[i,j],eta)
Dout=np.dot(u,np.dot(B,v))
return Dout
#Metric functions
def getRMSEForThreads(PredictedMatrix,ActualMatrix,TC):
List=[]
for i in range(0,TC):
List.append(sqrt(mean_squared_error(list(ActualMatrix[i]),list(PredictedMatrix[i]))))
return List
def getMaeForThreads(PredictedMatrix,ActualMatrix,TC):
List=[]
for i in range(0,TC):
List.append(mean_absolute_error(list(ActualMatrix[i]),list(PredictedMatrix[i])))
return List
#Update the predict Matrix as prediction moves
def updatePredictedMatrix(predicted,pred_day_start,pred_day_end,PredictedMatrix,window_size):
PredictedMatrix[:,(pred_day_start-window_size):(pred_day_end-window_size)]=predicted
return PredictedMatrix
#Prepare data for input to model
def AppendFuturePrediction(Thread,Window_Size):
WholeDataMatrix=Thread
X_row,X_col=WholeDataMatrix.shape
#append shifted matrix along rows
WholeDataMatrix_shifted=WholeDataMatrix[:,1:X_col]
observation_data=np.concatenate((WholeDataMatrix[:,0:(X_col-1)],WholeDataMatrix_shifted[0].reshape((1,(X_col-1)))), axis=0)
X_row,X_col=observation_data.shape
return X_row,X_col,WholeDataMatrix,observation_data
#The model
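# FindHyperparameters runs the same online Kalman/EM routine as
# ForecastForThread, but only over a few forecast steps (num = pred_day_end+3),
# and returns the MAE/RMSE of that short run so a hyperparameter set can be
# scored cheaply before the full forecast is made.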
def FindHyperparameters(sigma,sigma1,sigma2,f1,f2,f3,thread_count,seed,Nz,Nx,intialisationFactor,FuturePredDays,no_iter,day,pred_day_start,pred_day_end,window_size,WholeDataMatrix,observation_data,X_col):
WrongSeedFlag=1
rows,cols=WholeDataMatrix.shape
PredictedMatrix=np.zeros((1,(FuturePredDays+3)))
ActualMatrix=WholeDataMatrix[0,window_size+1:(window_size+FuturePredDays+4)].reshape((1,(FuturePredDays+3)))
TC=1
#onlineRDL
#Initialising D1, D2
np.random.seed(seed)
D_1_init = f1*np.eye(Nz)
D_2_init = f2*np.random.rand(Nx,Nz)
w_T=f3*np.random.rand(1,Nz)
D_2_init=np.concatenate((D_2_init,w_T))
est_D1 = 1
est_D2 = 1
D_1_est_a_ = D_1_init
D_2_est_a_ = D_2_init
count1=0
num=pred_day_end+3
while(pred_day_end<=num):
window_data=observation_data[:,day:pred_day_start]
#onlineRDL
#Initialising D1, D2
rows,cols= window_data.shape
kalman_mean_a =np.zeros((Nz,cols))
kalman_covariance_a =np.zeros((cols,Nz,Nz))
kalman_covariance_a[0] = (sigma*sigma) * np.eye(Nz)
Q = (sigma1*sigma1) * np.eye(Nz)
R = (sigma2*sigma2) * np.eye(Nx+TC)
#print('initialised')
for i in range(0,Nz):
kalman_mean_a[i][0]=window_data[i][0]
smooth_mean_a =np.zeros((Nz,cols))
smooth_covariance_a =np.zeros((cols,Nz,Nz))
G =np.zeros((cols,Nz,Nz))
ErrD1 = np.zeros((no_iter,1))
ErrD2 = np.zeros((no_iter,1))
for n in range(0,no_iter):
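# E-step (filtering): forward Kalman recursion over the current window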
for k in range(1,cols):
prior_m=np.dot(D_1_est_a_, kalman_mean_a[:,k-1])
prior_p=np.dot((np.dot(D_1_est_a_,kalman_covariance_a[k-1])),D_1_est_a_.T)+Q
y=window_data[:,k]-np.dot(D_2_est_a_,prior_m)
s=np.dot(np.dot(D_2_est_a_,prior_p),D_2_est_a_.T)+R
K=np.dot(np.dot(prior_p,D_2_est_a_.T),np.linalg.pinv(s))
kalman_mean_a[:,k]= prior_m + np.dot(K,y)
kalman_covariance_a[k]=prior_p-np.dot(np.dot(K,s),K.T)
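# E-step (smoothing): backward Rauch-Tung-Striebel pass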
smooth_mean_a[:,cols-1]= kalman_mean_a[:,cols-1]
smooth_covariance_a[cols-1]=kalman_covariance_a[cols-1]
for k in range(cols-2,-1,-1):
post_m=np.dot(D_1_est_a_, kalman_mean_a[:,k])
post_p=np.dot(np.dot(D_1_est_a_,kalman_covariance_a[k]),D_1_est_a_.T) +Q
G[k]=np.dot(np.dot(kalman_covariance_a[k],D_1_est_a_.T),np.linalg.pinv(post_p))
smooth_mean_a[:,k]=kalman_mean_a[:,k]+np.dot(G[k],(smooth_mean_a[:,k+1]-post_m))
smooth_covariance_a[k]=kalman_covariance_a[k]+np.dot(np.dot(G[k],(smooth_covariance_a[k+1]-post_p)),G[k].T)
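# M-step: accumulate sufficient statistics and re-estimate D1 and D2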
Sigma = np.zeros((Nz,Nz))
Phi = np.zeros((Nz,Nz))
B = np.zeros((Nx+TC,Nz))
C = np.zeros((Nz,Nz))
for k in range(1,cols):
X_k = window_data[:,k]
X_k = X_k[:, np.newaxis]
ms_k = smooth_mean_a[:,k]
ms_k = ms_k[:, np.newaxis]
ms_k_old = smooth_mean_a[:,k-1]
ms_k_old = ms_k_old[:, np.newaxis]
Sigma = Sigma + 1/cols * (smooth_covariance_a[k] + np.dot(ms_k,ms_k.T))
Phi = Phi + 1/cols * (smooth_covariance_a[k-1] + np.dot(ms_k_old,ms_k_old.T))
B = B + 1/cols * (np.dot(X_k,ms_k.T))
C = C + 1/cols * (np.dot(smooth_covariance_a[k],G[k-1].T) + np.dot(ms_k,ms_k_old.T))
if est_D1>0:
D_1_est_a_new = np.dot(C,np.linalg.inv(Phi))
ErrD1[n] = np.linalg.norm(D_1_est_a_new-D_1_est_a_)/np.linalg.norm(D_1_est_a_)
D_1_est_a_ = Activation_function(D_1_est_a_new)
if est_D2>0:
D_2_est_a_new = np.dot(B,np.linalg.pinv(Sigma))
ErrD2[n] = np.linalg.norm(D_2_est_a_new-D_2_est_a_)/np.linalg.norm(D_2_est_a_)
D_2_est_a_ = D_2_est_a_new
#predicting for tomorrow
D_1=D_1_est_a_
D_2=D_2_est_a_[0:Nx,:]
W_T=D_2_est_a_[Nx:,:]
col= FuturePredDays
kalman_mean_test =np.zeros((Nz,col+1))
kalman_mean_test[:,0:1]=kalman_mean_a[:,cols-1].reshape(Nz,1)
kalman_covariance_test =np.zeros((col,Nz,Nz))
kalman_covariance_test[0] = (sigma*sigma) * np.eye(Nz)
Q = (sigma1*sigma1) * np.eye(Nz)
R = (sigma2*sigma2) * np.eye(Nx)
for k in range(1,col+1):
kalman_mean_test[:,k]=np.dot(D_1, kalman_mean_test[:,k-1])
predicted=np.dot(W_T,kalman_mean_test[:,1:])
PredictedMatrix=updatePredictedMatrix(predicted,pred_day_start,pred_day_end,PredictedMatrix,window_size)
day+=1
pred_day_start=window_size+day
pred_day_end=pred_day_start + FuturePredDays
count1+=1
if not WrongSeedFlag:
return 10,10
PredictedMatrix[np.isnan(PredictedMatrix)]=0
MAE_List=getMaeForThreads(PredictedMatrix,ActualMatrix,TC)
RMSE_List=getRMSEForThreads(PredictedMatrix,ActualMatrix,TC)
return MAE_List[0],RMSE_List[0]
def ForecastForThread(sigma,sigma1,sigma2,f1,f2,f3,thread_count,seed,Nz,Nx,intialisationFactor,FuturePredDays,no_iter,day,pred_day_start,pred_day_end,window_size,WholeDataMatrix,observation_data,X_col):
rows,cols=WholeDataMatrix.shape
PredictedMatrix=np.zeros((1,(cols-(window_size+1))))
ActualMatrix=WholeDataMatrix[0,window_size+1:].reshape((1,(cols-(window_size+1))))
TC=1
#onlineRDL
#Initialising D1, D2
np.random.seed(seed)
D_1_init = f1*np.eye(Nz)
D_2_init = f2*np.random.rand(Nx,Nz)
w_T=f3*np.random.rand(1,Nz)
D_2_init=np.concatenate((D_2_init,w_T))
est_D1 = 1
est_D2 = 1
D_1_est_a_ = D_1_init
D_2_est_a_ = D_2_init
count1=0
pe=pred_day_end
ps=pred_day_start
while(pe<=X_col):
day+=1
count1+=1
ps=window_size+day
pe=ps +FuturePredDays
day=0
Predicted_values=np.zeros((count1,FuturePredDays))
Kalman_mean_test_save=np.zeros((count1,(FuturePredDays*5)))
count1=0
while(pred_day_end+1<=(X_col+1)):
window_data=observation_data[:,day:pred_day_start]
#onlineRDL
#Initialising D1, D2
rows,cols= window_data.shape
kalman_mean_a =np.zeros((Nz,cols))
kalman_covariance_a =np.zeros((cols,Nz,Nz))
kalman_covariance_a[0] = (sigma*sigma) * np.eye(Nz)
Q = (sigma1*sigma1) * np.eye(Nz)
R = (sigma2*sigma2) * np.eye(Nx+TC)
#print('initialised')
for i in range(0,Nz):
kalman_mean_a[i][0]=window_data[i][0]
smooth_mean_a =np.zeros((Nz,cols))
smooth_covariance_a =np.zeros((cols,Nz,Nz))
G =np.zeros((cols,Nz,Nz))
ErrD1 = np.zeros((no_iter,1))
ErrD2 = np.zeros((no_iter,1))
for n in range(0,no_iter):
for k in range(1,cols):
prior_m=np.dot(D_1_est_a_, kalman_mean_a[:,k-1])
prior_p=np.dot((np.dot(D_1_est_a_,kalman_covariance_a[k-1])),D_1_est_a_.T)+Q
y=window_data[:,k]-np.dot(D_2_est_a_,prior_m)
s=np.dot(np.dot(D_2_est_a_,prior_p),D_2_est_a_.T)+R
K=np.dot(np.dot(prior_p,D_2_est_a_.T),np.linalg.pinv(s))
kalman_mean_a[:,k]= prior_m + np.dot(K,y)
kalman_covariance_a[k]=prior_p-np.dot(np.dot(K,s),K.T)
smooth_mean_a[:,cols-1]= kalman_mean_a[:,cols-1]
smooth_covariance_a[cols-1]=kalman_covariance_a[cols-1]
for k in range(cols-2,-1,-1):
post_m=np.dot(D_1_est_a_, kalman_mean_a[:,k])
post_p=np.dot(np.dot(D_1_est_a_,kalman_covariance_a[k]),D_1_est_a_.T) +Q
G[k]=np.dot(np.dot(kalman_covariance_a[k],D_1_est_a_.T),np.linalg.pinv(post_p))
smooth_mean_a[:,k]=kalman_mean_a[:,k]+np.dot(G[k],(smooth_mean_a[:,k+1]-post_m))
smooth_covariance_a[k]=kalman_covariance_a[k]+np.dot(np.dot(G[k],(smooth_covariance_a[k+1]-post_p)),G[k].T)
# Dictionary update : M-step
Sigma = np.zeros((Nz,Nz))
Phi = np.zeros((Nz,Nz))
B = np.zeros((Nx+TC,Nz))
C = np.zeros((Nz,Nz))
for k in range(1,cols):
X_k = window_data[:,k]
X_k = X_k[:, np.newaxis]
ms_k = smooth_mean_a[:,k]
ms_k = ms_k[:, np.newaxis]
ms_k_old = smooth_mean_a[:,k-1]
ms_k_old = ms_k_old[:, np.newaxis]
Sigma = Sigma + 1/cols * (smooth_covariance_a[k] + np.dot(ms_k,ms_k.T))
Phi = Phi + 1/cols * (smooth_covariance_a[k-1] + np.dot(ms_k_old,ms_k_old.T))
B = B + 1/cols * (np.dot(X_k,ms_k.T))
C = C + 1/cols * (np.dot(smooth_covariance_a[k],G[k-1].T) + np.dot(ms_k,ms_k_old.T))
if est_D1>0:
D_1_est_a_new = np.dot(C,np.linalg.inv(Phi))
ErrD1[n] = np.linalg.norm(D_1_est_a_new-D_1_est_a_)/np.linalg.norm(D_1_est_a_)
D_1_est_a_ = Activation_function(D_1_est_a_new)
if est_D2>0:
D_2_est_a_new = np.dot(B,np.linalg.pinv(Sigma))
ErrD2[n] = np.linalg.norm(D_2_est_a_new-D_2_est_a_)/np.linalg.norm(D_2_est_a_)
D_2_est_a_ = D_2_est_a_new
#predicting for tomorrow
D_1=D_1_est_a_
D_2=D_2_est_a_[0:Nx,:]
W_T=D_2_est_a_[Nx:,:]
col= FuturePredDays # predicting for next 3 days
kalman_mean_test =np.zeros((Nz,col+1))
kalman_mean_test[:,0:1]=kalman_mean_a[:,cols-1].reshape(Nz,1)
kalman_covariance_test =np.zeros((col,Nz,Nz))
kalman_covariance_test[0] = (sigma*sigma) * np.eye(Nz)
Q = (sigma1*sigma1) * np.eye(Nz)
R = (sigma2*sigma2) * np.eye(Nx)
for k in range(1,col+1):
kalman_mean_test[:,k]=np.dot(D_1, kalman_mean_test[:,k-1])
predicted=np.dot(W_T,kalman_mean_test[:,1:])
#added
Predicted_values[count1,:]=predicted.reshape(FuturePredDays,)
Kalman_mean_test_save[count1,:] = kalman_mean_test[:,1:].reshape(col*5,)
PredictedMatrix=updatePredictedMatrix(predicted,pred_day_start,pred_day_end,PredictedMatrix,window_size)
day+=1
pred_day_start=window_size+day
pred_day_end=pred_day_start + FuturePredDays
count1+=1
x=list(range(window_size+1,(X_col+1)))
for i in range(0,TC):
fig=plt.figure()
plt.plot(x,list(PredictedMatrix[i]), 'r')
plt.plot(x,ActualMatrix[i], 'b')
plt.title('thread'+str(thread_count))
plt.xlabel('Window Frame')
plt.ylabel('HateScore')
plt.legend(["predicted", "Actual"], loc ="lower right")
plt.show()
pdfForFeature.savefig(fig)
PredictedMatrix[np.isnan(PredictedMatrix)]=0
MAE_List=getMaeForThreads(PredictedMatrix,ActualMatrix,TC)
RMSE_List=getRMSEForThreads(PredictedMatrix,ActualMatrix,TC)
return MAE_List[0],RMSE_List[0],PredictedMatrix,ActualMatrix
# load Dataset
import pickle
with open('./time_series.pkl', 'rb') as f:
data = pickle.load(f)
def CreateTestSet(DataList):
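# Stacks the five per-thread series into a 5 x T matrix and keeps roughly the
# last 30% of each thread (plus a 20-step warm-up for the sliding window) as
# the test segment; threads that are too short are skipped.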
TheTestList=[]
TestThreadName=[]
for data in DataList:
TotalLength=len(data[0])
Data=np.concatenate((data[0].reshape((1,TotalLength)), data[1].reshape((1,TotalLength)),data[2].reshape((1,TotalLength)),data[3].reshape((1,TotalLength)),data[4].reshape((1,TotalLength))), axis=0)
rows,cols=Data.shape
trainlen=int(0.7*cols)
trainlen=trainlen-20
if trainlen >0 :
TestThreadName.append(data[5][:-4])
TheTestList.append(Data[:,trainlen:])
rows,cols=Data[:,trainlen:].shape
return TheTestList,TestThreadName
DataList,TestThreadName=CreateTestSet(data)
HyperParameters={
'seed1':[6,7,8,9],
'Nz':[5],
'intialisationFactor':[0.5],
'FuturePredDays':[3],
'no_iter':[20],
'f1':[0.7],
'f2':[0.7],
'f3':[0.7],
'sigma':[0.00001],
'sigma1':[0.1],
'sigma2':[0.1],
'window_size':[20]
}
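# Note: only 'seed1' takes several values above, so the per-thread grid search
# below effectively selects the best random initialisation by RMSE.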
pdfForFeature = matplotlib.backends.backend_pdf.PdfPages("./temp2.pdf")
counter=1
FinalMetricDict={}
for Thread in DataList:
MetricDict={}
HPcount=0
HyperParameterEachThread=[]
keys = list(HyperParameters)
for values in itertools.product(*map(HyperParameters.get, keys)):
HP=dict(zip(keys, values))
HyperParameterEachThread.append(HP)
day=0
window_size=HP['window_size']
pred_day_start= window_size+day
pred_day_end=pred_day_start +HP['FuturePredDays']
X_row,X_col,WholeDataMatrix,observation_data = AppendFuturePrediction(Thread,window_size)
#get best fit hyperparameters
Thread_MAE,Thread_RMSE= FindHyperparameters(HP['sigma'],HP['sigma1'],HP['sigma2'],HP['f1'],HP['f2'],HP['f3'],counter,HP['seed1'],HP['Nz'],5,HP['intialisationFactor'],HP['FuturePredDays'],HP['no_iter'],day,pred_day_start,pred_day_end,window_size,WholeDataMatrix,observation_data,X_col)
MetricDict[HPcount]={'MAE':Thread_MAE,'RMSE':Thread_RMSE}
HPcount=HPcount+1
df=pd.DataFrame.from_dict(MetricDict, orient='index')
df=df.sort_values(by='RMSE')
ArgIndex=int(df.index[0])
HP=HyperParameterEachThread[ArgIndex]
day=0
window_size=HP['window_size']
pred_day_start= window_size+day
pred_day_end=pred_day_start +HP['FuturePredDays']
X_row,X_col,WholeDataMatrix,observation_data = AppendFuturePrediction(Thread,window_size)
Thread_MAE,Thread_RMSE,PredictedMatrix,ActualMatrix= ForecastForThread(HP['sigma'],HP['sigma1'],HP['sigma2'],HP['f1'],HP['f2'],HP['f3'],counter,HP['seed1'],HP['Nz'],5,HP['intialisationFactor'],HP['FuturePredDays'],HP['no_iter'],day,pred_day_start,pred_day_end,window_size,WholeDataMatrix,observation_data,X_col)
FinalMetricDict[counter]={'MAE':Thread_MAE,'RMSE':Thread_RMSE}
df=pd.DataFrame(ActualMatrix[0].tolist(),columns=['original'])
df['predicted']=PredictedMatrix[0].tolist()
df.to_csv('./'+TestThreadName[ counter-1]+"_DSS.csv")
counter=counter+1
pdfForFeature.close()
FinalThreadWiseScore=
|
pd.DataFrame.from_dict(FinalMetricDict, orient='index')
|
pandas.DataFrame.from_dict
|
"""
Author: <NAME>
Organization: Phillips Lab, Institute of Ecology and Evolution,
University of Oregon
"""
import csv
import random
import datetime
import numpy as np
import pandas as pd
# import plotly.io as pio
import plotly.graph_objects as go
# pio.kaleido.scope.mathjax = None
from exp_mixture_model import EMM
def access_csv(file_str, data, usage_str):
with open(file_str, usage_str, newline="") as file:
writer = csv.writer(file, delimiter=",")
for el in data:
writer.writerow(el)
def load_ec_log(file_path, wrong_temp_sets, min_eggs_num=10):
# Load all worksheets (all temperatures) with sheet_name=None
data_dict = pd.read_excel(
file_path, header=0, sheet_name=None, engine="openpyxl"
)
# Concat the returned dictionary of DataFrames
data = pd.concat(data_dict)
# Reset the index to collapse the multi-index to a single
data = data.reset_index(drop=True)
# Slice out the extra header rows and drop big text cols
data = data[data["Rig"] != "Rig"]
data = data.drop(["TempDataQuality", "Notes"], axis=1)
# Get only entries that had eggs and non-zero params
# (which also removes most nans)
data = data[data["p"] > 0]
# Filter out worms with too few eggs
data = data[data["EggCount"] >= min_eggs_num]
data = data.reset_index(drop=True)
# Explicitly set params as floats
data["p"] = data["p"].astype("float")
data["lambda1"] = data["lambda1"].astype("float")
data["lambda2"] = data["lambda2"].astype("float")
for el in wrong_temp_sets:
rig = el[0]
date = el[1]
temp = el[2]
data.loc[(data.Rig == rig) & (data.Date == date), "SetTemp"] = temp
# Get rid of C character in set temps
# data['SetTemp'] = [x.strip('C') for x in data['SetTemp']]
# data['SetTemp'].astype(int)
return data
def conv_str_series_data(str_series):
str_list = str_series.strip("[]").split(",")
float_list = [float(x) for x in str_list]
return float_list
def estimate_parameters(raw_intervals, iterations=100):
model = EMM(k=2, n_iter=iterations)
pi, mu = model.fit(raw_intervals)
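# Convert the fitted 2-component exponential-mixture parameters (weights pi,
# means mu) into the three parameters used downstream: p, l_1 = 1/mu_1
# (capped at 0.9999) and l_2 = 1/(p*mu_2); zeros are returned if the fit fails.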
try:
pi_2 = model.pi[1]
mu_1 = model.mu[0]
mu_2 = model.mu[1]
p = round(1 - (pi_2 * mu_1 * ((1 / mu_1) - (1 / mu_2))), 5)
l_1 = round(1 / mu_1, 5)
if l_1 > 0.9999:
l_1 = 0.9999
l_2 = round(1 / (p * mu_2), 5)
except Exception:
pi_2 = 0
mu_1 = 0
mu_2 = 0
p = 0
l_1 = 0
l_2 = 0
return [p, l_1, l_2]
def get_windowed_egg_counts(row):
# Get date (for helping keep track of the relative times)
date = row.Date
# Get SFES series for that row/worm
sfes = conv_str_series_data(row.SFES)
# Set a fake egg lay event at the end of the experiment time period
# to help Pandas resample the time correctly. That way, I don't have
# to do custom time filling stuff.
# 172,000 seconds (47 h 46 min 40 s) is the length of the experiment.
# When looking at binned times, if you bin by hour, this results in
# the last 46 minutes and 40 seconds being potentially missed in the
# bins. So instead of adding a final SFES value of 172,000, I add
# 172,800 (an extra 800 seconds, rounding up to exactly 48 h) to even the 1 hour bins
sfes.append(172_800)
# Set up first time as a datetime object at 0
first_time = "00:00:00"
first_dt = date + " " + first_time
first_dt = datetime.datetime.strptime(first_dt, "%Y-%m-%d %H:%M:%S")
# Basically, convert SFES series into a series of datetime objects
# that preserves the relative timing of egg lay events.
# The absolute times do not correspond to when the egg lay
# occurred.
dts = [first_dt]
for t in sfes:
next_dt = first_dt + datetime.timedelta(seconds=t)
dts.append(next_dt)
# Set up a DataFrame from the SFES datetime objects
df = pd.DataFrame(dts, columns=["time"])
# Set the DataFrame index to the time column
df = df.set_index("time", drop=True)
# At each time point, there was 1 egg laid. So set the "value"
# column to all ones
df["value"] = np.ones(len(df), dtype=int)
# Remove the one at timepoint 0, because no eggs had been laid yet
df.loc[df.index[0], "value"] = 0
# Set the fake egg lay at the end to 0, to remove the fake
df.loc[df.index[-1], "value"] = 0
# Resample
dfrs = df.resample("1h").sum().ffill()
# Add bins
dfrs["TimeBin"] = pd.cut(dfrs.index, bins=dfrs.index, right=False)
# Group bins to make final dataframe with correct time bins and values
dfg = dfrs.groupby(["TimeBin"]).sum()
return dfg
def get_param_error_arrays(file_str, param, estimates_df, set_temps):
upper_qt = 0.975
lower_qt = 0.025
bootstrap_df = pd.read_csv(file_str, header=0)
array_plus = []
array_minus = []
# Build error bar arrays
for t in set_temps:
t_df = bootstrap_df[bootstrap_df["t"] == t]
qt_series = t_df[param].quantile([lower_qt, upper_qt])
lower_cutoff = round(qt_series.iloc[0], 4)
upper_cutoff = round(qt_series.iloc[1], 4)
param_est = estimates_df[estimates_df["Temperature"] == t][param]
upper_offset = round(float(upper_cutoff - param_est), 4)
lower_offset = round(float(param_est - lower_cutoff), 4)
array_plus.append(upper_offset)
array_minus.append(lower_offset)
return array_plus, array_minus
def randomized_parameter_test(
egg_data,
param,
t1,
t2,
plot_settings,
permutation_total=1000,
plot_stuff=False,
verbose=False,
):
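# Permutation test set-up: gT below is the observed difference in mean `param`
# between the two temperatures, to be compared against a null distribution
# built from permutation_total shuffles of the pooled estimates.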
temps = [t1, t2]
dfs = [egg_data[egg_data["SetTemp"] == t] for t in temps]
means = [round(df[param].mean(), 4) for df in dfs]
# Calculate "ground truth" -- actual, observed difference between means
# of the two temperature param estimate values
gT = round(means[0] - means[1], 4)
# Get param estimate vals into single array for easier shuffling and
# slicing
x =
|
pd.concat(dfs)
|
pandas.concat
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:App: exiflib
:Purpose: This installed library is designed to be a light-weight and
easy to use wrapper for extracting meta data from an image.
The GPS information is converted into latitude /longitude
coordinates and the full extracted dataset is returned as a
pandas Series.
:Version: 3.0.1
:Platform: Linux/Windows | Python 3.8
:Developer: <NAME>
:Email: <EMAIL>
:Attrib: The latitude / longitude extraction and conversion code is
a modified version of:
* Programmer: <NAME>
* Source: https://gist.github.com/erans/983821
:Example:
Example code use::
from exiflib.exif import Exif
data = Exif(image_path='/path/to/image.jpg').extract()
"""
import os
import pandas as pd
import utils3.utils as u
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
class Exif():
"""Wrapper for extracting exif data from images.
Args:
image_path (str): Full file path to the image to be read.
"""
def __init__(self, image_path):
"""Class initialiser."""
self._img = image_path
self._s_data =
|
pd.Series()
|
pandas.Series
|
import os
import pandas
import subprocess
import fire
import math
import time
import sys
import re
def consolidate_when_done(path_to_jar, num_terms):
done = False
time.sleep(5)
while not done:
if len(os.listdir(path_to_jar)) >= num_terms:
final_df = pandas.DataFrame()
for file in os.listdir(path_to_jar):
final_df = final_df.append(pandas.read_pickle(os.path.join(path_to_jar, file)))
final_df.to_excel(os.path.join(path_to_jar, 'sorted.xlsx'))
done = True
else:
time.sleep(5)  # keep polling for more result files instead of recursing
def term_call(call_file="bullhorn_call.py", excel_file='/home/jjorissen/Documents/BullhornActDocs/EM.xlsx',
path_to_jar='../ACT_EM_pickle_jar', num_terms=5):
file = os.path.abspath(excel_file)
path_to_jar = os.path.abspath(path_to_jar)
for old_file in os.listdir(path_to_jar):  # clear previous results without clobbering `file`
os.remove(os.path.join(path_to_jar, old_file))
try:
df = pandas.read_excel(file)
except:
df =
|
pandas.read_csv(file)
|
pandas.read_csv
|
import pandas as pd
# Tools for machine learning
import pickle
import time
import xgboost as xgb
from sklearn.model_selection import train_test_split
matches = pd.read_csv('data/seasons_merged.csv')
letter_to_result = {'H': 1, 'D': 0, 'A': -1}
def get_n_last_matches(matches, date, team, n=10):
'''
Get the last n matches of a given team.
'''
# All matches with a given team
team_matches = matches[(matches['HomeTeam'] == team) | (matches['AwayTeam'] == team)]
# Filter n last matches from team matches
n_last_matches = (team_matches[team_matches.Date < date]
.sort_values(by='Date', ascending=False)
.iloc[0:n, :])
return n_last_matches
def get_n_last_matches_against_each_other(matches, date, home_team, away_team, n=5):
'''
Get the last n matches between two given teams if possible;
otherwise return all available matches between them.
'''
home_matches = matches[(matches['HomeTeam'] == home_team) & (matches['AwayTeam'] == away_team)]
away_matches = matches[(matches['HomeTeam'] == away_team) & (matches['AwayTeam'] == home_team)]
total_matches = pd.concat([home_matches, away_matches])
# Get last n matches, if possible:
try:
last_matches = (total_matches[total_matches.Date < date]
.sort_values(by='Date', ascending=False)
.iloc[0:n, :])
except: # If there are less than n matches
last_matches = (total_matches[total_matches.Date < date]
.sort_values(by='Date', ascending=False)
.iloc[0:total_matches.shape[0], :])
return last_matches
def get_goals(matches, team):
'''
Get the total number of goals a specific team has scored
in a given dataframe of matches
'''
home_goals = matches.FTHG[matches.HomeTeam == team].sum()
away_goals = matches.FTAG[matches.AwayTeam == team].sum()
return home_goals + away_goals
def get_concealed_goals(matches, team):
'''
Get the number of goals conceded by a specific team in a given dataframe of matches
'''
home_goals = matches.FTAG[matches.HomeTeam == team].sum()
away_goals = matches.FTHG[matches.AwayTeam == team].sum()
return home_goals + away_goals
def get_wins(matches, team):
'''
Get the number of wins of a specific team from a dataframe of specific matches.
'''
home_wins = matches[(matches.FTR == 1) & (matches.HomeTeam == team)].shape[0]
away_wins = matches[(matches.FTR == -1) & (matches.AwayTeam == team)].shape[0]
return home_wins + away_wins
def coefficients_to_probability(matches):
'''
Converts a bookmaker decimal coefficient (x > 1), which includes the
bookmaker's margin, into a probability coefficient (0 < x < 1)
'''
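# Example: decimal odds (2.0, 3.5, 4.0) imply raw probabilities
# (0.50, 0.286, 0.25) summing to ~1.036 (the bookmaker's margin); dividing
# each by that sum renormalises them to ~(0.483, 0.276, 0.241).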
# The overround: how much margin the betting companies build into their odds
matches['profit_B365'] = sum((1 / matches['B365H'], 1 / matches['B365D'], 1 / matches['B365A']))
matches['profit_BbAv'] = sum((1 / matches['BbAvA'], 1 / matches['BbAvD'], 1 / matches['BbAvH']))
# Converting all betting coefficients into probabilities of home/draw/away:
for betting_column in ['B365H', 'B365D', 'B365A', 'BbAvH', 'BbAvD', 'BbAvA']:
matches[betting_column] = 1 / (matches[betting_column] * matches['profit_' + betting_column[:-1]])
return matches
# Create features, based on which, the model would train and predict results
def get_features_for_match(match, matches, n1=10, n2=3):
'''
Creates a feature set for each match, if possible (the last n1=10 matches
of each team and the last n2=3 matches against each other)
'''
match_date = match.Date
home_team = match.HomeTeam
away_team = match.AwayTeam
# Get n1 last matches of 2 teams
home_last = get_n_last_matches(matches, match_date, home_team, n=n1)
away_last = get_n_last_matches(matches, match_date, away_team, n=n1)
# Get last n2 matches against each other
home_last_against = get_n_last_matches_against_each_other(matches, match_date, home_team, away_team, n=n2)
away_last_against = get_n_last_matches_against_each_other(matches, match_date, away_team, home_team, n=n2)
# Goals stuff
home_goals = get_goals(home_last, home_team)
away_goals = get_goals(away_last, away_team)
home_goals_conceided = get_concealed_goals(home_last, home_team)
away_goals_conceided = get_concealed_goals(away_last, away_team)
res = pd.DataFrame()
res.loc[0, 'H_goal_diff'] = home_goals - home_goals_conceided
res.loc[0, 'A_goal_diff'] = away_goals - away_goals_conceided
res.loc[0, 'H_win'] = get_wins(home_last, home_team)
res.loc[0, 'A_win'] = get_wins(away_last, away_team)
res.loc[0, 'H_win_against'] = get_wins(home_last_against, home_team)
res.loc[0, 'A_win_against'] = get_wins(away_last_against, away_team)
# TODO: try testing some additional criteria/features
# print(result.loc[0])
return res.loc[0]
teams = ['Arsenal',
'<NAME>',
'Bournemouth',
'Brighton',
'Burnley',
'Chelsea',
'Crystal Palace',
'Everton',
'Leicester',
'Liverpool',
'Man City',
'Man United',
'Newcastle',
'Southampton',
'Tottenham',
'Watford',
'West Ham',
'Wolves']
def save_to_cache():
"""
Saves the current standings into cache, so that you
don't have to compute them every time
"""
arr = []
for hometeam in teams:
for awayteam in teams:
if hometeam == awayteam:
continue
# Creating a new match
match = pd.DataFrame(data={'Date': ['2020-07-22'],
'HomeTeam': [hometeam],
'AwayTeam': [awayteam], },
columns=['Date', 'HomeTeam',
'AwayTeam'])
# Creating features for a given match
match_features = get_features_for_match(match.iloc[0],
matches, 20, 3)
df = pd.DataFrame(data={'Unnamed: 0': [3333]},
columns=['Unnamed: 0'])
# Filling a dataframe with features, needed for an analysis
for i in match_features.to_frame().reset_index()['index']:
df[i] = match_features[i]
# Using a model to predict an outcome, returns an array
df = df.drop(['Unnamed: 0'], axis=1)
home_win, draw, away_win = clf_boosted.predict_proba(df)[0]
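# NB: assuming the classifier's classes_ are the sorted outcomes (-1, 0, 1),
# predict_proba columns are ordered (away, draw, home), so these names are
# effectively swapped here; get_cached() below unpacks the cached line in the
# matching order, so the two conventions cancel out.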
arr.append(','.join((hometeam, awayteam, str(home_win), str(draw),
str(away_win))))
with open('data/coefficients.txt', 'w') as f:
f.write('\n'.join(arr))
def get_cached(hometeam, awayteam):
"""
Loads the result from the cache, if possible
"""
with open('data/coefficients.txt', 'r') as f:
for line in f:
line = line.strip()
home_team, away_team, away_win, draw, home_win = line.split(',')
if hometeam != home_team or awayteam != away_team:
continue
home_win = float(home_win)
away_win = float(away_win)
draw = float(draw)
return home_win, draw, away_win
features =
|
pd.read_csv('data/features.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
@author: Fuck
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#import datetime
# INPUT
state_name = 'São Paulo'
metodo = "subreport" # "fator_verity" #
# IMPORT DATA
df = pd.read_excel (r'C:\Users\Fuck\Downloads\HIST_PAINEL_COVIDBR_21mai2020.xlsx')
# data semanaEpi populacaoTCU2019 casosAcumulado obitosAcumulado Recuperadosnovos emAcompanhamentoNovos
states = { 'coduf': [76, 11, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 35, 41, 42, 43, 50, 51, 52, 53],
'state_name': ['Brasil','Rondônia','Acre','Amazonas','Roraima','Pará','Amapá','Tocantins','Maranhão','Piauí','Ceará','Rio Grande do Norte','Paraíba','Pernambuco','Alagoas','Sergipe','Bahia','Minas Gerais','Espiríto Santo','Rio de Janeiro','São Paulo','Paraná','Santa Catarina','Rio Grande do Sul','Mato Grosso do Sul','Mato Grosso','Goiás','Distrito Federal'],
'populationTCU2019': [210_147_125, 1_777_225, 881_935, 4_144_597, 605_761, 8_602_865, 845_731, 1_572_866, 7_075_181, 3_273_227, 9_132_078, 3_506_853, 4_018_127, 45_919_049, 3_337_357, 2_298_696, 14_873_064, 21_168_791, 4_018_650, 17_264_943, 7_164_788, 9_557_071, 11_433_957, 11_377_239, 2_778_986, 3_484_466, 7_018_354, 3_015_268]}
states = pd.DataFrame(states, columns = ['coduf', 'state_name', 'populationTCU2019'])
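# 'coduf' appears to be the federative-unit code used in the Health Ministry
# COVID panel (76 = Brazil as a whole); populations are the TCU 2019 estimates
# referenced in the column name.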
# INITIAL DATE
if state_name == 'Pernambuco':
startdate = '2020-05-02'
r0 = (1.1, 1.3)
coduf = 26
# population = 9_557_071
elif state_name == 'Santa Catarina':
startdate = '2020-05-10'
r0 = (1.1, 1.2)
coduf = 42
# population = 7_164_788
elif state_name == 'São Paulo':
startdate = '2020-04-29'
r0 = (1.15, 1.32)
coduf = 35
# population = 45_919_049
elif state_name == 'Brasil':
startdate = '2020-05-19'
coduf = 76
# population = 210_147_125
states_set = states[states['coduf'] == coduf ]
population = states_set['populationTCU2019'].values[0]
dfMS = df[df['coduf'] == coduf ]
dfMS = dfMS[dfMS['codmun'].isnull()] # only rows without city
dfMS['data'] =
|
pd.to_datetime(dfMS['data'])
|
pandas.to_datetime
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 11:04:13 2018
@author: dalonlobo
"""
from __future__ import absolute_import, division, \
print_function, unicode_literals
import os
import sys
import argparse
import logging
import glob
import pandas as pd
from utils import convert_mp4_to_audio, run_command
from pydub import AudioSegment
from pydub.effects import normalize
from pydub.silence import split_on_silence
logger = logging.getLogger("__main__")
def split_on_silence_threshold(wav_file):
"""
Splits the wav file into chunks of wav files,
based on the silence level
Documentation: http://pydub.com/
Git: https://github.com/jiaaro/pydub/
"""
abs_path = os.path.dirname(wav_file)
dest_dir = os.path.join(abs_path, "custom_split")
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logger.info("Splitting started: " + wav_file)
# Read the file
audioSegment = AudioSegment.from_wav(wav_file)
# Calculating the silence threshold
# Normalizing the audio file before finding the threshold
full_audio_wav = normalize(audioSegment)
loudness_ms_list = [] # Save the audio levels of all the chunks
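# Iterating a pydub AudioSegment yields 1 ms slices, so this builds a
# per-millisecond dBFS loudness profile of the normalized audio.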
for ms_chunk in full_audio_wav:
loudness_ms_list.append(round(ms_chunk.dBFS))
print("Audio levels are recorded", file=sys.stderr)
# Using pandas df for easier manipulation
df =
|
pd.DataFrame(loudness_ms_list)
|
pandas.DataFrame
|
import json
import os
import sys
import glob
import yaml
from typing import Set, List
from joblib import Parallel, delayed
from collections import Counter
from tqdm import tqdm
from tqdm import tqdm_notebook
from termcolor import cprint
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import seaborn as sn
import numpy as np
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
from msbase.utils import load_json, sha256sum
import pickle
precision_recall_fscore_cols = ['precision', 'recall', 'fscore', 'support']
class Configuration(object):
def __init__(self):
self.labels = None
self.vectors_dir = None
self.model = "rf" # default: random forest
self.gamma_svc = "auto"
self.random_state = 37
self.linearsvc_C = 1
self.output_suffix = None
self.secondary_conf = None
def get_labels_index(self):
return list(range(len(self.labels)))
def get_output_suffix(self):
return self.output_suffix
def load_vectors(self):
self.DX = [] # features
self.DY = [] # label
self.DEXTRA = [] # extra info
with open(self.vectors_dir + '/DX.pickle', "rb") as f:
DX = pickle.load(f)
with open(self.vectors_dir + '/DY.pickle', "rb") as f:
DY = pickle.load(f)
with open(self.vectors_dir + '/DEXTRA.pickle', "rb") as f:
DEXTRA = pickle.load(f)
label_index = { label: i for i, label in enumerate(self.labels) }
for i, X in enumerate(DX):
label = DY[i]
extra = DEXTRA[i]
self.DX.append(X)
self.DY.append(label_index[label])
self.DEXTRA.append(extra)
self.DY = np.array(self.DY)
self.DEXTRA = np.array(self.DEXTRA)
if isinstance(self.DX[0], dict):
print("Use DictVectorizer")
v = DictVectorizer(sparse=False)
self.DX = v.fit_transform(self.DX)
self.feature_names = v.get_feature_names()
elif isinstance(self.DX[0], str):
print("Use TfidfVectorizer")
v = TfidfVectorizer(tokenizer=lambda x: x.split('\n'), token_pattern=None, binary=True)
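# Each newline-separated entry of the raw string is treated as one token;
# binary=True makes the term-frequency part presence/absence before the idf
# weighting and normalization are applied.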
self.DX = v.fit_transform(self.DX).todense()
self.feature_names = v.get_feature_names()
elif isinstance(self.DX[0], np.ndarray):
print("Use arrays directly")
self.DX = np.array(self.DX)
with open(self.vectors_dir + '/features.pickle', "rb") as f:
self.feature_names = pickle.load(f)
self.DY_EXTRAs =
|
pd.DataFrame([[self.labels[y] for y in self.DY], self.DEXTRA], index=["true-label", "EXTRA"])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import os
from datetime import datetime
import plotly.graph_objects as go
import time
def timer():
return '['+datetime.now().strftime("%d/%m/%Y %H:%M:%S")+']'
#Handle the case where the client does not have access to the cargo update links
try:
from confidential.secrets import url_cargo, url_vessels
except:
url_cargo=0
url_vessels=0
print('\033[1;31;48m'+timer()+'[WARNING] Cargos update links are not accessible. Did you set up the secrets.py file correctly? More information in the READ.ME \033[0m')
path="data/external/cargos/"
config = {'displayModeBar': False}
# Functions
def conversion(old):
direction = {'N':1, 'S':-1, 'E': 1, 'W':-1}
new = old.replace(u'°',' ').replace('\'',' ').replace('"',' ')
new = new.split()
new_dir = new.pop()
new.extend([0,0,0])
return (int(new[0])+int(new[1])/60.0+int(new[2])/3600.0) * direction[new_dir]
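# Example: 48°51'24"N -> 48 + 51/60 + 24/3600 = +48.8567 decimal degrees
# (S and W hemispheres return negative values).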
####### First part : Update data
print(timer()+'[INFO] Check cargo files to update...')
ls=[path+"UpdateCargo",path+"UpdateVessels",path+"archives",path+"archives/HistoriqueMarchandises",path+"archives/HistoriqueNavires"]
for elem in ls:
if(os.path.isdir(elem)==0):
try:
os.mkdir(elem)
except OSError:
print('\033[1;31;48m'+timer()+'[ERROR] Creation of the directory failed \033[0m')
#Create files to avoid bugs in HistoriqueNavires
if len(os.listdir(path+"archives/HistoriqueNavires"))==0:
pd.DataFrame(data=[["03/01/2010 09:00","04/01/2010 04:00","HANJINAG - HANJIN","HANJINAG - HANJIN","ATP00014315","HNHL0049W","HNHL0049E","<NAME>","FOS","Validée","Validée"]], columns=["ARRIVEE","DEPART","Rep. Transp EM","Rep. Transp SM","id.AP+","Référence Arrivée","Référence Départ","<NAME>","<NAME>","Statut Arrivée","Statut Départ"]).to_csv(path+'archives/HistoriqueNavires/AMU_VESSEL_2010.txt', sep=' ', encoding = "ISO-8859-1")
## Update Cargo trafic
#Read files with "cargo" in title
folder="UpdateCargo/"
files=os.listdir(path+folder[:-1])
ls=[os.path.getmtime(path+folder+elem) for elem in files]
last_file=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y')
last_file_cargo=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y').strftime('%d/%m/%y')
today=pd.Timestamp.today()
#check if client can access to the links
if url_cargo != 0:
#check time of precedent file
if((last_file.year < today.year) | (last_file.week < (today.week-1))):
try:
cargo_update=pd.read_csv(url_cargo, encoding = "ISO-8859-1", sep=";")
new_file=pd.to_datetime(cargo_update["Date fin"][1], format="%d/%m/%Y").strftime(folder[:-1]+'S%W%d%m%y.csv')
except Exception as e:
print('\033[1;31;48m'+timer()+'[WARNING] Cargos except error:',e,'. Maybe the file given have not the good columns structure? \033[0m')
else:
#Save if not exist
if new_file not in os.listdir(path+folder):
cargo_update.to_csv(path+folder+new_file, sep=";", encoding = "ISO-8859-1")
## Update vessels trafic
folder="UpdateVessels/"
files=os.listdir(path+folder[:-1])
ls=[os.path.getmtime(path+folder+elem) for elem in files]
last_file=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y')
last_file_vessels=pd.to_datetime(files[ls.index(max(ls))][-10:-4], format='%d%m%y').strftime('%d/%m/%y')
today=pd.Timestamp.today()
if url_vessels != 0:
if((last_file.year < today.year) | (last_file.week < today.week)):
try:
cargo_update=pd.read_csv(url_vessels, encoding = "ISO-8859-1", sep=";")
new_file=pd.Timestamp.today().strftime(folder[:-1]+'S%W%d%m%y.csv')
except Exception as e:
print('\033[1;31;48m'+timer()+'[WARNING] Vessels except error:',e,'. Maybe the file given have not the good columns structure? \033[0m')
else:
#Save if not exist
if new_file not in os.listdir(path+folder):
#Remove previous file
os.remove(path+folder+files[ls.index(max(ls))])
#Save new file
cargo_update.to_csv(path+folder+new_file, sep=";", encoding = "ISO-8859-1")
#Correction if file doesn't exist to force the condition IF == TRUE
if os.path.isfile(path+'../../processed/CARGO_2010-2020.xlsx'):
datetime_cargos=datetime.fromtimestamp(os.path.getmtime(path+'../../processed/CARGO_2010-2020.xlsx'))
else:
datetime_cargos=datetime.fromtimestamp(1326244364)
## Update main file: Cargo
folder="UpdateCargo/"
## IF (Last Update file time > Main excel file time) => We update the main file
if datetime.fromtimestamp(max([os.path.getmtime(path+folder+elem) for elem in os.listdir(path+folder[:-1])])) > datetime_cargos:
#Read previous file
if os.path.isfile(path+'../../processed/CARGO_2010-2020.xlsx'):
cargo=pd.read_excel(path+'../../processed/CARGO_2010-2020.xlsx', encoding = "ISO-8859-1", index_col=0).reset_index(drop=True)
else:
cargo=pd.DataFrame()
#Read update files
files=os.listdir(path+folder)
ls=[i for i in os.listdir(path+folder)]
#concat update files
cargo_update=pd.DataFrame()
for elem in ls:
cargo_update_add=pd.read_csv(path+folder+elem, encoding = "ISO-8859-1", sep=";", index_col=0)
cargo_update_add["date"]=pd.to_datetime(elem[-10:-4], format='%d%m%y')
cargo_update=pd.concat([cargo_update_add,cargo_update])
#Clean update files
cargo_update.loc[cargo_update.Type=="IMPORT","Type"]=0
cargo_update.loc[cargo_update.Type=="EXPORT","Type"]=1
cargo_update=cargo_update.rename(columns={"Type":"export"})
cargo_update=cargo_update[["Nom du port","export","Nb 20'","Nb 40'","Nb other","Nb roros","Roulant divers","Nb conv","date"]]
cargo_update["somme"]=cargo_update[["Nb 20'", "Nb 40'", 'Nb other', 'Nb roros', 'Roulant divers', 'Nb conv']].sum(axis=1)
cargo_update=cargo_update.reset_index(drop=True)
cargo_update=pd.pivot_table(cargo_update, values='somme', index=['date','export'], columns=['Nom du port']).reset_index()
cargo_update=cargo_update.rename(columns={"FRFOS":"FOS","FRMRS":"MRS"})
#Concat both files
cargo=pd.concat([cargo,cargo_update]).drop_duplicates(["date","export"])
#If MRS & FOS are NA or ==0 -> Remove rows
cargo=cargo[((~cargo["FOS"].isna()) & (~cargo["MRS"].isna())) | (~cargo["FOS"]==0) & (~cargo["MRS"]==0)]
cargo=cargo.reset_index(drop=True)
#Save
cargo.to_excel(path+'../../processed/CARGO_2010-2020.xlsx', encoding = "ISO-8859-1")
print(timer()+'[INFO] Cargo files updated.')
#Correction if file doesn't exist to force the condition IF == TRUE
if os.path.isfile(path+'../../processed/VESSEL_2010-2020.xlsx'):
datetime_vessels=datetime.fromtimestamp(os.path.getmtime(path+'../../processed/VESSEL_2010-2020.xlsx'))
else:
datetime_vessels=datetime.fromtimestamp(1326244364)
## Update main file: Vessels
folder="UpdateVessels/"
#If Update time > Main file time
if datetime.fromtimestamp(max([os.path.getmtime(path+folder+elem) for elem in os.listdir(path+folder[:-1])])) > datetime_vessels:
#Read archives not CI5
files=os.listdir(path+"archives/HistoriqueNavires")
ls=[i for i in files if os.path.isfile(os.path.join(path+"archives/HistoriqueNavires",i)) and 'AMU' in i]
#Read historical datas
df=
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
Created on January 10, 2022
Author: <NAME> ( <EMAIL> )
====== FORMATTING OF STREAMFLOW DATA FROM THE HIDROWEB PLATFORM ======
This Python code formats daily streamflow data obtained from the HIDROWEB
platform ( https://www.snirh.gov.br/hidroweb/apresentacao ).
The first 13 lines (i.e., every line above the header) of the file
downloaded from the platform must be deleted before it is loaded.
The result is a table with three columns:
- date: the date of the measurement;
- discharge: the measured (daily) discharge, in m³/s;
- status: the status of the measurement (blank, observed, estimated or dry gauge).
Answer y (yes) in the console to save the formatted data to csv.
Missing-value information and statistics on the measurement status are
also printed to the console.
"""
# Required libraries
import pandas as pd
import os
# Set the working directory
os.chdir('C:\\Users\\brech\\Documents\\Estudos\\2021-2\\' +
'Obras_Hidraulicas\\TrabalhoPratico_PythonCodes\\' +
'DadosHidroweb')
#%% READ AND FORMAT THE INPUT DATA
# Input file name
file = 'vazoes_C_83300200.csv'
# Load the data
data = pd.read_csv(file, decimal=',', sep=';')
# Convert the date column to datetime
data['Data'] =
|
pd.to_datetime(data.Data, format='%d/%m/%Y')
|
pandas.to_datetime
|
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from impetuous.convert import create_synonyms , flatten_dict
from scipy.stats import rankdata
from scipy.stats import ttest_rel , ttest_ind , mannwhitneyu
from scipy.stats.mstats import kruskalwallis as kruskwall
from sklearn.decomposition import PCA
import itertools
import typing
def subArraysOf ( Array:list,Array_:list=None ) -> list :
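# Returns every non-empty contiguous sub-array, e.g.
# subArraysOf([1,2,3]) -> [[1,2,3],[2,3],[3],[1,2],[2],[1]]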
if Array_ == None :
Array_ = Array[:-1]
if Array == [] :
if Array_ == [] :
return ( [] )
return( subArraysOf(Array_,Array_[:-1]) )
return([Array]+subArraysOf(Array[1:],Array_))
def permuter( inputs:list , n:int ) -> list :
# permuter( inputs = ['T2D','NGT','Female','Male'] , n = 2 )
return( [p[0] for p in zip(itertools.permutations(inputs,n))] )
def grouper ( inputs, n ) :
iters = [iter(inputs)] * n
return zip ( *iters )
def whiten_data ( Xdf ) :
# REMEMBER BOYS AND GIRLS THIS IS SOMETHING YOU MIGHT NOT WANT TO DO :)
mean_center = lambda x: x-np.mean(x,0)
X = Xdf.values
u , s , v = np.linalg.svd( mean_center(X),full_matrices=False )
X_white = np.dot(X,np.dot( np.diag(s**-1),np.abs(v) )) # we don't know the sign
return ( pd.DataFrame( X_white,index=Xdf.index.values,columns=Xdf.columns ) )
def threshold ( E , A ) :
if not 'pandas' in str(type(A)) or not 'pandas' in str(type(E)):
print ( "ERROR MUST SUPPLY TWO PANDAS DATAFRAMES" )
return ( -1 )
thresholds_df = pd .DataFrame ( np.dot( E,A.T ) ,
columns = A .index ,
index = E .index ) .apply ( lambda x:x/np.sum(E,1) )
return ( thresholds_df )
def solve ( C = pd.DataFrame([ [10,1],[3,5] ]) ,
E = pd.DataFrame([ [25],[31] ]) ):
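# Solves C·x = E via the SVD pseudo-inverse; with the default arguments this
# recovers x = [2, 5] (the system is exactly solvable, so TOLERANCE is ~0).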
if not 'pandas' in str(type(C)) or not 'pandas' in str(type(E)):
print ( "ERROR MUST SUPPLY TWO PANDAS DATAFRAMES" )
return ( -1 )
recover = lambda U,S,Vt : np.dot(U*S,Vt)
cU, cS, cVt = np.linalg.svd(C, full_matrices=False )
cST = 1/cS
psuedo_inverse = pd.DataFrame( recover(cVt.T,cST,cU.T) , index=C.columns ,columns=C.index )
identity = np.dot(C,psuedo_inverse)
TOLERANCE = np.max( np.sqrt( ( identity * ( ( 1-np.eye(len(np.diag(identity)))) ) )**2 ))
return ( np.dot( psuedo_inverse,E),TOLERANCE )
import re
def find_category_variables( istr ) :
return ( re.findall( r'C\((.*?)\)', istr ) )
def encode_categorical( G = ['Male','Male','Female'] ):
#
# CREATES A BINARY ENCODING MATRIX FROM THE SUPPLIED LIST
# USES A PANDAS DATAFRAME AS INTERMEDIATE FOR ERROR CHECKING
# THIS PUTS THE O IN OPLS (ORTHOGONAL)
#
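# e.g. encode_categorical(['Male','Male','Female']) returns a 3x2 one-hot
# frame with one column per unique label (column order follows set() order)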
ugl = list(set(G)) ; n = len(ugl) ; m = len(G)
lgu = { u:j for u,j in zip(ugl,range(n)) }
enc_d = pd.DataFrame( np.zeros(m*n).reshape(-1,n),columns=ugl )
for i in range ( m ) :
j = lgu[G[i]]
enc_d.iloc[i,j] = 1
return ( enc_d )
def create_encoding_journal( use_categories, journal_df ) :
encoding_df = None
for category in use_categories :
catvals = journal_df.loc[category].to_list()
cat_encoding = encode_categorical( catvals )
cat_encoding.index = journal_df.columns.values
if encoding_df is None :
encoding_df = cat_encoding.T
else :
encoding_df = pd.concat([encoding_df,cat_encoding.T])
return ( encoding_df )
def quantify_density_probability ( rpoints , cutoff = None ) :
#
# DETERMINE P VALUES
loc_pdf = lambda X,mean,variance : [ 1./np.sqrt(2.*np.pi*variance)*np.exp(-((x-mean)/(2.*variance))**2) for x in X ]
from scipy.special import erf as erf_
loc_cdf = lambda X,mean,variance : [ 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
loc_Q = lambda X,mean,variance : [ 1. - 0.5*( 1. + erf_( (x-mean)/np.sqrt( 2.*variance ) ) ) for x in X ]
M_,Var_ = np.mean(rpoints),np.std(rpoints)**2
#
# INSTEAD OF THE PROBABILTY DENSITY WE RETURN THE FRACTIONAL RANKS
# SINCE THIS ALLOWS US TO CALCULATE RANK STATISTICS FOR THE PROJECTION
corresponding_density = rankdata (rpoints,'average') / len(rpoints) # loc_pdf( rpoints,M_,Var_ )
corresponding_pvalue = loc_Q ( rpoints,M_,Var_ )
#
# HERE WE MIGHT BE DONE
if not cutoff is None :
resolution = 10. ; nbins = 100.
#
# ONLY FOR ASSESING
h1,h2 = np.histogram(rpoints,bins=int(np.ceil(len(rpoints)/resolution)))
bin_radius = 0.5 * ( h2[1:] + h2[:-1] )
radial_density = np.cumsum( h1 )/np.sum( h1 ) # lt
#
# NOW RETRIEVE ALL DENSITIES OF THE RADII
tol = 1./nbins
corresponding_radius = np.min( bin_radius[radial_density > cutoff/nbins] )
return ( corresponding_pvalue , corresponding_density, corresponding_radius )
return ( corresponding_pvalue , corresponding_density )
def find_category_interactions ( istr ) :
all_cats = re.findall( r'C\((.*?)\)', istr )
interacting = [ ':' in c for c in istr.split(')') ][ 0:len(all_cats) ]
interacting_categories = [ [all_cats[i-1],all_cats[i]] for i in range(1,len(interacting)) if interacting[i] ]
return ( interacting_categories )
def create_encoding_data_frame ( journal_df , formula , bVerbose = False ) :
#
# THE JOURNAL_DF IS THE COARSE GRAINED DATA (THE MODEL)
# THE FORMULA IS THE SEMANTIC DESCRIPTION OF THE PROBLEM
#
interaction_pairs = find_category_interactions ( formula.split('~')[1] )
add_pairs = []
sjdf = set(journal_df.index)
if len( interaction_pairs ) > 0 :
for pair in interaction_pairs :
cpair = [ 'C('+p+')' for p in pair ]
upair = [ pp*(pp in sjdf)+cp*(cp in sjdf and not pp in sjdf) for (pp,cp) in zip( pair,cpair) ]
journal_df.loc[ ':'.join(upair) ] = [ p[0]+'-'+p[1] for p in journal_df.loc[ upair,: ].T.values ]
add_pairs.append(':'.join(upair))
use_categories = list(set(find_category_variables(formula.split('~')[1])))
cusecats = [ 'C('+p+')' for p in use_categories ]
use_categories = [ u*( u in sjdf) + cu *( cu in sjdf ) for (u,cu) in zip(use_categories,cusecats) ]
use_categories = [ *use_categories,*add_pairs ]
#
if len( use_categories )>0 :
encoding_df = create_encoding_journal ( use_categories , journal_df ).T
else :
encoding_df = None
#
if bVerbose :
print ( [ v for v in encoding_df.columns.values ] )
print ( 'ADD IN ANY LINEAR TERMS AS THEIR OWN AXIS' )
#
# THIS TURNS THE MODEL INTO A MIXED LINEAR MODEL
add_df = journal_df.loc[ [c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C('in c],: ]
if len(add_df)>0 :
if encoding_df is None :
encoding_df = add_df.T
else :
encoding_df = pd.concat([ encoding_df.T ,
journal_df.loc[ [ c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C(' in c] , : ] ]).T
return ( encoding_df.apply(pd.to_numeric) )
def interpret_problem ( analyte_df , journal_df , formula , bVerbose=False ) :
#
# THE JOURNAL_DF IS THE COARSE GRAINED DATA (THE MODEL)
# THE ANALYTE_DF IS THE FINE GRAINED DATA (THE DATA)
# THE FORMULA IS THE SEMANTIC DESCRIPTION OF THE PROBLEM
#
interaction_pairs = find_category_interactions ( formula.split('~')[1] )
add_pairs = []
if len( interaction_pairs )>0 :
for pair in interaction_pairs :
journal_df.loc[ ':'.join(pair) ] = [ p[0]+'-'+p[1] for p in journal_df.loc[ pair,: ].T.values ]
add_pairs.append(':'.join(pair))
use_categories = list(set(find_category_variables(formula.split('~')[1])))
use_categories = [u for u in use_categories if 'C('+u+')' in set(formula.replace(' ','').split('~')[1].split('+'))]
use_categories = [ *use_categories,*add_pairs ]
#
if len( use_categories )>0 :
encoding_df = create_encoding_journal ( use_categories , journal_df ).T
else :
encoding_df = None
#
if bVerbose :
print ( [ v for v in encoding_df.columns.values ] )
print ( 'ADD IN ANY LINEAR TERMS AS THEIR OWN AXIS' )
#
# THIS TURNS THE MODEL INTO A MIXED LINEAR MODEL
add_df = journal_df.loc[ [c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C('in c],: ]
if len(add_df)>0 :
if encoding_df is None :
encoding_df = add_df.T
else :
encoding_df = pd.concat([ encoding_df.T ,
journal_df.loc[ [ c.replace(' ','') for c in formula.split('~')[1].split('+') if not 'C(' in c] , : ] ]).T
return ( encoding_df )
def calculate_alignment_properties ( encoding_df , quantx, quanty, scorex,
analyte_df = None , journal_df = None ,
bVerbose = False , synonyms = None ,
blur_cutoff = 99.8 , exclude_labels_from_centroids = [''] ,
study_axii = None , owner_by = 'tesselation' ):
if bVerbose :
print ( np.shape(encoding_df) )
print ( np.shape(analyte_df) )
print ( 'qx:',np.shape(quantx) )
print ( 'qy:',np.shape(quanty) )
print ( 'sx:',np.shape(scorex) )
print ( 'WILL ASSIGN OWNER BY PROXIMITY TO CATEGORICALS' )
if analyte_df is None or journal_df is None:
print ( 'USER MUST SUPPLY ANALYTE AND JOURNAL DATA FRAMES' )
exit(1)
#
# THESE ARE THE CATEGORICAL DESCRIPTORS
use_centroid_indices = [ i for i in range(len(encoding_df.columns.values)) if (
encoding_df.columns.values[i] not in set( exclude_labels_from_centroids )
) ]
#
use_centroids = list( quanty[use_centroid_indices] )
use_labels = list( encoding_df.columns.values[use_centroid_indices] )
#
if owner_by == 'tesselation' :
transcript_owner = [ use_labels[ np.argmin([ np.sum((xw-cent)**2) for cent in use_centroids ])] for xw in quantx ]
sample_owner = [ use_labels[ np.argmin([ np.sum((yw-cent)**2) for cent in use_centroids ])] for yw in scorex ]
#
if owner_by == 'angle' :
angular_proximity = lambda B,A : 1 - np.dot(A,B) / ( np.sqrt(np.dot(A,A))*np.sqrt(np.dot(B,B)) )
transcript_owner = [ use_labels[ np.argmin([ angular_proximity(xw,cent) for cent in use_centroids ])] for xw in quantx ]
sample_owner = [ use_labels[ np.argmin([ angular_proximity(yw,cent) for cent in use_centroids ])] for yw in scorex ]
#
# print ( 'PLS WEIGHT RADIUS' )
radius = lambda vector:np.sqrt(np.sum((vector)**2)) # radii
#
# print ( 'ESTABLISH LENGTH SCALES' )
xi_l = np.max(np.abs(quantx),0)
#
rpoints = np.array( [ radius( v/xi_l ) for v in quantx ] ) # HERE WE MERGE THE AXES
xpoints = np.array( [ radius((v/xi_l)[0]) for v in quantx ] ) # HERE WE USE THE X AXES
ypoints = np.array( [ radius((v/xi_l)[1]) for v in quantx ] ) # HERE WE USE THE Y AXES
#
# print ( 'ESTABLISH PROJECTION OF THE WEIGHTS ONTO THEIR AXES' )
proj = lambda B,A : np.dot(A,B) / np.sqrt( np.dot(A,A) )
#
# ADDING IN ADDITIONAL DIRECTIONS
# THAT WE MIGHT BE INTERESTED IN
if 'list' in str( type( study_axii ) ):
for ax in study_axii :
if len( set( ax ) - set( use_labels ) ) == 0 and len(ax)==2 :
axsel = np.array([ use_centroids[i] for i in range(len(use_labels)) if use_labels[i] in set(ax) ])
axis_direction = axsel[0]-axsel[1]
use_labels .append( '-'.join(ax) )
use_centroids .append( np.array(axis_direction) )
proj_df = pd.DataFrame( [ [ np.abs(proj(P/xi_l,R/xi_l)) for P in quantx ] for R in use_centroids ] ,
index = use_labels , columns=analyte_df.index.values )
#
# print ( 'P VALUES ALIGNED TO PLS AXES' )
for idx in proj_df.index :
proj_p,proj_rho = quantify_density_probability ( proj_df.loc[idx,:].values )
proj_df = proj_df.rename( index = {idx:idx+',r'} )
proj_df.loc[idx+',p'] = proj_p
proj_df.loc[idx+',rho'] = proj_rho
#
# print ( 'THE EQUIDISTANT 1D STATS' )
corresponding_pvalue , corresponding_density , corresponding_radius = quantify_density_probability ( rpoints , cutoff = blur_cutoff )
#
# print ( 'THE TWO XY 1D STATS' )
corr_pvalue_0 , corr_density_0 = quantify_density_probability ( xpoints )
corr_pvalue_1 , corr_density_1 = quantify_density_probability ( ypoints )
#
bOrderedAlphas = False
if True :
# DO ALPHA LEVELS BASED ON DENSITY
bOrderedAlphas = True
use_points = rpoints > corresponding_radius
ordered_alphas = [ float(int(u))*0.5 + 0.01 for u in use_points ]
result_dfs = []
#
# print ( 'COMPILE RESULTS FRAME' )
for ( lookat,I_ ) in [ ( quantx , 0 ) ,
( scorex , 1 ) ] :
lookat = [ [ l[0],l[1] ] for l in lookat ]
if I_ == 1 :
aidx = journal_df.columns.values
else :
aidx = analyte_df.index.values
qdf = pd.DataFrame( [v[0] for v in lookat] , index=aidx , columns = ['x'] )
qdf['y'] = [ v[1] for v in lookat ]
names = aidx
if I_ == 0 :
qdf[ 'owner' ] = transcript_owner
qdf['Corr,p' ] = corresponding_pvalue
qdf['Corr,r' ] = corresponding_density
qdf['Corr0,p'] = corr_pvalue_0
qdf['Corr0,r'] = corr_density_0
qdf['Corr1,p'] = corr_pvalue_1
qdf['Corr1,r'] = corr_density_1
qdf = pd.concat([qdf.T,proj_df]).T
if bOrderedAlphas :
qdf[ 'alpha' ] = ordered_alphas
else :
qdf['alpha'] = [ '0.3' for a in transcript_owner ]
else :
qdf['owner'] = sample_owner # The default should be the aligned projection weight
qdf['alpha'] = [ '0.2' for n in names ]
if synonyms is None :
qdf['name'] = names
else :
qdf['name'] = [ synonyms[v] if v in synonyms else v for v in names ]
result_dfs.append(qdf.copy())
return ( result_dfs )
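#
# ADDED ILLUSTRATION (a sketch, not part of the original module): how the two
# owner_by rules can disagree for a single weight vector and two centroids.
import numpy as np
demo_centroids = [ np.array([1.0, 0.0]) , np.array([10.0, 10.0]) ]
demo_labels = [ 'groupA' , 'groupB' ]
demo_xw = np.array([3.0, 3.0])
# 'tesselation' assigns the nearest centroid in the Euclidean sense -> 'groupA'
demo_tess_owner = demo_labels[ np.argmin([ np.sum((demo_xw-cent)**2) for cent in demo_centroids ]) ]
# 'angle' assigns the centroid with the smallest cosine distance -> 'groupB'
demo_angular = lambda B,A : 1 - np.dot(A,B) / ( np.sqrt(np.dot(A,A))*np.sqrt(np.dot(B,B)) )
demo_angle_owner = demo_labels[ np.argmin([ demo_angular(demo_xw,cent) for cent in demo_centroids ]) ]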
def run_rpls_regression ( analyte_df , journal_df , formula ,
bVerbose = False , synonyms = None , blur_cutoff = 99.8 ,
exclude_labels_from_centroids = [''] , pls_components = 2,
bDeveloperTesting = False ,
study_axii = None , owner_by = 'tesselation'
) :
encoding_df = interpret_problem ( analyte_df , journal_df , formula , bVerbose = bVerbose )
from sklearn.cross_decomposition import PLSRegression as PLS
if not bDeveloperTesting :
pls_components = 2
rpls = PLS( pls_components )
rpls_res = rpls.fit( X = analyte_df.T.values ,
Y = encoding_df .values )
quantx,quanty = rpls_res.x_weights_ , rpls_res.y_weights_
scorex = rpls_res.x_scores_
res_df = calculate_alignment_properties ( encoding_df , quantx, quanty, scorex,
journal_df = journal_df, analyte_df = analyte_df , blur_cutoff = blur_cutoff ,
bVerbose = bVerbose, exclude_labels_from_centroids = exclude_labels_from_centroids ,
study_axii = study_axii , owner_by = owner_by )
return ( res_df )
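#
# USAGE SKETCH (added illustration; the frame names and the formula are assumptions):
# analyte_df holds analytes as rows and samples as columns, journal_df holds the
# describing factors as rows over the same samples, and the right hand side of the
# formula mixes categorical terms C(...) with plain linear terms, e.g.
# results = run_rpls_regression ( analyte_df , journal_df , 'Expression ~ C(Diagnosis) + Age' )
# analyte_frame , sample_frame = results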
import impetuous.fit as ifit
import impetuous.clustering as icluster
def run_shape_alignment_clustering ( analyte_df , journal_df , formula, bVerbose = False ) :
NOTE_ = "This is just a kmeans in arbitrary dimensions that start out with centroids that have been shape aligned"
encoding_df = interpret_problem ( analyte_df , journal_df , formula , bVerbose = bVerbose )
Q = encoding_df.T.apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).values
P = analyte_df .apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).values
centroids = ifit.ShapeAlignment( P, Q ,
bReturnTransform = False ,
bShiftModel = True ,
bUnrestricted = True )
#
# FOR DIAGNOSTIC PURPOSES
centroids_df = pd.DataFrame ( centroids ,
index = encoding_df.columns ,
columns = encoding_df.index )
lookup_ = {i:n for n,i in zip( centroids_df.index,range(len(centroids_df.index)) ) }
labels , centroids = icluster.seeded_kmeans( P , centroids )
res_df = pd.DataFrame( [labels] , columns=analyte_df.index , index=['cluster index'] )
nam_df = pd.DataFrame( [ lookup_[l] for l in labels ] ,
columns = ['cluster name'] , index = analyte_df.index ).T
res_df = pd.concat( [ res_df , nam_df ] )
clusters_df = pd.concat( [ centroids_df, pd.DataFrame( res_df.T.groupby('cluster name').apply(len),columns=['size']) ] ,axis=1 )
return ( res_df , clusters_df )
def knn_clustering_alignment ( P, Q , bHighDim = False ) :
print ( "DOING KMEANS ALIGNMENT INSTEAD" )
return ( kmeans_clustering_alignment( P , Q , bHighDim = bHighDim ) )
def kmeans_clustering_alignment( P , Q , bHighDim=False ) :
NOTE_ = "This is just a standard kmeans in arbitrary dimensions that start out with centroids that have been shape aligned"
ispanda = lambda P: 'pandas' in str(type(P)).lower()
BustedPanda = lambda R : R.values if ispanda(R) else R
P_ = BustedPanda ( P )
Q_ = BustedPanda ( Q )
if bHighDim :
centroids = ifit .HighDimensionalAlignment ( P_ , Q_ )
else :
centroids = ifit .ShapeAlignment ( P_ , Q_ ,
bReturnTransform = False ,
bShiftModel = True ,
bUnrestricted = True )
if ispanda ( Q ) :
#
# FOR DIAGNOSTIC PURPOSES
centroids_df = pd.DataFrame ( centroids ,
index = Q.index ,
columns = Q.columns )
lookup_ = {i:n for n,i in zip( centroids_df.index,range(len(centroids_df.index)) ) }
labels , centroids = icluster.seeded_kmeans( P_ , centroids )
if ispanda ( Q ) and ispanda ( P ) :
#
# MORE DIAGNOSTICS
res_df = pd.DataFrame( [labels] , columns=P.index , index=['cluster index'] )
res_df .loc[ 'cluster name' ] = [ lookup_[l] for l in res_df.loc['cluster index'].values ]
print ( res_df )
return ( np.array(labels), np.array(centroids) )
def tol_check( val, TOL=1E-10 ):
if val > TOL :
print ( "WARNING: DATA ENTROPY HIGH (SNR LOW)", val )
ispanda = lambda P : 'pandas' in str(type(P)).lower()
def multifactor_solution ( analyte_df , journal_df , formula ,
bLegacy = False ) :
A , J , f = analyte_df , journal_df , formula
if bLegacy :
encoding_df = interpret_problem ( analyte_df = A , journal_df = J , formula = f ).T
else :
encoding_df = create_encoding_data_frame ( journal_df = J , formula = f ).T
solution_ = solve ( A.T, encoding_df.T )
tol_check ( solution_[1] )
beta_df = pd.DataFrame ( solution_[0] , index=A.index , columns=encoding_df.index )
U, S, VT = np.linalg.svd ( beta_df.values,full_matrices=False )
P = pd.DataFrame( U.T , index = [ 'Comp'+str(r) for r in range(len(U.T))] , columns = A.index )
W = pd.DataFrame( VT , index = [ 'Comp'+str(r) for r in range(len(U.T))] , columns = encoding_df.index )
Z = threshold ( encoding_df.T , S*W ) .T
return ( P.T , W.T , Z.T , encoding_df.T , beta_df )
def multifactor_evaluation ( analyte_df , journal_df , formula ) :
#
# ALTHOUGH A GOOD METHOD IT IS STILL NOT SUFFICIENT
#
P, W, Z, encoding_df , beta_df = multifactor_solution ( analyte_df , journal_df , formula )
eval_df = beta_df.apply(lambda x:x**2)
all = [beta_df]
for c in eval_df.columns :
all.append ( pd.DataFrame ( quantify_density_probability ( eval_df.loc[:,c].values ),
index = [c+',p',c+',r'], columns=eval_df.index ).T)
res_df = pd.concat( all,axis=1 )
for c in res_df.columns:
if ',p' in c:
q = [ qv[0] for qv in qvalues(res_df.loc[:,c].values) ]
res_df.loc[:,c.split(',p')[0]+',q'] = q
return ( res_df )
def regression_assessment ( model , X , y , bLog = False ) :
desc_ = """
ALTERNATIVE NAIVE MODEL ASSESSMENT FOR A REGRESSION MODEL
!PRVT2D1701CM5487!
"""
y_ = y
coefs = model.coef_
mstat = dict()
if bLog :
X = np.array( [ [ np.log(x) for x in xx ] for xx in X ])
yp = np.exp(np.dot( coefs, X ) + model.intercept_ )
else :
yp = (np.dot( coefs, X ) + model.intercept_ )
#
n = len ( y_ ) ; p = len(coefs)
ym = np.mean( y_ ) # CRITICAL DIMENSION ...
#
# BZ FORMS
TSS = np.array([ np.sum(( y_ - ym ) ** 2, axis=0) ])[0]; dof_tss = n-1 ; mstat['TSS'] = TSS
RSS = np.array([ np.sum(( y_ - yp ) ** 2, axis=0) ])[0]; dof_rss = n-p ; mstat['RSS'] = RSS
ESS = np.array([ np.sum(( yp - ym ) ** 2, axis=0) ])[0]; dof_ess = p-1 ; mstat['ESS'] = ESS
mstat['dof_tss'] = dof_tss ; mstat['dof_rss'] = dof_rss ; mstat['dof_ess'] = dof_ess
#
TMS = TSS / dof_tss ; mstat['TMS'] = TMS
RMS = RSS / dof_rss ; mstat['RMS'] = RMS
EMS = ESS / dof_ess ; mstat['EMS'] = EMS
#
# F-TEST
# NUMERATOR IS THE EXPLAINED MEAN SQUARE , DENOMINATOR THE RESIDUAL MEAN SQUARE
dof_numerator = dof_ess
dof_denominator = dof_rss
from scipy.stats import f
fdist = f( dof_numerator , dof_denominator )
f0 = EMS / RMS
#
mstat['dof_numerator'] = dof_numerator
mstat['dof_denominator'] = dof_denominator
mstat['p-value'] = 1 - fdist.cdf(f0)
mstat['f0'] = f0
mstat['yp'] = yp
mstat['model'] = model
#
return ( mstat )
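#
# WORKED SKETCH (added illustration): the sum of squares decomposition used above,
# TSS = RSS + ESS , demonstrated on a tiny ordinary least squares fit.
import numpy as np
demo_x = np.array([0., 1., 2., 3., 4.])
demo_y = np.array([1.1, 2.9, 5.2, 6.8, 9.1])
demo_b1 , demo_b0 = np.polyfit( demo_x , demo_y , 1 )
demo_yp = demo_b1*demo_x + demo_b0
demo_TSS = np.sum(( demo_y - np.mean(demo_y) )**2)
demo_RSS = np.sum(( demo_y - demo_yp )**2)
demo_ESS = np.sum(( demo_yp - np.mean(demo_y) )**2)
assert np.isclose( demo_TSS , demo_RSS + demo_ESS )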
def proj_c ( P ) :
# P CONTAINS MUTUALLY ORTHOGONAL COMPONENTS ALONG THE COLUMNS
# THE CS CALCULATION MIGHT SEEM STRANGE BUT FULFILS THE PURPOSE
if not ispanda(P) : # pandas frames are cool
print ( "FUNCTION REQUIRES A SAIGA OR PANDA DATA FRAME" )
CS = P.T.apply( lambda x: pd.Series( [x[0],x[1]]/np.sqrt(np.sum(x**2)),index=['cos','sin']) ).T
RHO = P.T.apply( lambda x: np.sqrt(np.sum(x**2)) )
CYL = pd.concat( [RHO*CS['cos'],RHO*CS['sin']],axis=1 )
CYL.columns = ['X','Y']
return ( CYL )
def multivariate_factorisation ( analyte_df , journal_df , formula ,
bVerbose = False , synonyms = None , blur_cutoff = 99.8 ,
exclude_labels_from_centroids = [''] ,
bDeveloperTesting = False , bReturnAll = False ,
study_axii = None , owner_by = 'angle' ,
bDoRecast = False , bUseThresholds = False ) :
P, W, Z, encoding_df , beta_df = multifactor_solution ( analyte_df , journal_df , formula )
#
# USE THE INFLATION PROJECTION AS DEFAULT
if not bUseThresholds :
aA = np.linalg.svd ( analyte_df - np.mean(np.mean(analyte_df)) , full_matrices=False )
aE = np.linalg.svd ( encoding_df.T , full_matrices=False )
Z = pd.DataFrame ( np.dot( np.dot( W.T , aE[-1] ), aA[-1]) ,
columns = encoding_df.T.columns ,
index= [ 'mComp' + str(r) for r in range(len(aE[-1]))]
).T
if bDoRecast :
print ( "WARNING: THROWING AWAY INFORMATION IN ORDER TO DELIVER A" )
print ( " VISUALLY MORE PLEASING POINT CLOUD ... ")
P = proj_c( P )
W = proj_c( W )
Z = proj_c( Z )
res_df = calculate_alignment_properties ( encoding_df ,
quantx = P.values , quanty = W.values , scorex = Z.values ,
journal_df = journal_df , analyte_df = analyte_df ,
blur_cutoff = blur_cutoff , bVerbose = bVerbose ,
exclude_labels_from_centroids = exclude_labels_from_centroids ,
study_axii = study_axii , owner_by = owner_by )
if bReturnAll :
return ( { 'Multivariate Solutions' : res_df ,
'Feature Scores' : P , 'Encoding Weights' : W ,
'Sample Scores' : Z , 'Encoding DataFrame' : encoding_df })
else :
return ( res_df )
def associations ( M , W = None , bRanked = True ) :
ispanda = lambda P : 'pandas' in str(type(P)).lower()
if not ispanda( M ) :
print ( "FUNCTION ",'recast_alignments'," REQUIRES ", 'M'," TO BE A PANDAS DATAFRAME" )
bValid = False
if not W is None :
if not len(W.columns.values) == len(M.columns.values):
W = M
else:
bValid = True
else :
W = M
if bRanked :
from scipy.stats import rankdata
M = ( M.T.apply(lambda x:rankdata(x,'average')).T-0.5 )/len(M.columns)
W = ( W.T.apply(lambda x:rankdata(x,'average')).T-0.5 )/len(W.columns)
rho1 = M.T.apply( lambda x:np.sqrt( np.dot( x,x ) ) )
rho2 = rho1
if bValid :
rho2 = W.T.apply( lambda x:np.sqrt( np.dot( x,x ) ) )
R2 = pd.DataFrame( np.array([np.array([r]) for r in rho1.values])*[rho2.values] ,
index = rho1.index, columns = rho2.index )
PQ = pd.DataFrame( np.dot( M,W.T ), index = rho1.index, columns = rho2.index )
res = PQ/R2
return ( res )
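#
# SKETCH (added illustration): with W = None and bRanked = False the returned frame
# holds the cosine similarities between the rows of M , so the diagonal is one.
import numpy as np
import pandas as pd
demo_M = pd.DataFrame( np.random.rand(3,5) , index=['f1','f2','f3'] )
demo_rho = demo_M.T.apply( lambda x: np.sqrt(np.dot(x,x)) )
demo_cos = pd.DataFrame( np.dot(demo_M,demo_M.T) , index=demo_M.index , columns=demo_M.index ) / np.outer( demo_rho , demo_rho )
assert np.allclose( np.diag(demo_cos.values) , 1.0 )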
crop = lambda x,W:x[:,:W]
def run_shape_alignment_regression( analyte_df , journal_df , formula ,
bVerbose = False , synonyms = None , blur_cutoff = 99.8 ,
exclude_labels_from_centroids = [''] ,
study_axii = None , owner_by = 'tesselation' ,
transform = crop ) :
print ( 'WARNING: STILL UNDER DEVELOPMENT' )
print ( 'WARNING: DEFAULT IS TO CROP ALIGNED FACTORS!!')
encoding_df = interpret_problem ( analyte_df , journal_df , formula , bVerbose = bVerbose )
Q = encoding_df.T.apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).copy().values
P = analyte_df .apply( lambda x:(rankdata(x,'average')-0.5)/len(x) ).copy().values
centroids = ifit.ShapeAlignment( P, Q ,
bReturnTransform = False ,
bShiftModel = True ,
bUnrestricted = True )
#
# FOR DIAGNOSTIC PURPOSES
centroids_df = pd.DataFrame ( centroids ,
index = encoding_df.columns ,
columns = encoding_df.index )
xws = ifit.WeightsAndScoresOf( P )
yws = ifit.WeightsAndScoresOf( centroids )
W = np.min( [*np.shape(xws[0]),*np.shape(yws[0])] )
quantx = transform( xws[0],W )
quanty = transform( yws[0],W )
scorex = transform( xws[1],W )
res_df = calculate_alignment_properties ( encoding_df , quantx, quanty, scorex,
analyte_df = analyte_df.copy() , journal_df = journal_df.copy() ,
blur_cutoff = blur_cutoff , bVerbose = bVerbose,
exclude_labels_from_centroids = exclude_labels_from_centroids ,
study_axii = study_axii , owner_by = owner_by, synonyms=synonyms )
return ( res_df )
def add_foldchanges ( df, information_df , group='', fc_type=0 , foldchange_indentifier = 'FC,') :
all_vals = list(set(information_df.loc[group].values))
pair_values = [all_vals[i] for i in range(len(all_vals)) if i<2 ]
group1 = df.iloc[:,[n in pair_values[0] for n in information_df.loc[group].values] ].T
group2 = df.iloc[:,[n in pair_values[1] for n in information_df.loc[group].values] ].T
if fc_type == 0:
FC = np.mean(group1.values,0) - np.mean(group2.values,0)
if fc_type == 1:
FC = np.log2( np.mean(group1.values,0) - np.mean(group2.values,0) )
FCdf = pd.DataFrame(FC,index=df.index,columns=[foldchange_indentifier+'-'.join(pair_values) ] )
df =
|
pd.concat([df.T,FCdf.T])
|
pandas.concat
|
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
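# Example (added illustration): _clean_dict({1: "a", 2.5: "b"}) returns {"1": "a", "2.5": "b"}.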
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key:
|
u("value1")
|
pandas.compat.u
|
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import time
import socket
import re
from ipwhois import IPWhois
import difflib
import model.queries as qrs
from utils.helpers import timer
class NodesMetaData:
def __init__(self, index, dateFrom, dateTo):
self.idx = index
self.dateFrom = dateFrom
self.dateTo = dateTo
self.df = self.BuildDataFrame()
def resolveHost(self, host):
h = ''
try:
if host == '127.0.0.1':
print("Incorrect value for host")
h = socket.gethostbyaddr(host)[0]
except Exception as inst:
print(str("socket exception: "+inst.args[1]))
h = host
return h
def isHost(self, val):
return re.match("^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$", val)
def findHost(self, row, df):
host_from_index = row['host_index']
host_from_meta = row['host_meta']
if self.isHost(host_from_index):
return host_from_index
else:
# if it's an IP, try to get the hostname from the ps_meta records
if (host_from_meta is not np.nan):
if self.isHost(host_from_meta):
return host_from_meta
# it is possible that this IP has another record with a valid hostname
elif len(df[df['ip'] == host_from_index]['host_index'].values) > 0:
return df[df['ip'] == host_from_index]['host_index'].values[0]
# otherwise we try to resolve the IP from socket.gethostbyaddr(host)
else:
return self.resolveHost(host_from_index)
return ''
def findSite(self, row, df, hosts):
# we only fill in sites for hosts that are part of the configuration
if ((row['site_x'] is None) and (row['host_in_ps_meta'] == True)):
ratio = {}
hs = row['host']
meta_sites = df[(df['host'] == hs)]['site_y'].values
first_non_null = next(filter(bool, (x for x in meta_sites)), None)
# if site name is available in ps_meta data, take it
# first_non_null == first_non_null - that expression checks that the value is not NaN (NaN != NaN)
if ((first_non_null) and (first_non_null == first_non_null)):
return first_non_null
else:
# otherwise find a similar host name and take its site name
for h in hosts:
if h != hs:
similarity = difflib.SequenceMatcher(None, hs, h).ratio()
ratio[h] = similarity
# get the value with the highest similarity score
sib = max(ratio, key=ratio.get)
sib_site_index = df[df['host'] == sib]['site_x'].values
fnn_index = next(filter(bool, (x for x in sib_site_index)), None)
sib_site_meta = df[df['host'] == sib]['site_y'].values
fnn_meta = next(filter(bool, (x for x in sib_site_meta)), None)
# Check for the site name of the sibling
if (fnn_index):
return fnn_index
elif (fnn_meta):
return fnn_meta
# otherwise get the IPWhois network name
else:
return self.getIPWhoIs(sib)
else:
return row['site_x']
def getIPWhoIs(self, item):
val = ''
try:
obj = IPWhois(item)
res = obj.lookup_whois()
val = res['nets'][0]['name']
except Exception as inst:
if self.isHost(item):
val = ''
else: val = inst.args
return val
@timer
def BuildDataFrame(self):
print('Query ', self.idx, ' for the period', self.dateFrom, '-', self.dateTo)
# get metadata
meta_df = pd.DataFrame.from_dict(qrs.get_metadata(self.dateFrom, self.dateTo), orient='index',
columns=['site', 'admin_name', 'admin_email', 'ipv6', 'ipv4']).reset_index().rename(columns={'index': 'host'})
# in ES there is a limit of up to 10K buckets for aggregation,
# thus we need to split the queries and then merge the results
ip_site_df = pd.DataFrame.from_dict(qrs.get_ip_site(self.idx, self.dateFrom, self.dateTo),
orient='index', columns=['site', 'is_ipv6']).reset_index().rename(columns={'index': 'ip'})
ip_host_df = pd.DataFrame.from_dict(qrs.get_ip_host(self.idx, self.dateFrom, self.dateTo),
orient='index', columns=['host']).reset_index().rename(columns={'index': 'ip'})
host_site_df = pd.DataFrame.from_dict(qrs.get_host_site(self.idx, self.dateFrom, self.dateTo),
orient='index', columns=['site']).reset_index().rename(columns={'index': 'host'})
df =
|
pd.merge(ip_site_df, ip_host_df, how='outer', left_on=['ip'], right_on=['ip'])
|
pandas.merge
|
import typing
import uuid
from collections import Counter
from typing import Union
import numpy as np
import pandas as pd
from ai_inspector import ModelInspector
from scipy.stats import chi2, ks_2samp
from scipy.stats.stats import Ks_2sampResult, wasserstein_distance
from generated.ml_worker_pb2 import SingleTestResult, TestMessage, TestMessageType
from ml_worker.core.ml import run_predict
from ml_worker.testing.abstract_test_collection import AbstractTestCollection
class DriftTests(AbstractTestCollection):
@staticmethod
def _calculate_psi(category, actual_distribution, expected_distribution):
# To use log and avoid zero distribution probability,
# we bound distribution probability by min_distribution_probability
min_distribution_probability = 0.0001
expected_distribution_bounded = max(expected_distribution[category], min_distribution_probability)
actual_distribution_bounded = max(actual_distribution[category], min_distribution_probability)
modality_psi = (expected_distribution_bounded - actual_distribution_bounded) * \
np.log(expected_distribution_bounded / actual_distribution_bounded)
return modality_psi
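# Worked example (added for clarity): for a category whose share is 0.50 in the expected
# data and 0.25 in the actual data, the contribution is
# (0.50 - 0.25) * ln(0.50 / 0.25) = 0.25 * ln(2) ~= 0.173;
# each term is non-negative because both factors always share the same sign.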
@staticmethod
def _calculate_frequencies(actual_series, expected_series, max_categories):
all_modalities = list(set(expected_series).union(set(actual_series)))
if max_categories is not None and len(all_modalities) > max_categories:
var_count_expected = dict(Counter(expected_series).most_common(max_categories))
other_modalities_key = 'other_modalities_' + uuid.uuid1().hex
var_count_expected[other_modalities_key] = len(expected_series) - sum(var_count_expected.values())
categories_list = list(var_count_expected.keys())
var_count_actual = Counter(actual_series)
# For test data, we take the same category names as expected_data
var_count_actual = {i: var_count_actual[i] for i in categories_list}
var_count_actual[other_modalities_key] = len(actual_series) - sum(var_count_actual.values())
all_modalities = categories_list
else:
var_count_expected = Counter(expected_series)
var_count_actual = Counter(actual_series)
expected_frequencies = np.array([var_count_expected[i] for i in all_modalities])
actual_frequencies = np.array([var_count_actual[i] for i in all_modalities])
return all_modalities, actual_frequencies, expected_frequencies
@staticmethod
def _calculate_drift_psi(actual_series, expected_series, max_categories):
all_modalities, actual_frequencies, expected_frequencies = DriftTests._calculate_frequencies(
actual_series, expected_series, max_categories)
expected_distribution = expected_frequencies / len(expected_series)
actual_distribution = actual_frequencies / len(actual_series)
total_psi = 0
output_data = pd.DataFrame(columns=["Modality", "Train_distribution", "Test_distribution", "Psi"])
for category in range(len(all_modalities)):
modality_psi = DriftTests._calculate_psi(category, actual_distribution, expected_distribution)
total_psi += modality_psi
row = {
"Modality": all_modalities[category],
"Train_distribution": expected_distribution[category],
"Test_distribution": expected_distribution[category],
"Psi": modality_psi
}
output_data = output_data.append(
|
pd.Series(row)
|
pandas.Series
|
"""
prepares the data for the CNN and trains the CNN
"""
import numpy as np
import pandas as pd
import random
import keras
import tensorflow as tf
from keras.models import Sequential, Model, Input
from keras.layers import Dense, Dropout, Flatten, Concatenate, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from sklearn.preprocessing import StandardScaler
from keras.layers.advanced_activations import LeakyReLU
from tensorflow.keras.optimizers import Adam, Nadam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import EarlyStopping
import connectome.preprocessing.data_loader as dtl
from connectome.preprocessing.reorder_matrices_regions import reorder_matrices_regions
from connectome.preprocessing.data_loader import flat_to_mat_aggregation
from connectome.models.E2E_conv import E2E_conv
def model_brainnet_cnn(X, y, aggregation=False, reorder=False, augmentation=False, scale=.04, augm_fact=5,
batch_size=32, epochs=400, patience=5, validation_size=.2,
E2E_filter: int = 32, E2N_filter: int = 48, N2G_tiler: int = 64,
dense_filter: int = 64, dropout_rate=0.5,
kernel_regularizer=keras.regularizers.l2(0.01),
kernel_initializer='he_uniform', optimizer=Adam(), activation='relu',
loss="binary_crossentropy"
):
"""
Trains a Convolutional Neural Network model based on the design by Kawahara et al. (2016)
http://doi.org/10.1016/j.neuroimage.2016.09.046.
Args:
X: The training dataset
y: The true labels
aggregation: Boolean, whether the matrices were aggregated based on yeo7
reorder: Boolean, whether to reorder the matrices based on the yeo7 network (True is recommended when training
with Brainnetome data). Only applicable to data based on the brainnetome atlas.
augmentation: Boolean, whether to apply data augmentation to increase the training data size
scale: Standard deviation of the random noise to be applied for data augmentation
augm_fact: Augmentation factor, Size of train dataset = original train dataset + augm_fact * noise dataset
batch_size: number of samples that will be propagated through the network
epochs: Number of training iterations
patience: Number of iterations to early stop if no improvement occurs
validation_size: Size of the validation set to evaluate during training
E2E_filter: Number of E2E filters, for a detailed description check out:
https://kawahara.ca/convolutional-neural-networks-for-adjacency-matrices/
E2N_filter: Number of E2N filters, for a detailed description check out:
https://kawahara.ca/convolutional-neural-networks-for-adjacency-matrices/
N2G_tiler: Number of N2G filters , for a detailed description check out:
https://kawahara.ca/convolutional-neural-networks-for-adjacency-matrices/
dense_filter: Dense layer size, default 64
dropout_rate: Size of nodes to randomly drop during training, default .5
kernel_regularizer: Layer weight regularizers to be applied, default l2 regularization
kernel_initializer: Kernel initilization strategy, default he_uniform
optimizer: Method to train the model, default Adam
activation: Hidden layer activation, default relu
loss: loss function, default binary_crossentropy
Returns:
A fitted neural network model
"""
assert isinstance(aggregation, bool), "invalid datatype for argument aggregation"
assert isinstance(reorder, bool), "invalid datatype for argument reorder"
assert isinstance(augmentation, bool), "invalid datatype for argument augmentation"
assert isinstance(scale, float) & (scale >= 0.0), "invalid path datatype for argument scale or smaller than 0"
assert isinstance(augm_fact, int) & (
augm_fact >= 1), "invalid datatype for argument augm_fact or not greater than 1"
assert isinstance(epochs, int) & (
epochs >= 1), "invalid datatype for epochs or not greater than 1"
assert isinstance(patience, int) & (
patience >= 1), "invalid datatype for argument patience or not greater than 1"
assert isinstance(validation_size, float) & (validation_size >= 0.0) & \
(validation_size <= 1.0), "invalid value validation_size"
assert isinstance(E2E_filter, int) & (
E2E_filter >= 1), "invalid datatype for argument E2E_filter or not greater than 1"
assert isinstance(E2N_filter, int) & (
E2N_filter >= 1), "invalid datatype for argument E2N_filter or not greater than 1"
assert isinstance(N2G_tiler, int) & (
N2G_tiler >= 1), "invalid datatype for argument N2G_tiler or not greater than 1"
assert isinstance(dense_filter, int) & (
dense_filter >= 1), "invalid datatype for argument dense_filter or not greater than 1"
assert isinstance(dropout_rate, float) & (dropout_rate >= 0.0) & \
(dropout_rate <= 1.0), "invalid value dropout_rate"
# add all possible parameters of brainnet
X_train, X_train_struc, y_train = preprocess_for_cnn(X, y, aggregation=aggregation, reorder=reorder,
augmentation=augmentation,
scale=scale, augmentation_factor=augm_fact)
# create validation set
# Train val split
shuffled_indices = list(range(len(X_train)))
random.shuffle(shuffled_indices)
val_ind = int(np.round(len(X_train) * validation_size))
val_idxs = shuffled_indices[:val_ind]
train_idxs = shuffled_indices[val_ind:]
# validation set
val_x = X_train[val_idxs]
val_x_struc = X_train_struc[val_idxs]
val_y = y_train[val_idxs]
# train set
train_x = X_train[train_idxs]
train_x_struc = X_train_struc[train_idxs]
train_y = y_train[train_idxs]
# model input dimensions
input_img_dim = (X_train.shape[1], X_train.shape[2], 1)
input_struc_dim = (X_train_struc.shape[1])
print('Starting to train Model')
# Initialize neural network model
brainnetcnn = brain_net_cnn(input_dim_img=input_img_dim, input_dim_struc=input_struc_dim, output_dim=1,
E2E_filter=E2E_filter, E2N_filter=E2N_filter, N2G_tiler=N2G_tiler,
dense_filter=dense_filter, dropout_rate=dropout_rate,
kernel_regularizer=kernel_regularizer,
kernel_initializer=kernel_initializer, opt=optimizer, activation=activation,
loss=loss)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
if input_struc_dim != 0:
brainnetcnn.fit([train_x, train_x_struc], train_y, epochs=epochs, batch_size=batch_size, verbose=1,
validation_data=([val_x, val_x_struc], val_y), callbacks=[callback])
else:
brainnetcnn.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=1,
validation_data=(val_x, val_y), callbacks=[callback])
return brainnetcnn
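# Usage sketch (added illustration; the variable names are placeholders): X is expected to hold
# the flattened connectivity matrices in columns named "<row>_<col>" (both parts digits, e.g. "0_1"),
# as split out by preprocess_for_cnn below, plus any structural covariates in the remaining columns,
# with y holding the binary labels, e.g.
# model = model_brainnet_cnn(X_train, y_train, reorder=True, augmentation=True, epochs=100)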
def preprocess_for_cnn(X, y, aggregation=False, reorder=False, augmentation=False, scale=.07, augmentation_factor=5):
"""
Prepares and reformats the data for the CNN TensorFlow model.
Args:
X: The training dataset
y: The true labels
aggregation: Boolean, whether the matrices were aggregated based on yeo7
reorder: Boolean, whether to reorder the matrices based on the yeo7 network. Only applicable to data
based on the brainnetome atlas.
augmentation: Boolean, whether to apply data augmentation to increase the training data size
scale: Standard deviation of the random noise to be applied for data augmentation
augmentation_factor: Augmentation factor, Size of train dataset = original dataset + augm_fact * noise dataset
Returns:
X_img: symmetric np.array with the connectivity matrices
X_struc: np.array with structural information such as e.g. age etc.
y: dataset labels
"""
assert isinstance(aggregation, bool), "invalid datatype for argument aggregation"
assert isinstance(reorder, bool), "invalid datatype for argument reorder"
assert isinstance(augmentation, bool), "invalid datatype for argument augmentation"
assert isinstance(scale, float) & (scale >= 0.0), "invalid path datatype for argument scale or smaller than 0"
assert isinstance(augmentation_factor, int) & (
augmentation_factor >= 1), "invalid datatype for argument augm_fact or not greater than 1"
# append the connectivity matrix cols to X_img_cols and the rest to X_struc_cols
X_img_cols = []
X_struc_cols = []
for x in X.columns:
if len(x.split("_")) > 1 and x.split("_")[0].isdigit() and x.split("_")[1].isdigit():
X_img_cols.append(x)
else:
X_struc_cols.append(x)
# apply data augmentation
if augmentation:
print("Starting Data Augmentation")
X_img_aug, X_struc_aug, y_aug = augmented_data(X, y, X_img_cols, X_struc_cols, sd=scale,
augm_fact=augmentation_factor)
# merging augmented data with input data
X_img =
|
pd.concat([X[X_img_cols], X_img_aug])
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 09:55:07 2018
@author: michaelek
"""
import numpy as np
import pandas as pd
###################################
### Parameters
freq_codes = ['D', 'W', 'M', 'A-JUN']
###################################
### Functions
def allo_ts_apply(row, from_date, to_date, freq, limit_col, remove_months=False):
"""
Pandas apply function that converts the allocation data to a monthly time series.
"""
crc_from_date =
|
pd.Timestamp(row['from_date'])
|
pandas.Timestamp
|
import json
import os
import random
from random import sample
import numpy as np
import numpy.random
import re
from collections import Counter
import inspect
import pandas as pd
import matplotlib.pyplot as plt
import requests
from IPython.display import HTML
import seaborn as sns
import networkx as nx
from pylab import rcParams
try:
from wordcloud import WordCloud
except ImportError:
print("wordcloud er ikke installert, kan ikke lage ordskyer")
#************** For defining wordbag search
def dict2pd(dictionary):
res = pd.DataFrame.from_dict(dictionary).fillna(0)
s = (res.mean(axis=0))
s = s.rename('snitt')
res = res.append(s)
return res.sort_values(by='snitt', axis=1, ascending=False).transpose()
def def2dict(ddef):
res = dict()
defs = ddef.split(';')
for d in defs:
lex = d.split(':')
if len(lex) == 2:
#print('#'.join(lex))
hyper = lex[0].strip()
occurrences = [x.strip() for x in lex[1].split(',')]
res[hyper] = occurrences
for x in res:
for y in res[x]:
if y.capitalize() not in res[x]:
res[x].append(y.capitalize())
return res
def wordbag_eval(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags", json = param)
return dict2pd(r.json())
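# Usage sketch (added illustration; the URNs are made-up placeholders): a wordbag maps a topic
# label to a list of words, which def2dict builds from a 'label: w1, w2; label2: w3' string, e.g.
# bag = def2dict('sjø: hav, båt, fisk; land: fjell, skog')
# scores = wordbag_eval(bag, [2008051404065, 2011071408123])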
def wordbag_eval_para(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags_para", json = param)
return r.json()
def get_paragraphs(urn, paras):
"""Return paragraphs for urn"""
param = dict()
param['paragraphs'] = paras
param['urn'] = urn
r = requests.get("https://api.nb.no/ngram/paragraphs", json=param)
return dict2pd(r.json())
### ******************* wordbag search end
def ner(text = None, dist=False):
"""Analyze text for named entities - set dist = True will return the four values that go into decision"""
r = []
if text != None:
r = requests.post("https://api.nb.no/ngram/ner", json={'text':text,'dist':dist})
return r.json()
#**** names ****
def check_navn(navn, limit=2, remove='Ja Nei Nå Dem De Deres Unnskyld Ikke Ah Hmm <NAME> Jaja Jaha'.split()):
"""Removes all items in navn with frequency below limit and words in all case as well as all words in list 'remove'"""
r = {x:navn[x] for x in navn if navn[x] > limit and x.upper() != x and not x in remove}
return r
def sentences(urns, num=300):
if isinstance(urns[0], list):
urns = [str(x[0]) for x in urns]
params = {'urns':urns,
'num':num}
res = requests.get("https://api.nb.no/ngram/sentences", params=params)
return res.json()
def names(urn, ratio = 0.3, cutoff = 2):
""" Return namens in book with urn. Returns uni- , bi-, tri- and quadgrams """
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/names', json={'urn':urn, 'ratio':ratio, 'cutoff':cutoff})
x = r.json()
result = (
Counter(x[0][0]),
Counter({tuple(x[1][i][0]):x[1][i][1] for i in range(len(x[1]))}),
Counter({tuple(x[2][i][0]):x[2][i][1] for i in range(len(x[2]))}),
Counter({tuple(x[3][i][0]):x[3][i][1] for i in range(len(x[3]))})
)
return result
def name_graph(name_struct):
m = []
for n in name_struct[0]:
m.append(frozenset([n]))
for n in name_struct[1:]:
m += [frozenset(x) for x in n]
G = []
for x in m:
for y in m:
if x < y:
G.append((' '.join(x), ' '.join(y)))
N = []
for x in m:
N.append(' '.join(x))
Gg = nx.Graph()
Gg.add_nodes_from(N)
Gg.add_edges_from(G)
return Gg
def aggregate_urns(urnlist):
"""Sum up word frequencies across urns"""
if isinstance(urnlist[0], list):
urnlist = [u[0] for u in urnlist]
r = requests.post("https://api.nb.no/ngram/book_aggregates", json={'urns':urnlist})
return r.json()
# Norwegian word bank
def word_variant(word, form):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/variant_form", params={'word':word, 'form':form})
return r.json()
def word_paradigm(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/paradigm", params = {'word': word})
return r.json()
def word_form(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/word_form", params = {'word': word})
return r.json()
def word_lemma(word):
""" Find lemma form for a given word form """
r = requests.get("https://api.nb.no/ngram/word_lemma", params = {'word': word})
return r.json()
def word_freq(urn, words):
""" Find frequency of words within urn """
params = {'urn':urn, 'words':words}
r = requests.post("https://api.nb.no/ngram/freq", json=params)
return dict(r.json())
def tot_freq(words):
""" Find total frequency of words """
params = {'words':words}
r = requests.post("https://api.nb.no/ngram/word_frequencies", json=params)
return dict(r.json())
def book_count(urns):
params = {'urns':urns}
r = requests.post("https://api.nb.no/ngram/book_count", json=params)
return dict(r.json())
def sttr(urn, chunk=5000):
r = requests.get("https://api.nb.no/ngram/sttr", json = {'urn':urn, 'chunk':chunk})
return r.json()
def totals(top=200):
r = requests.get("https://api.nb.no/ngram/totals", json={'top':top})
return dict(r.json())
def navn(urn):
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/tingnavn', json={'urn':urn})
return dict(r.json())
def digibokurn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("(?<=digibok_)[0-9]{13}", T)
def urn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("[0-9]{13}", T)
def metadata(urn=None):
urns = pure_urn(urn)
#print(urns)
r = requests.post("https://api.nb.no/ngram/meta", json={'urn':urns})
return r.json()
def pure_urn(data):
"""Convert URN-lists with extra data into list of serial numbers.
Args:
data: May be a list of URNs, a list of lists with URNs as their
initial element, or a string of raw texts containing URNs
Any pandas dataframe or series. Urns must be in the first column of dataframe.
Returns:
List[str]: A list of URNs. Empty list if input is in the wrong
format or contains no URNs
"""
korpus_def = []
if isinstance(data, list):
if not data: # Empty list
korpus_def = []
if isinstance(data[0], list): # List of lists
try:
korpus_def = [str(x[0]) for x in data]
except IndexError:
korpus_def = []
else: # Assume data is already a list of URNs
korpus_def = [str(int(x)) for x in data]
elif isinstance(data, str):
korpus_def = [str(x) for x in urn_from_text(data)]
elif isinstance(data, (int, np.integer)):
korpus_def = [str(data)]
elif isinstance(data, pd.DataFrame):
col = data.columns[0]
urns = pd.to_numeric(data[col])
korpus_def = [str(int(x)) for x in urns.dropna()]
elif isinstance(data, pd.Series):
korpus_def = [str(int(x)) for x in data.dropna()]
return korpus_def
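# Added illustration of the accepted input shapes (the URNs are made-up placeholders):
# pure_urn([2011071408123, 2011071408124]) -> ['2011071408123', '2011071408124']
# pure_urn([[2011071408123, 'title', 1999]]) -> ['2011071408123']
# pure_urn('see digibok_2011071408123 for text') -> ['2011071408123']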
#### N-Grams from fulltext updated
def unigram(word, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/unigrams", params={
'word':word,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def bigram(first,second, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/bigrams", params={
'first':first,
'second':second,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def book_counts(period=(1800, 2050)):
r = requests.get("https://api.nb.no/ngram/book_counts", params={
'period0':period[0],
'period1':period[1],
})
return frame(dict(r.json()))
####
def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):
"""Compute difference of difference (first/second)/(rf/rs)"""
try:
a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)
a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)
a = a_first.join(a_second)
b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)
b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)
if rf == rs:
b_second.columns = [rs + '2']
b = b_first.join(b_second)
s_a = a.mean()
s_b = b.mean()
f1 = s_a[a.columns[0]]/s_a[a.columns[1]]
f2 = s_b[b.columns[0]]/s_b[b.columns[1]]
res = f1/f2
except:
res = 'Missing some data - only have data for: ' + ', '.join([x for x in a.columns.append(b.columns)])
return res
def df_combine(array_df):
"""Combine one columns dataframes"""
import pandas as pd
cols = []
for i in range(len(array_df)):
#print(i)
if array_df[i].columns[0] in cols:
array_df[i].columns = [array_df[i].columns[0] + '_' + str(i)]
cols.append(array_df[i].columns[0])
return pd.concat(array_df, axis=1, sort=True)
def col_agg(df, col='sum'):
c = df.sum(axis=0)
c = pd.DataFrame(c)
c.columns = [col]
return c
def row_agg(df, col='sum'):
c = df.sum(axis=1)
c = pd.DataFrame(c)
c.columns = [col]
return c
def get_freq(urn, top=50, cutoff=3):
"""Get frequency list for urn"""
if isinstance(urn, list):
urn = urn[0]
r = requests.get("https://api.nb.no/ngram/urnfreq", json={'urn':urn, 'top':top, 'cutoff':cutoff})
return Counter(dict(r.json()))
####=============== GET URNS ==================##########
def book_corpus(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
return frame(book_urn(words, author, title, subtitle, ddk, subject, period, gender, lang, trans, limit),
"urn author title year".split())
def book_urn(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
"""Get URNs for books with metadata"""
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_urn(query)
def unique_urns(korpus, newest=True):
author_title = {(c[1],c[2]) for c in korpus}
corpus = {(c[0], c[1]):[d for d in korpus if c[0] == d[1] and c[1]==d[2]] for c in author_title }
for c in corpus:
corpus[c].sort(key=lambda c: c[3])
if newest == True:
res = [corpus[c][-1] for c in corpus]
else:
res = [corpus[c][0] for c in corpus]
return res
def refine_book_urn(urns = None, words = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Refine URNs for books with metadata"""
# if empty urns nothing to refine
if urns is None or urns == []:
return []
# check if urns is a metadata list, and pick out first elements if that is the case
if isinstance(urns[0], list):
urns = [x[0] for x in urns]
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'urns'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
#print(query)
return refine_urn(urns, query)
def best_book_urn(word = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Get URNs for books with metadata"""
if word is None:
return []
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'word'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_best_urn(word, query)
def get_urn(metadata=None):
"""Get urns from metadata"""
if metadata is None:
metadata = {}
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 100
if not 'year' in metadata:
metadata['year'] = 1900
r = requests.get('https://api.nb.no/ngram/urn', json=metadata)
return r.json()
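# Usage sketch (added illustration; the author pattern is a placeholder): the metadata dict uses
# the same keys as the calls elsewhere in this module, e.g.
# get_urn({'author': 'hamsun%', 'year': 1890, 'next': 30, 'limit': 20})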
def refine_urn(urns, metadata=None):
"""Refine a list urns using extra information"""
if metadata is None:
metadata = {}
metadata['urns'] = urns
if not ('words' in metadata):
metadata['words'] = []
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 520
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.post('https://api.nb.no/ngram/refineurn', json=metadata)
return r.json()
def get_best_urn(word, metadata=None):
"""Get the best urns from metadata containing a specific word"""
metadata['word'] = word
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 600
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.get('https://api.nb.no/ngram/best_urn', json=metadata)
return r.json()
def get_papers(top=5, cutoff=5, navn='%', yearfrom=1800, yearto=2020, samplesize=100):
"""Get newspapers"""
div = lambda x, y: (int(x/y), x % y)
chunks = 20
# split samplesize into chunks, go through the chunks and then the remainder
(first, second) = div(samplesize, chunks)
r = []
# collect chunkwise
for i in range(first):
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':chunks}
).json()
# collect the remainder
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':second}
).json()
return [dict(x) for x in r]
def urn_coll(word, urns=[], after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
r = requests.post("https://api.nb.no/ngram/urncoll", json={'word':word, 'urns':urns,
'after':after, 'before':before, 'limit':limit})
res = pd.DataFrame.from_dict(r.json(), orient='index')
if not res.empty:
res = res.sort_values(by=res.columns[0], ascending = False)
return res
def urn_coll_words(words, urns=None, after=5, before=5, limit=1000):
"""Find collocations for a group of words within a set of books given by a list of URNs. Only books at the moment"""
coll = pd.DataFrame()
if urns != None:
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
colls = Counter()
if isinstance(words, str):
words = words.split()
res = Counter()
for word in words:
try:
res += Counter(
requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word':word,
'urns':urns,
'after':after,
'before':before,
'limit':limit}
).json()
)
except:
pass
coll = pd.DataFrame.from_dict(res, orient='index')
if not coll.empty:
coll = coll.sort_values(by=coll.columns[0], ascending = False)
return coll
def get_aggregated_corpus(urns, top=0, cutoff=0):
res = Counter()
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
for u in urns:
#print(u)
res += get_freq(u, top = top, cutoff = cutoff)
return pd.DataFrame.from_dict(res, orient='index').sort_values(by=0, ascending = False)
def compare_word_bags(bag_of_words, another_bag_of_words, first_freq = 0, another_freq = 1, top=100, first_col = 0, another_col= 0):
"""Compare two columns taken from two or one frame. Parameters x_freq are frequency limits used to cut down candidate words
from the bag of words. Compare along the columns where first_col and another_col are column numbers. Typical situation is that
bag_of_words is a one column frame and another_bag_of_words is another one column frame. When the columns are all from one frame,
just change column numbers to match the columns"""
diff = bag_of_words[bag_of_words > first_freq][bag_of_words.columns[first_col]]/another_bag_of_words[another_bag_of_words > another_freq][another_bag_of_words.columns[another_col]]
return frame(diff, 'diff').sort_values(by='diff', ascending=False)[:top]
def collocation(
word,
yearfrom=2010,
yearto=2018,
before=3,
after=3,
limit=1000,
corpus='avis',
lang='nob',
title='%',
ddk='%',
subtitle='%'):
"""Defined collects frequencies for a given word"""
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto,
'title':title,
'ddk':ddk,
'subtitle':subtitle}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def collocation_data(words, yearfrom = 2000, yearto = 2005, limit = 1000, before = 5, after = 5, title = '%', corpus='bok'):
"""Collocation for a set of words sum up all the collocations words is a list of words or a blank separated string of words"""
import sys
a = dict()
if isinstance(words, str):
words = words.split()
for word in words:
print(word)
try:
a[word] = collocation(
word,
yearfrom = yearfrom, yearto = yearto, limit = limit,
corpus = corpus, before = before,
after = after, title = title
)
a[word].columns = [word]
except:
print(word, ' error situation', sys.exc_info())
result = pd.DataFrame()
for w in a:
result = result.join(a[w], how='outer')
return pd.DataFrame(result.sum(axis=1)).sort_values(by=0, ascending=False)
class CollocationCorpus:
from random import sample
def __init__(self, corpus = None, name='', maximum_texts = 500):
urns = pure_urn(corpus)
if len(urns) > maximum_texts:
selection = sample(urns, maximum_texts)
else:
selection = urns
self.corpus_def = selection
self.corpus = get_aggregated_corpus(self.corpus_def, top=0, cutoff=0)
def summary(self, head=10):
info = {
'corpus_definition':self.corpus[:head],
'number_of_words':len(self.corpus)
}
return info
def collocation_old(word, yearfrom=2010, yearto=2018, before=3, after=3, limit=1000, corpus='avis'):
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def heatmap(df, color='green'):
return df.fillna(0).style.background_gradient(cmap=sns.light_palette(color, as_cmap=True))
def get_corpus_text(urns, top = 0, cutoff=0):
k = dict()
if isinstance(urns, list):
# a list of urns, or a korpus with urns as first element
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
# assume it is a single urn, text or number
urns = [urns]
for u in urns:
#print(u)
k[u] = get_freq(u, top = top, cutoff = cutoff)
df = pd.DataFrame(k)
res = df.sort_values(by=df.columns[0], ascending=False)
return res
def normalize_corpus_dataframe(df):
colsums = df.sum()
for x in colsums.index:
#print(x)
df[x] = df[x].fillna(0)/colsums[x]
return True
def show_korpus(korpus, start=0, size=4, vstart=0, vsize=20, sortby = ''):
"""Show corpus as a panda dataframe
start = 0 indicates which dokument to show first, dataframe is sorted according to this
size = 4 how many documents (or columns) are shown
top = 20 how many words (or rows) are shown"""
if sortby != '':
val = sortby
else:
val = korpus.columns[start]
return korpus[korpus.columns[start:start+size]].sort_values(by=val, ascending=False)[vstart:vstart + vsize]
def aggregate(korpus):
"""Make an aggregated sum of all documents across the corpus, here we use average"""
return pd.DataFrame(korpus.fillna(0).mean(axis=1))
def convert_list_of_freqs_to_dataframe(referanse):
"""The function get_papers() returns a list of frequencies - convert it"""
res = []
for x in referanse:
res.append( dict(x))
result = pd.DataFrame(res).transpose()
normalize_corpus_dataframe(result)
return result
def get_corpus(top=0, cutoff=0, navn='%', corpus='avis', yearfrom=1800, yearto=2020, samplesize=10):
if corpus == 'avis':
result = get_papers(top=top, cutoff=cutoff, navn=navn, yearfrom=yearfrom, yearto=yearto, samplesize=samplesize)
res = convert_list_of_freqs_to_dataframe(result)
else:
urns = get_urn({'author':navn, 'year':yearfrom, 'neste':yearto-yearfrom, 'limit':samplesize})
res = get_corpus_text([x[0] for x in urns], top=top, cutoff=cutoff)
return res
class Cluster:
def __init__(self, word = '', filename = '', period = (1950,1960) , before = 5, after = 5, corpus='avis', reference = 200,
word_samples=1000):
if word != '':
self.collocates = collocation(word, yearfrom=period[0], yearto = period[1], before=before, after=after,
corpus=corpus, limit=word_samples)
self.collocates.columns = [word]
if type(reference) is pd.core.frame.DataFrame:
reference = reference
elif type(reference) is int:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=reference)
else:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=int(reference))
self.reference = aggregate(reference)
self.reference.columns = ['reference_corpus']
self.word = word
self.period = period
self.corpus = corpus
else:
if filename != '':
self.load(filename)
def cluster_set(self, exponent=1.1, top = 200, aslist=True):
combo_corp = self.reference.join(self.collocates, how='outer')
normalize_corpus_dataframe(combo_corp)
korpus = compute_assoc(combo_corp, self.word, exponent)
korpus.columns = [self.word]
if top <= 0:
res = korpus.sort_values(by=self.word, ascending=False)
else:
res = korpus.sort_values(by=self.word, ascending=False).iloc[:top]
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def add_reference(self, number=20):
ref = get_corpus(yearfrom=self.period[0], yearto=self.period[1], samplesize=number)
ref = aggregate(ref)
ref.columns = ['add_ref']
normalize_corpus_dataframe(ref)
self.reference = aggregate(self.reference.join(ref, how='outer'))
return True
def save(self, filename=''):
if filename == '':
filename = "{w}_{p}-{q}.json".format(w=self.word,p=self.period[0], q = self.period[1])
model = {
'word':self.word,
'period':self.period,
'reference':self.reference.to_dict(),
'collocates':self.collocates.to_dict(),
'corpus':self.corpus
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
print('saving to:', filename)
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.word = model['word']
self.period = model['period']
self.corpus = model['corpus']
self.reference = pd.DataFrame(model['reference'])
self.collocates = pd.DataFrame(model['collocates'])
except:
print('something went wrong')
return True
def search_words(self, words, exponent=1.1):
if type(words) is str:
words = [w.strip() for w in words.split()]
df = self.cluster_set(exponent=exponent, top=0, aslist=False)
sub= [w for w in words if w in df.index]
res = df.transpose()[sub].transpose().sort_values(by=df.columns[0], ascending=False)
return res
def wildcardsearch(params=None):
if params is None:
params = {'word': '', 'freq_lim': 50, 'limit': 50, 'factor': 2}
res = requests.get('https://api.nb.no/ngram/wildcards', params=params)
if res.status_code == 200:
result = res.json()
else:
result = {'status':'feil'}
resultat = pd.DataFrame.from_dict(result, orient='index')
if not(resultat.empty):
resultat.columns = [params['word']]
return resultat
def sorted_wildcardsearch(params):
res = wildcardsearch(params)
if not res.empty:
res = res.sort_values(by=params['word'], ascending=False)
return res
def make_newspaper_network(key, wordbag, titel='%', yearfrom='1980', yearto='1990', limit=500):
if type(wordbag) is str:
wordbag = wordbag.split()
r = requests.post("https://api.nb.no/ngram/avisgraph", json={
'key':key,
'words':wordbag,
'yearto':yearto,
'yearfrom':yearfrom,
'limit':limit})
G = nx.Graph()
if r.status_code == 200:
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > 0 and x != y])
else:
print(r.text)
return G
def make_network(urn, wordbag, cutoff=0):
if type(urn) is list:
urn = urn[0]
if type(wordbag) is str:
wordbag = wordbag.split()
G = make_network_graph(urn, wordbag, cutoff)
return G
def make_network_graph(urn, wordbag, cutoff=0):
r = requests.post("https://api.nb.no/ngram/graph", json={'urn':urn, 'words':wordbag})
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def make_network_name_graph(urn, tokens, tokenmap=None, cutoff=2):
if isinstance(urn, list):
urn = urn[0]
# tokens should be a list of list of tokens. If it is list of dicts pull out the keys (= tokens)
if isinstance(tokens[0], dict):
tokens = [list(x.keys()) for x in tokens]
r = requests.post("https://api.nb.no/ngram/word_graph", json={'urn':urn, 'tokens':tokens, 'tokenmap':tokenmap})
#print(r.text)
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def token_convert_back(tokens, sep='_'):
""" convert a list of tokens to string representation"""
res = [tokens[0]]
for y in tokens[1:]:
res.append([tuple(x.split(sep)) for x in y])
l = len(res)
for x in range(1, 4-l):
res.append([])
return res
def token_convert(tokens, sep='_'):
""" convert back to tuples """
tokens = [list(x.keys()) for x in tokens]
tokens = [[(x,) for x in tokens[0]], tokens[1], tokens[2], tokens[3]]
conversion = []
for x in tokens:
conversion.append([sep.join(t) for t in x])
return conversion
def token_map_to_tuples(tokens_as_strings, sep='_', arrow='==>'):
tuples = []
for x in tokens_as_strings:
token = x.split(arrow)[0].strip()
mapsto = x.split(arrow)[1].strip()
tuples.append((tuple(token.split(sep)), tuple(mapsto.split(sep))))
#tuples = [(tuple(x.split(arrow).strip()[0].split(sep)), tuple(x.split(arrow)[1].strip().split(sep))) for x in tokens_as_strings]
return tuples
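# A small sketch of the expected input format (hypothetical names, not from any real result): each
# string pairs a token with the token it maps to, joined by `sep` and separated by `arrow`, e.g.
# >>> token_map_to_tuples(['Nordmann ==> Kari_Nordmann'])
# [(('Nordmann',), ('Kari', 'Nordmann'))]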
def token_map(tokens, strings=False, sep='_', arrow= '==>'):
""" tokens as from nb.names()"""
if isinstance(tokens[0], dict):
# get the keys(), otherwise it is already just a list of tokens up to length 4
tokens = [list(x.keys()) for x in tokens]
# convert tokens to tuples and put them all in one list
tokens = [(x,) for x in tokens[0]] + tokens[1] + tokens[2] + tokens[3]
tm = []
#print(tokens)
for token in tokens:
if isinstance(token, str):
trep = (token,)
elif isinstance(token, list):
trep = tuple(token)
token = tuple(token)
else:
trep = token
n = len(trep)
#print(trep)
if trep[-1].endswith('s'):
cp = list(trep[:n-1])
cp.append(trep[-1][:-1])
cp = tuple(cp)
#print('copy', cp, trep)
if cp in tokens:
#print(trep, cp)
trep = cp
larger = [ts for ts in tokens if set(ts) >= set(trep)]
#print(trep, ' => ', larger)
larger.sort(key=lambda x: len(x), reverse=True)
tm.append((token,larger[0]))
res = tm
if strings == True:
res = [sep.join(x[0]) + ' ' + arrow + ' ' + sep.join(x[1]) for x in tm]
return res
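# Illustrative note (hypothetical data): token_map takes the one- to four-word name candidates from
# names(), maps every shorter form to the longest candidate containing it, and folds a trailing
# plural 's' into the singular when both occur, so something like ('Nansen',) would map to
# ('Fridtjof', 'Nansen') if that longer form is among the candidates.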
def draw_graph_centrality(G, h=15, v=10, fontsize=20, k=0.2, arrows=False, font_color='black', threshold=0.01):
node_dict = nx.degree_centrality(G)
subnodes = dict({x:node_dict[x] for x in node_dict if node_dict[x] >= threshold})
x, y = rcParams['figure.figsize']
rcParams['figure.figsize'] = h, v
pos =nx.spring_layout(G, k=k)
ax = plt.subplot()
ax.set_xticks([])
ax.set_yticks([])
G = G.subgraph(subnodes)
nx.draw_networkx_labels(G, pos, font_size=fontsize, font_color=font_color)
nx.draw_networkx_nodes(G, pos, alpha=0.5, nodelist=subnodes.keys(), node_size=[w * 1000 for w in subnodes.values()])
nx.draw_networkx_edges(G, pos, alpha=0.7, arrows=arrows, edge_color='lightblue', width=1)
rcParams['figure.figsize'] = x, y
return True
def combine(clusters):
"""Make new collocation analyses from data in clusters"""
colls = []
collocates = clusters[0].collocates
for c in clusters[1:]:
collocates = collocates.join(c.collocates, rsuffix='-' + str(c.period[0]))
return collocates
def cluster_join(cluster):
clusters = [cluster[i] for i in cluster]
clst = clusters[0].cluster_set(aslist=False)
for c in clusters[1:]:
clst = clst.join(c.cluster_set(aslist=False), rsuffix = '_'+str(c.period[0]))
return clst
def serie_cluster(word, startår, sluttår, inkrement, before=5, after=5, reference=150, word_samples=500):
tidscluster = dict()
for i in range(startår, sluttår, inkrement):
tidscluster[i] = Cluster(
word,
corpus='avis',
period=(i, i + inkrement - 1),
before=before,
after=after,
reference=reference,
word_samples=word_samples)
print(i, i+inkrement - 1)
return tidscluster
def save_serie_cluster(tidscluster):
for i in tidscluster:
tidscluster[i].save()
return 'OK'
def les_serie_cluster(word, startår, sluttår, inkrement):
tcluster = dict()
for i in range(startår, sluttår, inkrement):
print(i, i+inkrement - 1)
tcluster[i] = Cluster(filename='{w}_{f}-{t}.json'.format(w=word, f=i,t=i+inkrement - 1))
return tcluster
def make_cloud(json_text, top=100, background='white', stretch=lambda x: 2**(10*x), width=500, height=500, font_path=None):
pairs0 = Counter(json_text).most_common(top)
pairs = {x[0]:stretch(x[1]) for x in pairs0}
wc = WordCloud(
font_path=font_path,
background_color=background,
width=width,
#color_func=my_colorfunc,
ranks_only=True,
height=height).generate_from_frequencies(pairs)
return wc
def draw_cloud(sky, width=20, height=20, fil=''):
plt.figure(figsize=(width,height))
plt.imshow(sky, interpolation='bilinear')
figplot = plt.gcf()
if fil != '':
figplot.savefig(fil, format='png')
return
def cloud(pd, column='', top=200, width=1000, height=1000, background='black', file='', stretch=10, font_path=None):
if column == '':
column = pd.columns[0]
data = json.loads(pd[column].to_json())
a_cloud = make_cloud(data, top=top,
background=background, font_path=font_path,
stretch=lambda x: 2**(stretch*x), width=width, height=height)
draw_cloud(a_cloud, fil=file)
return
def make_a_collocation(word, period=(1990, 2000), before=5, after=5, corpus='avis', samplesize=100, limit=2000):
collocates = collocation(word, yearfrom=period[0], yearto=period[1], before=before, after=after,
corpus=corpus, limit=limit)
collocates.columns = [word]
reference = get_corpus(yearfrom=period[0], yearto=period[1], samplesize=samplesize)
ref_agg = aggregate(reference)
ref_agg.columns = ['reference_corpus']
return ref_agg
def compute_assoc(coll_frame, column, exponent=1.1, refcolumn = 'reference_corpus'):
return pd.DataFrame(coll_frame[column]**exponent/coll_frame.mean(axis=1))
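# Doctest-style sketch of compute_assoc (toy numbers, not real corpus frequencies): the target
# column is raised to `exponent` and divided by the row-wise mean over all columns, so words that
# are over-represented around the node word score above 1 and background words score below 1.
# >>> toy = pd.DataFrame({'demokrati': [0.004, 0.0005],
# ...                     'reference_corpus': [0.001, 0.0009]}, index=['valg', 'fisk'])
# >>> compute_assoc(toy, 'demokrati', exponent=1.0)
# # 'valg' scores 1.6, 'fisk' about 0.71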
class Corpus:
def __init__(self, filename = '', target_urns = None, reference_urns = None, period = (1950,1960), author='%',
title='%', ddk='%', gender='%', subject='%', reference = 100, max_books=100):
params = {
'year':period[0],
'next': period[1]-period[0],
'subject':subject,
'ddk':ddk,
'author':author,
#'gender':gender, does not seem to work for get_urn - check the API
'title':title,
'limit':max_books,
'reference':reference
}
self.params = params
self.coll = dict()
self.coll_graph = dict()
if filename == '':
if target_urns != None:
målkorpus_def = target_urns
else:
målkorpus_def = get_urn(params)
#print("Antall bøker i målkorpus ", len(målkorpus_def))
if isinstance(målkorpus_def[0], list):
målkorpus_urn = [str(x[0]) for x in målkorpus_def]
#print(målkorpus_urn)
else:
målkorpus_urn = målkorpus_def
if len(målkorpus_urn) > max_books and max_books > 0:
target_urn = list(numpy.random.choice(målkorpus_urn, max_books))
else:
target_urn = målkorpus_urn
if reference_urns != None:
referansekorpus_def = reference_urns
else:
# select from period, usually used only if the target is defined by metadata
referansekorpus_def = get_urn({'year':period[0], 'next':period[1]-period[0], 'limit':reference})
#print("Number of books in the reference corpus: ", len(referansekorpus_def))
# the reference should be distinct from the target corpus
referanse_urn = [str(x[0]) for x in referansekorpus_def]
self.reference_urn = referanse_urn
self.target_urn = target_urn
# make sure there is no overlap between target and reference
#
referanse_urn = list(set(referanse_urn) - set(target_urn))
målkorpus_txt = get_corpus_text(target_urn)
normalize_corpus_dataframe(målkorpus_txt)
if referanse_urn != []:
referanse_txt = get_corpus_text(referanse_urn)
normalize_corpus_dataframe(referanse_txt)
combo = målkorpus_txt.join(referanse_txt)
else:
referanse_txt = målkorpus_txt
combo = målkorpus_txt
self.combo = combo
self.reference = referanse_txt
self.target = målkorpus_txt
self.reference = aggregate(self.reference)
self.reference.columns = ['reference_corpus']
## document frequencies
mål_docf = pd.DataFrame(pd.DataFrame(målkorpus_txt/målkorpus_txt).sum(axis=1))
combo_docf = pd.DataFrame(pd.DataFrame(combo/combo).sum(axis=1))
ref_docf = pd.DataFrame(pd.DataFrame(referanse_txt/referanse_txt).sum(axis=1))
### Normalize the document frequencies
normalize_corpus_dataframe(mål_docf)
normalize_corpus_dataframe(combo_docf)
normalize_corpus_dataframe(ref_docf)
self.målkorpus_tot = aggregate(målkorpus_txt)
self.combo_tot = aggregate(combo)
self.mål_docf = mål_docf
self.combo_docf = combo_docf
self.lowest = self.combo_tot.sort_values(by=0)[0][0]
else:
self.load(filename)
return
def difference(self, freq_exp=1.1, doc_exp=1.1, top = 200, aslist=True):
res = pd.DataFrame(
(self.målkorpus_tot**freq_exp/self.combo_tot)*(self.mål_docf**doc_exp/self.combo_docf)
)
res.columns = ['diff']
if top > 0:
res = res.sort_values(by=res.columns[0], ascending=False).iloc[:top]
else:
res = res.sort_values(by=res.columns[0], ascending=False)
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
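# How the ranking above works (illustrative numbers only): a word with relative frequency 0.004 in
# the target corpus and 0.002 in the combined corpus, occurring in 60% of the target documents but
# only 30% of all documents, scores roughly (0.004/0.002) * (0.6/0.3) = 4 with both exponents at
# 1.0; exponents above 1 tilt the ranking slightly towards the more frequent words.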
def save(self, filename):
model = {
'params':self.params,
'target': self.målkorpus_tot.to_json(),
'combo': self.combo_tot.to_json(),
'target_df': self.mål_docf.to_json(),
'combo_df': self.combo_docf.to_json()
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.params = model['params']
#print(self.params)
self.målkorpus_tot = pd.read_json(model['target'])
#print(self.målkorpus_tot[:10])
self.combo_tot = pd.read_json(model['combo'])
self.mål_docf = pd.read_json(model['target_df'])
self.combo_docf = pd.read_json(model['combo_df'])
except:
print('something went wrong')
return True
def collocations(self, word, after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
r = requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word': word,
'urns': self.target_urn,
'after': after,
'before': before,
'limit': limit
}
)
temp = pd.DataFrame.from_dict(r.json(), orient='index')
normalize_corpus_dataframe(temp)
self.coll[word] = temp.sort_values(by = temp.columns[0], ascending = False)
return True
def conc(self, word, before=8, after=8, size=10, combo=0):
if combo == 0:
urns = self.target_urn + self.reference_urn
elif combo == 1:
urns = self.target_urn
else:
urns = self.reference_urn
if len(urns) > 300:
urns = list(numpy.random.choice(urns, 300, replace=False))
return get_urnkonk(word, {'urns':urns, 'before':before, 'after':after, 'limit':size})
def sort_collocations(self, word, comparison = None, exp = 1.0, above = None):
if comparison is None:
comparison = self.combo_tot[0]
try:
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
except KeyError:
print('Constructing a collocation for {w} with default parameters.'.format(w=word))
self.collocations(word)
res =
|
pd.DataFrame(self.coll[word][0]**exp/comparison)
|
pandas.DataFrame
|
import numpy as np
from torch.utils.data import Dataset
from src.models.lang_model.w2v_averager_model import W2vAveragerModel
from sklearn.preprocessing import StandardScaler
import datetime
import pandas as pd
from copy import deepcopy
# import matplotlib.pyplot as plt
"""
Data import functions
"""
def make_dfs(paths):
df = []
for path in paths:
df.append(
|
pd.read_json(path)
|
pandas.read_json
|
"""
A library of analytical methods.
"""
import datetime as dt  # used by sample_dates, multisim and simulate below
import dateutil.parser  # used by sample_dates below
import numpy as np
import pandas as pd
import arrow
from .settings import Config
from .base import RecipeBase
from .scrapers import stockmarket
__all__ = ['any_data', 'MovingAvg', 'sample_dates', 'simulate',
'multisim']
def any_data(data):
"""If there is data, raise alert.
::parents:: data
::alerts:: data exists, data does not exist
"""
alerts = []
if isinstance(data, pd.DataFrame) and not data.empty:
alerts.append('data exists')
else:
alerts.append('data does not exist')
return (None, alerts)
class MovingAvg(RecipeBase):
valid_type = 'market'
def __init__(self, symbol='SPY'):
self.studyname = f"{symbol} Moving Avg"
self.parents = {'prices': stockmarket.StockDaily(symbol)}
@staticmethod
def process(prices, **kwargs):
alerts = []
centering = False
outdf = prices[['adjClose']].copy()
outdf = outdf.rename(columns={'adjClose': 'close_price'})
outdf['5-day avg'] = outdf['close_price'].rolling(5, center=centering).mean()
outdf['30-day avg'] = outdf['close_price'].rolling(30, center=centering).mean()
outdf['100-day avg'] = outdf['close_price'].rolling(100, center=centering).mean()
today = outdf.tail(1)
diff_5_30 = today['5-day avg'] - today['30-day avg']
if diff_5_30[0] > 0:
alerts.append('5d over 30d')
else:
alerts.append('5d under 30d')
return (outdf, alerts)
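# Toy sketch of the crossover check above (hypothetical prices, pandas only, no data feed needed):
# >>> prices = pd.DataFrame({'adjClose': range(1, 41)},
# ...                       index=pd.date_range('2020-01-01', periods=40))
# >>> outdf, alerts = MovingAvg.process(prices)
# >>> alerts
# ['5d over 30d']   # a steadily rising series keeps the 5-day average above the 30-day average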
def sample_dates(data, N=100, window=365, backdate=0):
"""Sample the available dates in the data.
::parents:: data
::params:: N, window, backdate
::alerts:: complete
"""
try:
backdt = data.index[-1] - dt.timedelta(days=int(backdate))
except ValueError:
backdt = dateutil.parser.parse(backdate)
interval = data[backdt - dt.timedelta(days=int(window)):backdt]
sample_idx = np.random.choice(interval.index, int(N), replace=False)
sample_idx.sort()
alerts = ['complete']
return (sample_idx, alerts)
def multisim(prices, backtest, cash=10000, weightstep=0.1, confidence=2, days=50, N=10):
"""Run the simulation across various sampled date intervals.
::parents:: prices, backtest
::params:: cash, weightstep, confidence, days, N
::alerts:: complete
"""
days = int(days)
window = backtest.index[-1] - backtest.index[0] - dt.timedelta(days=days)
start_dates, _ = sample_dates(backtest, N=N, window=window.days, backdate=days)
cols = ['start date', 'end date', 'market return', '% return', 'alpha']
results = []
for start in start_dates:
start = pd.Timestamp(start)
end = start + dt.timedelta(days=days)
trial, _ = simulate(prices, backtest, cash, weightstep, confidence, start, days)
endvals = trial.tail(1)[['market return', '% return', 'alpha']].values[0]
results.append([start, end, *endvals])
data = pd.DataFrame(results, columns=cols)
alerts = ['complete']
return data, alerts
def simulate(prices, backtest, cash=10000, weightstep=1.0, confidence=2, start_date=None, days=0):
"""Calculate the portfolio value gains/losses over the backtest period.
::parents:: prices, backtest
::params:: cash, weightstep, confidence, start_date, days
::alerts:: complete
"""
cash = int(cash)
weightstep = float(weightstep)
confidence = int(confidence)
days = int(days)
# Set up sub-intervals
if start_date and days:
if isinstance(start_date, str):
start_date = arrow.get(start_date).replace(tzinfo=Config.TZ)
end_date = start_date.shift(days=days)
prices = prices[start_date:end_date]
backtest = backtest[start_date:end_date]
# Get random market prices
data = pd.merge(
how='inner',
left=prices,
right=backtest,
left_index=True,
right_index=True,
)
data['market'] = data['low'] + np.random.rand(len(data)) * (data['high'] - data['low'])
# Run the portfolio
history = []
portfolio = Portfolio(cash, 0, 0)
for _, row in data.iterrows():
portfolio.price = row['market']
portfolio.split(row['splitFactor'])
net_action = _calc_net_action(row['signals'], confidence)
portfolio.weight_trade(net_action * weightstep)
portfolio.dividend(row['divCash'])
history.append(portfolio.as_dict.copy())
history_df =
|
pd.DataFrame(history, data.index)
|
pandas.DataFrame
|
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
#Code starts here
data = pd.read_csv(path)
data_sample = data.sample(n=sample_size, random_state=0)
sample_mean = data_sample['installment'].mean()
sample_std = data_sample['installment'].std()
margin_of_error = z_critical*(sample_std/sample_size**0.5)
confidence_interval = (sample_mean-margin_of_error),(sample_mean+margin_of_error)
true_mean = data['installment'].mean()
print('True mean :',true_mean)
if (sample_mean-margin_of_error)<true_mean<(sample_mean+margin_of_error) :
print('True mean falls in range of Confidence Interval')
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig, axes = plt.subplots(nrows = 3 , ncols = 1)
for i in range(len(sample_size)) :
m = []
for j in range(1000) :
sample_data = data.sample(n=sample_size[i])
sample_data_mean = sample_data['installment'].mean()
m.append(sample_data_mean)
mean_series =
|
pd.Series(m)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
This code is developed by <NAME>.
Command Line Usage : python HSPP_Anatomy.py Dataset[a|i] Attributes[7|14] Partitions{p} L-Diversity[L]
Example usage: python HSPP_Anatomy.py a 7 50 10
Dataset = INFORMS | Adults
No of Attributes = 7 | 14
"""
import random
import pandas as pd
import numpy as np
import time
from utils.read_informs_data_2 import read_data as read_informs
from utils.read_data_2 import read_data
from utils.read_data import read_data as read_data_7
from utils.read_informs_data import read_data as read_informs_7
import sys
# Reading the dataset
def preproccess_Informs():
label=['PID','DOBMM','DOBYY','SEX','RACEX','RACEAX','RACEBX', 'RACEWX', 'RACETHNX', 'HISPANX', 'HISPCAT','EDUCYEAR','marry','SAFP']
cvslabel=['PID','DOBMM','DOBYY','SEX','RACEX','RACEAX','RACEBX', 'RACEWX', 'RACETHNX', 'HISPANX', 'HISPCAT','EDUCYEAR','marry']
DATA=()
DATA = read_informs()
#""" STEP 1 : MERGING DATASET """
dataset=pd.DataFrame((DATA[0:58568]),columns=['PID','DOBMM','DOBYY','SEX','RACEX','RACEAX','RACEBX', 'RACEWX','RACETHNX', 'HISPANX', 'HISPCAT','EDUCYEAR','marry','SAFP','Conditional'])
newdata =dataset.drop(dataset.columns[-1] ,axis=1,inplace=False)
cvs_data=newdata.copy();
cvs_data.reset_index(inplace=True)
cvs_data.drop(cvslabel,axis=1,inplace=True)
newdata.drop(['SAFP'],axis=1,inplace=True)
newdata.reset_index(inplace=True);
newdata.rename({'index': 'UID'}, axis=1, inplace=True);
return newdata , cvs_data
def preprocess_adults():
I_Label=['age', 'workcalss', 'final_weight', 'education', 'education_num', 'matrital_status','relationship', 'race','sex','capital_gain', 'capital_loss','hours_per_week','native_country','SA']
label=['age', 'workcalss', 'final_weight', 'education', 'education_num', 'matrital_status', 'relationship', 'race','sex','capital_gain', 'capital_loss','hours_per_week','native_country','SAFP']
cvslabel=['age', 'workcalss', 'final_weight', 'education', 'education_num', 'matrital_status', 'relationship', 'race','sex','capital_gain', 'capital_loss','hours_per_week','native_country']
DATA=()
DATA = read_data()
dataset=pd.DataFrame((DATA[0:40000]),columns=I_Label)
dataset['SAFP'] = dataset.groupby(['age', 'workcalss', 'final_weight', 'education', 'education_num', 'matrital_status','relationship', 'race','sex','capital_gain', 'capital_loss','hours_per_week','native_country'])['SA'].transform('sum')
cvs_data=dataset.copy();
cvs_data.drop_duplicates(subset=cvslabel,inplace=True)
cvs_data.reset_index(inplace=True)
cvs_data.drop(cvslabel,axis=1,inplace=True)
newdata= dataset.drop_duplicates(subset=['age', 'workcalss', 'final_weight', 'education', 'education_num', 'matrital_status','relationship', 'race','sex','capital_gain', 'capital_loss','hours_per_week','native_country'])
newdata.drop(['SA'],axis=1,inplace=True)
newdata.drop(['SAFP'],axis=1,inplace=True)
newdata.reset_index(inplace=True);
newdata.rename({'index': 'UID'}, axis=1, inplace=True);
return newdata,cvs_data
def preprocess_adults_7():
I_Label=['age','workclass','education','matrital_status','race','sex','native_country','SA']
label=['age','workclass','education','matrital_status','race','sex','native_country','SAFP']
cvslabel=['age','workclass','education','matrital_status','race','sex','native_country']
DATA=()
DATA = read_data_7()
dataset=pd.DataFrame((DATA[0:40000]),columns=I_Label)
dataset['SAFP'] = dataset.groupby(['age','workclass','education','matrital_status','race','sex','native_country'])['SA'].transform('sum')
cvs_data=dataset.copy();
cvs_data.drop_duplicates(subset=cvslabel,inplace=True)
cvs_data.reset_index(inplace=True)
cvs_data.drop(cvslabel,axis=1,inplace=True)
newdata= dataset.drop_duplicates(subset=['age','workclass','education','matrital_status','race','sex','native_country'])
newdata.drop(['SA'],axis=1,inplace=True)
newdata.drop(['SAFP'],axis=1,inplace=True)
newdata.reset_index(inplace=True);
newdata.rename({'index': 'UID'}, axis=1, inplace=True);
return newdata,cvs_data
def preprocess_Informs_7():
DATA=()
DATA = read_informs_7()
#""" STEP 1 : MERGING DATASET """
label=['DOBMM','DOBYY','RACEX','EDUCYEAR','marry','SAFP']
cvslabel=['DOBMM','DOBYY','RACEX','EDUCYEAR','marry']
dataset=pd.DataFrame((DATA[0:58568]),columns=['DOBMM','DOBYY','RACEX','EDUCYEAR','marry','SAFP','Conditional'])
newdata =dataset.drop(dataset.columns[-1] ,axis=1,inplace=False)
cvs_data=newdata.copy();
cvs_data.reset_index(inplace=True)
cvs_data.drop(cvslabel,axis=1,inplace=True)
newdata.drop(['SAFP'],axis=1,inplace=True)
newdata.reset_index(inplace=True);
newdata.rename({'index': 'UID'}, axis=1, inplace=True);
return newdata,cvs_data
def sp_anatomy(data,p,L):
newdata=data
z=1;
size = int((len(newdata))/p)
list_of_dfs = [newdata.loc[i:i+size-1,:] for i in range(0, len(newdata),size)] #PARTITIONS list containing all the dataframes/partitions
print(newdata.shape[1])
print(size)
length=len(list_of_dfs)
alldata=pd.DataFrame();
Qpt=pd.DataFrame()
a=list_of_dfs
guid=0;
start_row1=0;
start_row2=0;
start_row3=0;
start_row4=0;
lit= [];
writer = pd.ExcelWriter('MST_TAble.xlsx', engine='xlsxwriter')
for i in range(length):
df=pd.DataFrame.from_records(list_of_dfs[i])
rand=random.randint(0,(df.shape[1]-1)) # randomly selecting a quasi-identifier column in the dataset
df.iloc[:,rand]= np.random.permutation(df.iloc[:,rand]) # iloc to index the dataframe by integer; randomly permute the selected quasi-identifier
df.reset_index(inplace=True);
df.rename({'index': 'Guid'}, axis=1, inplace=True);
df['Guid']=guid;
guid=guid+1;
FL=random.randint(0,(len(df)-1))
SL=random.randint(0,(len(df)-1))
if FL==SL :
SL=random.randint(0,(len(df)-1))
print("SL",(SL))
print("FL",(FL))
# Splitting into two separate Tables
lit=df.iloc[FL,0:2]
lit=lit.reset_index()
lit.columns = ['','First Leader']
lit1=df.iloc[SL,0:2]
lit1=lit1.reset_index()
lit1.columns = ['','Second Leader']
df.to_excel(writer,sheet_name=('Validation Table'), index=False , startrow=start_row1)
start_row1 = start_row1 + len(df) + 20;
alldata=alldata.append(df,True);
df.drop(df.columns[0],axis=1,inplace=True)
df.to_excel(writer,sheet_name=('MST'), index=False , startrow=start_row2) #, startcol=df.shape[1]+3)
start_row2 = start_row2 + len(df) + 20;
columns=['firstleader',2];
lit.to_excel(writer,sheet_name=('LIT'), index=False ,header=True, startrow=start_row3)
start_row3 = start_row3 + 4;
columns=['secondleader',2];
lit1.to_excel(writer,sheet_name=('LIT'), index=False ,header=True, startrow=start_row4, startcol=lit.shape[1]+10)
start_row4 = start_row4 + 4;
writer.save()
writer = pd.ExcelWriter('Alldata.xlsx', engine='xlsxwriter')
alldata.to_excel(writer,sheet_name=('Alldata'), index=False , startrow=0)
writer.save()
return alldata,length,size
def cvs(cvs_data,alldata,length,size,csii):
fl=
|
pd.DataFrame()
|
pandas.DataFrame
|
r"""Submodule frequentist_statistics.py includes the following functions: <br>
- **normal_check():** compare the distribution of numeric variables to a normal distribution using the
Kolmogorov-Smirnov test <br>
- **correlation_analysis():** Run correlations for numerical features and return output in different formats <br>
- **correlations_as_sample_increases():** Run correlations for subparts of the data to check robustness <br>
- **multiple_univariate_OLSs():** Tmp <br>
- **potential_for_change_index():** Calculate the potential for change index based on either variants of the r-squared
(from linear regression) or the r-value (pearson correlation) <br>
- **correct_pvalues():** function to correct for multiple testing <br>
- **partial_correlation():** function to calculate the partial correlations whilst correcting for other variables <br>
"""
from itertools import combinations
from itertools import product
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from matplotlib.lines import Line2D
from scipy import stats
from sklearn.linear_model import LinearRegression
from statsmodels.stats.multitest import multipletests
from .utils import apply_scaling
def normal_check(data: pd.DataFrame) -> pd.DataFrame:
r"""Compare the distribution of numeric variables to a normal distribution using the Kolmogrov-Smirnov test
Wrapper for `scipy.stats.kstest`: the empircal data is compared to a normally distributed variable with the
same mean and standard deviation. A significant result (p < 0.05) in the goodness of fit test means that the
data is not normally distributed.
Parameters
----------
data: pandas.DataFrame
Dataframe including the columns of interest
Returns
----------
df_normality_check: pd.DataFrame
Dataframe with column names, p-values and an indication of normality
Examples
----------
>>> tips = sns.load_dataset("tips")
>>> df_normality_check = normal_check(tips)
"""
# Select numeric columns only
num_features = data.select_dtypes(include="number").columns.tolist()
# Compare distribution of each feature to a normal distribution with given mean and std
df_normality_check = data[num_features].apply(
lambda x: stats.kstest(
x.dropna(), stats.norm.cdf, args=(np.nanmean(x), np.nanstd(x)), N=len(x)
)[1],
axis=0,
)
# create a label that indicates whether a feature has a normal distribution or not
df_normality_check = pd.DataFrame(df_normality_check).reset_index()
df_normality_check.columns = ["feature", "p-value"]
df_normality_check["normality"] = df_normality_check["p-value"] >= 0.05
return df_normality_check
def permute_test(a, test_type, test, **kwargs):
r"""Helper function to run tests for permutations
Parameters
----------
a : np.array
test_type: str {'correlation', 'independent_t_test'}
Type of the test to be used
test:
e.g. `scipy.stats.pearsonr` or `statsmodels.stats.weightstats.ttest_ind`
**kwargs:
Additional keywords to be added to `test`
- `a2` for the second feature if test_type = 'correlation'
Returns
----------
float:
p value for permutation
"""
if test_type == "correlation":
a2 = kwargs["a2"]
_, p = test(a, a2)
else:
raise ValueError("Unknown test_type provided")
def correlation_analysis(
data: pd.DataFrame,
col_list=None,
row_list=None,
check_norm=False,
method: str = "pearson",
dropna: str = "pairwise",
permutation_test: bool = False,
n_permutations: int = 1000,
random_state=None,
):
r"""Run correlations for numerical features and return output in different formats
Different methods to compute correlations and to handle missing values are implemented.
Inspired by `researchpy.corr_case` and `researchpy.corr_pair`.
Parameters
----------
data : pandas.DataFrame
Dataframe with variables in columns, cases in rows
row_list: list or None (default: None)
List with names of columns in `data` that should be in the rows of the correlogram.
If None, all columns are used but only every unique combination.
col_list: list or None (default: None)
List with names of columns in `data` that should be in the columns of the correlogram.
If None, all columns are used and only every unique combination.
check_norm: bool (default: False)
If True, normality will be checked for columns in `data` using `normal_check`. This influences the used method
for correlations, i.e. Pearson or Spearman. Note: normality check ignores missing values.
method: {'pearson', 'kendall', 'spearman'}, default 'pearson'
Type of correlation, either Pearson's r, Spearman's rho, or Kendall's tau, implemented via respectively
`scipy.stats.pearsonr`, `scipy.stats.spearmanr`, and `scipy.stats.kendalltau`
Will be ignored if check_norm=True. Instead, Person's r is used for every combination of normally distributed
columns and Spearman's rho is used for all other combinations.
dropna : {'listwise', 'pairwise'}, default 'pairwise'
Should rows with missing values be dropped over the complete `data` ('listwise') or for every correlation
separately ('pairwise')
permutation_test: bool (default: False)
If true, a permutation test will added
n_permutations: int (default: 1000)
Number of permutations in the permutation test
random_state: None or int (default: None)
Random state for permutation_test. If not None, random_state will be updated for every permutation
Returns
----------
result_dict: dict
Dictionary containing with the following keys:
info : pandas.DataFrame
Description of correlation method, missing values handling and number of observations
r-values : pandas.DataFrame
Dataframe with correlation coefficients. Indices and columns are column names from `data`. Only lower
triangle is filled.
p-values : pandas.DataFrame
Dataframe with p-values. Indices and columns are column names from `data`. Only lower triangle is filled.
N : pandas.DataFrame
Dataframe with numbers of observations. Indices and columns are column names from `data`. Only lower
triangle is filled. If dropna ='listwise', every correlation will have the same number of observations.
summary : pandas.DataFrame
Dataframe with columns ['analysis', 'feature1', 'feature2', 'r-value', 'p-value', 'N', 'stat-sign']
which indicate the type of test used for the correlation, the pair of columns, the correlation coefficient,
the p-value, the number of observations for each combination of columns in `data` and whether the r-value is
statistically significant.
Examples
----------
>>> from jmspack.frequentist_statistics import correlation_analysis
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>> dict_results = correlation_analysis(iris, method='pearson', dropna='listwise', permutation_test=True,
>>> n_permutations=100, check_norm=True)
>>> dict_results['summary']
References
----------
<NAME> (2018). researchpy's documentation [Revision 9ae5ed63]. Retrieved from
https://researchpy.readthedocs.io/en/latest/
"""
# Settings test
if method == "pearson":
test, test_name = stats.pearsonr, "Pearson"
elif method == "spearman":
test, test_name = stats.spearmanr, "Spearman Rank"
elif method == "kendall":
test, test_name = stats.kendalltau, "Kendall's Tau-b"
else:
raise ValueError("method not in {'pearson', 'kendall', 'spearman'}")
# Copy numerical data from the original data
data = data.copy().select_dtypes("number")
# Get correct lists
if col_list and not row_list:
row_list = data.select_dtypes("number").drop(col_list, axis=1).columns.tolist()
elif row_list and not col_list:
col_list = data.select_dtypes("number").drop(row_list, axis=1).columns.tolist()
# Initializing dataframes to store results
info = pd.DataFrame()
summary = pd.DataFrame()
if not col_list and not row_list:
r_vals = pd.DataFrame(columns=data.columns, index=data.columns)
p_vals = pd.DataFrame(columns=data.columns, index=data.columns)
n_vals = pd.DataFrame(columns=data.columns, index=data.columns)
iterator = combinations(data.columns, 2)
else:
r_vals = pd.DataFrame(columns=col_list, index=row_list)
p_vals = pd.DataFrame(columns=col_list, index=row_list)
n_vals = pd.DataFrame(columns=col_list, index=row_list)
iterator = product(col_list, row_list)
if dropna == "listwise":
# Remove rows with missing values
data = data.dropna(how="any", axis="index")
info = info.append(
{
f"{test_name} correlation test using {dropna} deletion": f"Total observations used = {len(data)}"
},
ignore_index=True,
)
elif dropna == "pairwise":
info = info.append(
{
f"{test_name} correlation test using {dropna} deletion": f"Observations in the data = {len(data)}"
},
ignore_index=True,
)
else:
raise ValueError("dropna not in {'listwise', 'pairwise'}")
if check_norm:
# Check normality of all columns in the data
df_normality = normal_check(data)
norm_names = df_normality.loc[df_normality["normality"], "feature"].tolist()
# Iterating through the Pandas series and performing the correlation
for col1, col2 in iterator:
if dropna == "pairwise":
# Remove rows with missing values in the pair of columns
test_data = data[[col1, col2]].dropna()
else:
test_data = data
if check_norm:
# Select Pearson's r only if both columns are normally distributed
if (col1 in norm_names) and (col2 in norm_names):
test, test_name = stats.pearsonr, "Pearson"
else:
test, test_name = stats.spearmanr, "Spearman Rank"
# Run correlations
r_value, p_value = test(test_data.loc[:, col1], test_data.loc[:, col2])
n_value = len(test_data)
# Store output in matrix format
try:
r_vals.loc[col2, col1] = r_value
p_vals.loc[col2, col1] = p_value
n_vals.loc[col2, col1] = n_value
except KeyError:
r_vals.loc[col1, col2] = r_value
p_vals.loc[col1, col2] = p_value
n_vals.loc[col1, col2] = n_value
# Store output in dataframe format
dict_summary = {
"analysis": test_name,
"feature1": col1,
"feature2": col2,
"r-value": r_value,
"p-value": p_value,
"stat-sign": (p_value < 0.05),
"N": n_value,
}
if permutation_test:
raise ValueError("permutation_test has yet to be implemented")
# # Copy the complete data
# col2_shuffle = np.array(test_data.loc[:, col2])
# col2_shuffle = np.repeat(
# col2_shuffle[:, np.newaxis], n_permutations, axis=1
# )
# # Shuffle within the columns
# np.random.seed(random_state)
# ix_i = np.random.sample(col2_shuffle.shape).argsort(axis=0)
# ix_j = np.tile(np.arange(col2_shuffle.shape[1]), (col2_shuffle.shape[0], 1))
# col2_shuffle = col2_shuffle[ix_i, ix_j]
# permutations = np.apply_along_axis(
# permute_test,
# axis=0,
# arr=col2_shuffle,
# test_type="correlation",
# test=test,
# a2=np.array(test_data.loc[:, col1]),
# )
#
# extreme_permutation = np.where(permutations < p_value, 1, 0)
# p_permutation = extreme_permutation.sum() / len(permutations)
# dict_summary["permutation-p-value"] = p_permutation
#
# # Reset random seed numpy
# np.random.seed(None)
summary = pd.concat(
[summary, pd.DataFrame(data=dict_summary, index=[0])],
axis=0,
ignore_index=True,
sort=False,
)
# Embed results within a dictionary
result_dict = {
"r-value": r_vals,
"p-value": p_vals,
"N": n_vals,
"info": info,
"summary": summary,
}
return result_dict
def correlations_as_sample_increases(
data: pd.DataFrame,
feature1: str,
feature2: str,
starting_N: int = 10,
step: int = 1,
method="pearson",
random_state=42,
bootstrap: bool = False,
bootstrap_per_N: int = 2,
plot: bool = True,
addition_to_title: str = "",
figsize: Tuple[float, float] = (9.0, 4.0),
alpha: float = 0.05,
):
r"""Plot changes in r-value and p-value from correlation between two features when sample size increases.
Different methods to compute correlations are implemented. Data is shuffled first, to prevent any order effects.
Parameters
----------
data : pandas.DataFrame
Dataframe with variables in columns, cases in rows
feature1: str
Name of column with first feature to be included in correlation
feature2: str
Name of column with second feature to be included in correlation
starting_N: int (default: 10)
Number of cases that should be used for first correlation
step: int (default: 1)
Step for increasing the number of cases for the correlations
method: {'pearson', 'kendall', 'spearman'}, default 'pearson'
Type of correlation, either Pearson's r, Spearman's rho, or Kendall's tau, implemented via respectively
`scipy.stats.pearsonr`, `scipy.stats.spearmanr`, and `scipy.stats.kendalltau`.
random_state: int (default: 42)
Random state for reordering the data
bootstrap: bool
Whether to bootstrap the data at each N
bootstrap_per_N: int
If bootstrap is True then how many bootstraps per each sample size should be performed i.e if bootstrap_per_N
is 2 then at sample size N=20, 2 bootstraps will be performed. This will continue until starting_N == N.
plot: bool (default: True)
Whether to plot the results
addition_to_title: str (default: '')
The title of the plot will be "The absolute r-value between {feature1} and {feature2} as N increases" and
followed by the addition (e.g. to describe a dataset).
alpha: float (default: 0.05)
Threshold for p-value that should be shown in the plot
Returns
----------
cor_results: pd.DataFrame
Dataframe with the results for all ran analyses
fig: Figure
Figure will be returned if plot=True, otherwise None. This allows you to change properties of the figure
afterwards, e.g. fig.axes[0].set_title('This is my new title')
Examples
----------
>>> import seaborn as sns
>>> from jmspack.frequentist_statistics import correlations_as_sample_increases
>>> iris = sns.load_dataset('iris')
>>> summary, fig = correlations_as_sample_increases(data=iris,feature1='petal_width',feature2='sepal_length',
>>> starting_N=20)
"""
data = (
data[[feature1, feature2]].copy()
# Remove rows with np.nans
.dropna()
# Randomize order of the data
.sample(frac=1, random_state=random_state)
)
if data.shape[0] < starting_N:
raise ValueError("Number of valid cases is smaller than the starting_N")
if data.shape[0] < starting_N + step:
raise ValueError(
"Number of valid cases is smaller than the starting_N + step (only one correlation possible)"
)
# Initiate data frame for results
corr_results =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Routine to read and clean water quality data of wide/stacked formats
<NAME>, <NAME>
KWR, April-July 2020
Last edit: July 27. No longer updated; use the new version.
"""
import pandas as pd
import numpy as np
import logging
import os
import math
# from unit_converter.converter import convert, converts
import re
from molmass import Formula

# module-level logger used by the read/clean functions below
logger = logging.getLogger(__name__)
# %% HGC.IO.defaults
# New definition of NaN. Based on default values of python with the following exception:
# 'NA' is left out to prevent NA (Sodium) being read as NaN.
NA_VALUES = ['#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',
'1.#IND', '1.#QNAN', 'N/A', 'NULL', 'NaN', 'n/a', 'nan', 'null']
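# Why 'NA' is excluded (illustrative sketch, io.StringIO only fakes a file here): with pandas'
# default NaN handling the sodium symbol would silently turn into a missing value, e.g.
#   pd.read_csv(io.StringIO('Feature\nNA\nFe'))                                  # 'NA' read as NaN
#   pd.read_csv(io.StringIO('Feature\nNA\nFe'),
#               na_values=NA_VALUES, keep_default_na=False)                      # 'NA' kept as text
# The readers below pass na_values and keep_default_na in exactly this way.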
# The following dictionary should be extracted from HGC.constants and augmented (or overruled) by the user
DATAMODEL_HGC = {
'HGC_default_feature_units': {
'Fe': 'mg/L',
'SO4': 'mg/L',
'Al': 'µg/L',
},
}
UNIT_CONVERSION = {
'mm':0.001, 'cm':0.01, 'm':1.0, 'km':1000, # add length here
'ng':1e-9, 'μg':0.000001, 'mg':0.001, 'g':1.0, 'kg':1000, # add mass here
'mL':0.001, 'L':1.0, # add volumn here
'μS':1e-6, 'mS':0.001, 'S':1.0, # add conductivity here
'mV': 0.001, 'V':1.0, # add voltage here
'μmol':1e-6, 'mmol':0.001, 'mol':1.0, # add mol here
}
# The following keyworded arguments can be adjusted and merged with the configuration dictionary by the user
KWARGS = {
'na_values': NA_VALUES,
'encoding': 'ISO-8859-1',
'delimiter': None,
}
DEFAULT_FORMAT = {
'Value': 'float64',
'Feature': 'string',
'Unit': 'string',
'Date': 'date',
'LocationID': 'string',
'SampleID': 'string',
'X': 'float64',
'Y': 'float64',
}
# %% define sub-function to be called by the main function
def read_file(file_path='', sheet_name=0, na_values=NA_VALUES, encoding='ISO-8859-1', delimiter=None, **kwargs):
"""
Read pandas dataframe or file.
Parameters
----------
file_path : dataframe or string
string must refer to file. Currenlty, Excel and csv are supported
sheet_name : integer or string
optional, when using Excel file and not reading first sheet
na_values : list
list of strings that are recognized as NaN
"""
logger.info('Reading input file(s) now...')
if isinstance(file_path, pd.DataFrame):
# skipping reading if the input is already a df
df = file_path
# print('dataframe read: ' + [x for x in globals() if globals()[x] is file_path][0])
logger.info('A dataframe has been imported')
elif isinstance(file_path, str):
file_extension = file_path.split('.')[-1]
# filename, file_extension = os.path.splitext(file_path)
if (file_extension == 'xlsx') or (file_extension == 'xls'):
try:
df = pd.read_excel(file_path,
sheet_name=sheet_name,
header=None,
index_col=None,
na_values=na_values,
keep_default_na=False,
encoding=encoding)
logger.info('A excel spreadsheet has been imported')
except:
df = []
logger.error('Encountered an error when importing excel spreadsheet')
elif file_extension == 'csv':
try:
df = pd.read_csv(file_path,
encoding=encoding,
header=None,
index_col=None,
low_memory=False,
na_values=na_values,
keep_default_na=False,
delimiter=delimiter)
logger.info('A csv has been imported')
except:
df = []
logger.error('Encountered an error when importing csv')
else:
df= []
logger.error('Not a recognizable file. Need a csv or xls(x) file.')
else:
df= []
logger.error(['This file path is not recognized: '+file_path])
return df
def _get_slice(df, arrays):
""" Get values by slicing """
if isinstance(arrays[0], list): # check if the array is nested
series = pd.Series([], dtype='object')
for array in arrays:
series = series.append(df.iloc[array[0], array[1]].rename(0))
elif len(arrays) == 1: # only row specified
series = df.iloc[arrays[0]]
else: # row and column specified
series = df.iloc[arrays[0], arrays[1]]
return series
def get_headers_wide(df, slice_sample='', slice_feature='', slice_unit='', **kwargs):
""" Get column headers for a wide-format dataframe. """
# create series with headers
header_sample = _get_slice(df, slice_sample)
header_feature = _get_slice(df, slice_feature)
header_unit = _get_slice(df, slice_unit)
# get headers at 2 levels
ncols = len(df.columns)
level0 = pd.Series(ncols * [''])
level0[header_sample.index] = header_sample
level0[header_feature.index] = header_feature
level1 = pd.Series(ncols * [''])
level1[header_unit.index] = header_unit
# add series by multi-index headers
df.columns = pd.MultiIndex.from_arrays([level0, level1])
logger.info('Got column headers for a wide-format dataframe.')
return df, header_sample, header_feature, header_unit
def get_headers_stacked(df, slice_sample='', **kwargs):
""" Get column headers for a stacked-format dataframe. """
# create series with headers
header_sample = _get_slice(df, slice_sample)
# add column names
ncols = len(df.columns)
level0 = pd.Series(ncols * [''])
level0[header_sample.index] = header_sample
df.columns = level0
return df, header_sample
def slice_rows_with_data(df, slice_data=None, **kwargs):
""" Getting needed data by pre-defined slicing blocks """
df2 = pd.DataFrame([])
# # if isinstance(slice_data, list):
# # df2 = df.iloc[slice_data[0][0], :]
# # else:
# # logger.error('Slicing_data must be a list')
# df2 = df.iloc[slice_data[0]]
# logger.info('Got needed data by pre-defined slicing blocks')
df2 = df.iloc[slice_data[0][0], :]
return df2
def _map_header_2_multilevel(map_header):
""" Convert dictionary with mapping of columns to multiindex. """
map_header2 = {}
for key, value in map_header.items():
map_header2[(key, '')] = (value, '')
return map_header2
def rename_headers_wide(df, map_header={}, **kwargs):
""" Rename columns by pre-defined names for wide format. """
# remove duplicate columns, only keep the column that shows for the first time!
df = df.groupby(level=[0, 1], axis=1).first()
# remove columns without headers
mask1 = df.columns.get_level_values(0).isin([''] + list(np.arange(0, len(df.columns))))
cols1 = np.array(list(df))[mask1]
cols2 = list(set(zip(list(cols1[:, 0]), list(cols1[:, 1]))))
df.drop(cols2, axis=1, inplace=True)
# remove columns that are in values but absent from keys
keys = map_header.keys()
values = map_header.values()
col_in_val_not_in_key = list(set(df.columns.levels[0]) & set(values) - set(keys))
df.drop(col_in_val_not_in_key, axis=1, inplace=True)
logger.info('Headers from the wide-format dataframe have been retrieved')
return df
def rename_headers_stacked(df, map_header={}, **kwargs):
""" Rename columns by pre-defined names for stacked format """
# remove duplicate columns, only keep the column that shows for the first time!
df = df.groupby(level=[0], axis=1).first()
# remove columns without headers
mask1 = df.columns.get_level_values(0).isin([''] + list(np.arange(0, len(df.columns))))
cols1 = np.array(list(df))[mask1]
cols2 = list(cols1)
df.drop(cols2, axis=1, inplace=True)
# remove columns that are in values but absent from keys
# (to prevent identical column names when mapping column names)
keys = map_header.keys()
values = map_header.values()
col_in_val_not_in_key = list(set(df.columns) & set(values) - set(keys))
df.drop(col_in_val_not_in_key, axis=1, inplace=True)
logger.info('Headers from the stacked-format dataframe have been retrieved')
return df
def melt_wide_to_stacked(df, map_header={}, **kwargs):
""" Turn wide format to stacked format """
# Convert mapping to multilevel index
map_header2 = _map_header_2_multilevel(map_header)
# Drop columns that are not present in dataframe
map_header3 = list(set(map_header2) & set(df.columns))
# Convert to stacked shape
df2 = pd.melt(df, id_vars=map_header3, var_name=['Feature', 'Unit'], value_name='Value')
# Convert multiindex to single index (some headers still have a multi-index after melt)
col2 = []
for col in df2.columns:
if isinstance(col, tuple):
col2.append(col[0])
else:
col2.append(col)
df2.columns = col2
logger.info('Turned wide format to stacked format')
return df2
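# Shape sketch (hypothetical columns): a wide frame with multi-index headers such as
#   ('LocationID','') ('Fe','mg/L') ('SO4','mg/L')
# melts into stacked rows with columns LocationID | Feature | Unit | Value, one row per
# location/feature pair, which is the format the mapping_* and convert_* steps below expect.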
def mapping_headers(df, map_header={}, **kwargs):
''' Mapping headers according to pre-defined dictionary '''
# make a list of headers that are mapped and not mapped
mapped_headers_before = list(set(map_header.keys()) & set(df.columns))
mapped_headers_after = list(map(map_header.get, mapped_headers_before))
unmapped_headers = list(set(df.columns) - set(mapped_headers_before))
# rename columns in df
df.rename(columns=map_header, inplace=True)
# write log
logger.info('Mapping headers now...')
logger.info('The following headers have been mapped from {0} to {1}'.\
format(mapped_headers_before, mapped_headers_after))
logger.info('The following headers have been kept as they are {0}'.format(unmapped_headers))
return df
def mapping_featurenames(df, map_features={}, **kwargs):
""" Mapping feature names according to pre-defined dictionary """
# make a list of features that are mapped
features_before = list(set(map_features.keys()) & set(df['Feature']))
features_after = list(map(map_features.get, features_before))
unmapped_features = list(set(df['Feature']) - set(features_before))
# rename features in df
df['Feature'].replace(map_features, inplace=True)
try:
df['SampleID']
except:
raise Exception('SampleID is missing. Must define it.')
# write log
logger.info('Mapping features now...')
logger.info('The following features have been mapped from {0} to {1}'.\
format(features_before, features_after))
logger.info('The following headers have been kept as they are {0}'.format(unmapped_features))
return df
def mapping_units(df, map_units={}, **kwargs):
""" Mapping unit names according to pre-defined dictionary """
# make a list of units that are mapped
units_before = list(set(map_units.keys()) & set(df['Unit']))
units_after = list(map(map_units.get, units_before))
unmapped_units = list(set(df['Unit']) - set(units_before))
# rename units in df
df['Unit'].replace(map_units, inplace=True)
# write log
logger.info('Mapping units now...')
# logger.info('The following units have been mapped from {0} to {1}'.\
# format(units_before, units_after))
logger.info('The following headers have been kept as they are {0}'.format(unmapped_units))
return df
def deal_with_mol_in_unit(df, DATAMODEL_HGC, unit_conversion={}, user_defined_feature_units={}, **kwargs):
'''
To deal with units that contain mol or umol
This step is done before converting units to standard HGC units
'''
# record old unit for mols
df['Unit_old_mol'] = df['Unit']
# spilt units and store in a df
unit0 = df['Unit'].where(pd.notnull(df['Unit']), None)
unit0 = unit0.replace([r''],[None])
unit0_split = _list_to_array([re.split('/| ', str(unit)) for unit in unit0])
unit0_split = pd.DataFrame(unit0_split, columns=['Col0', 'Col1','Col2'])
# create a empty column for storing ration_mol
ratio_mol=[None]*len(unit0)
# get default dictionary
unit_default = {**DATAMODEL_HGC['HGC_default_feature_units'], **user_defined_feature_units}
# replace mmol by mg and get ratio for conversion
for i in range(len(unit0)):
if df['Feature'][i] in unit_default.keys() and 'mol' in unit0_split.iloc[i,0]:
ratio_mol[i] = Formula(df['Feature'][i]).mass # has to use a loop as Formula does not support vector operation with nan
unit0_split.iloc[i,2] = df['Feature'][i]
unit0_split.iloc[i,0] = unit0_split.iloc[i,0].replace('mol', 'g')
# put units back from split
unit1_0 = unit0_split.Col0
unit1_1 = pd.Series(['/' + str(str_unit) for str_unit in unit0_split.Col1]).replace([r'/None'],'')
unit1_2 = ' '+ unit0_split.Col2.fillna('')
unit1 = unit1_0 + unit1_1 + unit1_2
unit1 = unit1.replace([r'None/ '],[None])
# get a ratio
df['ratio_mol'] = ratio_mol
# write new unit for mols
df['Unit'] = unit1
# write log
logger.info('"mol" has been mapped to "g"')
return df
def convert_units_get_ratio(df, DATAMODEL_HGC, unit_conversion={}, user_defined_feature_units={}, **kwargs):
"""
Convert units to standard ones defined by HGC and compute the conversion ratio.
If not recognisable, keep them as they are.
Before conversion, the name of the feature, if any, has to be extracted from the unit.
e.g. mg/L N --> mg/L (*molmass(NH4)/molmass(N))
μg/l --> 1e-3 mg/L, ...
To implement unit conversion,
two external modules are called:
unit_converter, which is used for conversion
molmass, which is used to compute the molar weight
"""
# save old units
df['Unit_old_unit'] = df['Unit']
# combine two dictionaries from users and hgc default. Users' format has HIGHER priority.
unit_default = {**DATAMODEL_HGC['HGC_default_feature_units'], **user_defined_feature_units}
# get unit from data, nan labeled as none, to be identical to unit1
unit0 = df['Unit'].where(pd.notnull(df['Unit']), None)
# get desired formats of units from user's definition or ghc's default
unit1 = pd.Series(list(map(unit_default.get, df['Feature']))).replace('', np.nan)
# Note: if a unit is not defined by user or hgc, use the unit from the data
unit1 = unit1.fillna(unit0)
unit1_output = unit1 #_list_to_array([re.split(' ', str(unit)) for unit in unit0])[:,0]
# unit1 = unit1.where(pd.notnull(unit1), None)
# split the column units into three parts based on / and space. Currently, the symbol_list must have two elements: / and space
unit0_split = _list_to_array([re.split('/| ', str(unit)) for unit in unit0])
unit0_split = pd.DataFrame(unit0_split, columns=['Col0', 'Col1','Col2'])
unit0_split.Col2.replace('', np.nan, inplace=True) # fill the nan by feature names
unit0_split.Col2.fillna(df['Feature'], inplace=True) # fill the nan by feature names
unit1_split = _list_to_array([re.split('/| ', str(unit)) for unit in unit1])
unit1_split = np.column_stack((unit1_split[:,0:2], df['Feature'].values))
unit1_split = pd.DataFrame(unit1_split, columns=['Col0', 'Col1','Col2'])
unit1_split.Col2.fillna(df['Feature'], inplace=True) # fill the nan by feature names
# get conversion ratio for units
ratio_col0 = _compute_convert_ratio(list(unit0_split.iloc[:,0]), list(unit1_split.iloc[:,0]), unit_conversion)
ratio_col1 = _compute_convert_ratio(list(unit1_split.iloc[:,1]), list(unit0_split.iloc[:,1]), unit_conversion)
# compute molar mass for both units, have to write a loop to reach each
MolarMass0 = list()
MolarMass1 = list()
for i in range(len(unit0_split)):
try:
MolarMass0.append(Formula(unit0_split.iloc[i,2]).mass)
except:
MolarMass0.append(1) # make it as 1 if the feature name is not recognisable
for i in range(len(unit1_split)):
try:
MolarMass1.append(Formula(unit1_split.iloc[i,2]).mass)
except:
MolarMass1.append(1) # make it as 1 if the feature name is not recognisable
ratio_col2 = pd.Series(MolarMass1)/pd.Series(MolarMass0)
# multiple ratios
ratio = ratio_col0*ratio_col1*ratio_col2*df['ratio_mol'].fillna(1)
ratio = ratio.fillna(1)
# save old and write new columns
df['ratio_unit'] = ratio_col0*ratio_col1*ratio_col2
df['ratio'] = ratio
df['Unit'] = unit1_output
# write log
logger.info('Units have been converted to standard ones if the corresponding features are defined in the default dict')
return df
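# Worked sketch of the ratio (hypothetical record): a value reported as 'μg/L N' for the feature
# NO3, with the HGC default unit 'mg/L', is multiplied by roughly
#   0.001                                        # μg -> mg, from the UNIT_CONVERSION factors (1e-6 g vs 1e-3 g)
#   * Formula('NO3').mass / Formula('N').mass    # ~62.0 / ~14.0, i.e. about 4.43
# The prefix factor comes from _compute_convert_ratio (a helper in this module), so the 0.001 here
# assumes it simply divides the UNIT_CONVERSION factors; the molar-mass part is the loop above.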
# def _get_symbol_from_string(list0, symbol):
# '''
# a function defined to count the frequence of symbols from a list
# '''
# if isinstance(list0, int) or isinstance(list0, float):
# list0 = [list0] # make it as a list
# symb_count = np.empty([len(list0),len(symbol)]) # make a empty matrix
# for j in range(len(symbol)):
# for i in range(len(list0)):
# symb_count[i, j] = str(list0[i]).count(symbol[j]) # count number of symbols
# return symb_count
# def _flatten(seq):
# '''
# to get elements from a list of lists
# '''
# l = []
# for elt in seq:
# t = type(elt)
# if t is tuple or t is list:
# for elt2 in _flatten(elt):
# l.append(elt2)
# else:
# l.append(elt)
# return l
def _get_number_from_string(string):
''' function to get numbers from string '''
number0 = float(''.join([x for x in string if x.isdigit() or x == '.']))
return number0
def split_strings_from_value(df):
'''
to spilt strings if they contain "<", ">", "-": put string symbol in separate column
<100 --> "<" in temporary column, "100" remain in value column
'''
df['Value_old_split'] = df['Value']
df['Value_str'] = ""
df['Value_sign'] = ""
df['Value_num'] = ""
# define a list of signs
symb_list = ['<','>','-']
# find number out
for i in range(len(df)):
df.loc[i, 'Value_str'] = str(df['Value'][i])
try: # get number out of a string. if there is no numbers, skip
df.loc[i, 'Value_num'] = re.findall(r'\d+(?:\.\d+)?', df.loc[i, 'Value_str'])[0] # get string for deleting
df.loc[i, 'Value_sign'] = df.loc[i, 'Value_str'].replace(df['Value_num'][i], '').replace('.', '') # delete number from string
df.loc[i, 'Value_num'] = _get_number_from_string(df.loc[i, 'Value_str']) # get real number -.3 --> - and 0.3
except:
pass
df['Value'] = df['Value_num']
df.drop(columns=['Value_num','Value_str'], inplace=True)
# write log
logger.info('Cleaned the columns that has invalid values')
return df
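# Doctest-style sketch (toy frame): censored readings keep their sign in a helper column while the
# numeric part becomes the value, e.g.
# >>> toy = pd.DataFrame({'Value': ['<100', '5.2', '>0.3']})
# >>> split_strings_from_value(toy)[['Value', 'Value_sign']]
# # rows become (100.0, '<'), (5.2, ''), (0.3, '>')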
def _convert_format(series0, format0):
if format0 == 'string':
series1 = [str(i) for i in series0]
elif format0 == 'float64':
series1 = pd.to_numeric(series0)
elif format0 == 'date':
series1 =
|
pd.to_datetime(series0)
|
pandas.to_datetime
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.relaxation.py
#
# Copyright (C) 2012-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Processes NMR relaxation and related data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
################################## FUNCTIONS ##################################
def spawn(function):
def run_function(queue_in, queue_out):
while True:
i, argument = queue_in.get()
if i is None:
break # 'None' signals that queue is empty
queue_out.put((i, function(argument)))
return run_function
def multiprocess_map(function, arguments, n_processes=1):
"""
Runs a *function* with *arguments* using *n_processes*. Meant
as a replacement for multiprocessing.Pool.imap_unordered,
which can only accept module-level functions.
**Arguments:**
:*function*: Function to run
:*arguments*: Iterable of arguments to pass to function
:*n_processes*: Number of processes to use
**Returns:**
:*results*: List of results returned from *function*
.. todo:
- Does this work, or can it be made to smoothly work, with more
complex arguments?
- Accept multiple functions, in addition to arguments
- Additional improvements likely possible
"""
from multiprocessing import Queue, Process
# Initialize queues
queue_in = Queue(1)
queue_out = Queue()
# Initialize processes and link to input and output queues
processes = [Process(target=spawn(function), args=(queue_in, queue_out))
for i in range(n_processes)]
for p in processes:
p.daemon = True
p.start()
# Construct input queue, including 'None' signals to terminate
input = [queue_in.put((i, argument)) for i, argument in
enumerate(arguments)]
for i in range(n_processes):
queue_in.put((None, None))
# Retrieve output queue
output = [queue_out.get() for i in range(len(input))]
# Rejoin processes and return results
for p in processes:
p.join()
return [x for i, x in sorted(output)]
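# Hedged usage sketch for multiprocess_map. The worker name below is illustrative; because
# spawn() wraps the target in a closure, this pattern is expected to work on fork-based
# platforms (Linux) and may not pickle under the 'spawn' start method on Windows/macOS.
# def _square(x):
#     return x * x
#
# if __name__ == "__main__":
#     print(multiprocess_map(_square, range(8), n_processes=2))  # [0, 1, 4, 9, 16, 25, 36, 49]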
def process_ired(infiles, outfile, indexfile=None, **kwargs):
"""
"""
from os import devnull
import re
from subprocess import Popen, PIPE
import pandas as pd
import numpy as np
r1r2noe_datasets = []
s2_datasets = []
# Load data
for i, infile in enumerate(infiles):
with open(devnull, "w") as fnull:
fields = Popen("head -n 1 {0}".format(infile), stdout=PIPE,
stderr=fnull, shell=True).stdout.read().strip()
re_t1t2noe = re.compile(
r"^#Vec\s+[\w_]+\[T1\]\s+[\w_]+\[T2\]\s+[\w_]+\[NOE\]$")
re_s2 = re.compile(r"^#Vec\s+[\w_]+\[S2\]$")
if re.match(re_t1t2noe, fields):
raw_data = np.loadtxt(infile, dtype=np.float32)
read_csv_kw = kwargs.get("read_csv_kw",
dict(delim_whitespace=True, header=0, index_col=0,
names=["r1", "r2", "noe"]))
raw_data = pd.read_csv(infile, **read_csv_kw)
raw_data["r1"] = 1 / raw_data["r1"]
raw_data["r2"] = 1 / raw_data["r2"]
r1r2noe_datasets.append(raw_data)
elif re.match(re_s2, fields):
raw_data = np.loadtxt(infile, dtype=np.float32)
read_csv_kw = kwargs.get("read_csv_kw",
dict(delim_whitespace=True, header=0, index_col=0, names=["s2"]))
raw_data = pd.read_csv(infile, **read_csv_kw)
s2_datasets.append(raw_data)
else:
raise Exception()
if indexfile is not None:
residue = np.loadtxt(indexfile, dtype=np.str).flatten()
# Process data
items = []
fmt = []
if indexfile is not None:
items.append(("residue", residue))
fmt.append("%12s")
else:
fmt.append("%12d")
if len(r1r2noe_datasets) >= 2:
r1r2noe_mean = pd.concat(r1r2noe_datasets).groupby(level=0).mean()
r1r2noe_std = pd.concat(r1r2noe_datasets).groupby(level=0).std()
items.extend([("r1", r1r2noe_mean["r1"]), ("r1 se", r1r2noe_std["r1"]),
("r2", r1r2noe_mean["r2"]), ("r2 se", r1r2noe_std["r2"]),
("noe", r1r2noe_mean["noe"]), ("noe se", r1r2noe_std["noe"])])
fmt.extend(
["%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f"])
elif len(r1r2noe_datasets) == 1:
r1r2noe_mean = r1r2noe_datasets[0]
items.extend([("r1", r1r2noe_mean["r1"]), ("r2", r1r2noe_mean["r2"]),
("noe", r1r2noe_mean["noe"])])
fmt.extend(["%11.5f", "%11.5f", "%11.5f"])
if len(s2_datasets) >= 2:
s2_mean =
|
pd.concat(s2_datasets)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 5 15:54:03 2016
@author: waffleboy
"""
from flask import Flask, render_template
import requests
import ast
from datetime import datetime
from datetime import timedelta
import pandas as pd
import pickle,json
from pandas_highcharts.core import serialize
from collections import OrderedDict
app = Flask(__name__) #initialize app
# Main function - Displays the data from masterDic, which contains all the information
# to make the dashboard.
@app.route("/")
def main():
masterDic,summaryStats = loadData(years=5)
return render_template('main.html',masterDic=masterDic,summaryStats=summaryStats)
# generates the master dictionary that contains all information for text and graph
# Input: <int> years: number of years worth of data to show
# Output: <dictionary> masterDic: dictionary of dictionaries. Example:
# {'chart0': {'stockmetrics': {'sbux': {'Ask':3,'Bid':4,..}} ,'highChartsDic':{<highcharts constructor>}}}
def loadData(years,csv=False,csvfile=False):
if csv:
df = csvfile
else:
df = pd.read_csv('input.csv')
companySym = list(df['stockname'])
query,additionalOptions = getQuery(companySym) #generate link to query from, column names to map
queryDF = fix_overall_details_request(query)
columnNames = getColumnNames(additionalOptions)
queryDF.columns = columnNames #set columm names to actual names of options
queryDF["Symbol"] = queryDF["Symbol"].map(lambda x:x.replace('"',''))
queryDF = queryDF.round(3)
col = queryDF.set_index('Symbol').T.to_dict() #make dictionary of key: symbol, value: everything in the row
masterDic = populateMasterDic(df,col,years,OrderedDict()) #populate an orderedDict with data
summary = getSummaryStatistics(masterDic)
return masterDic,summary
def getSummaryStatistics(masterDic):
totalProfit = 0
totalValue = 0
totalCost = 0
totalStock = 0
for key in masterDic:
totalProfit += masterDic[key]['performance']['currentProfit']
totalValue += masterDic[key]['performance']['currentValue']
totalCost += masterDic[key]['performance']['totalPurchaseCost']
totalStock += masterDic[key]['performance']['boughtamount']
return {'totalProfit':totalProfit,'totalValue':totalValue,
'totalCost':totalCost,'totalStock':totalStock}
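# Hedged sketch of the masterDic shape getSummaryStatistics expects; the chart key and
# numbers are illustrative, not real portfolio data.
# _example = {'chart0': {'performance': {'currentProfit': 10.0, 'currentValue': 110.0,
#                                        'totalPurchaseCost': 100.0, 'boughtamount': 5}}}
# getSummaryStatistics(_example)  # -> {'totalProfit': 10.0, 'totalValue': 110.0,
#                                 #     'totalCost': 100.0, 'totalStock': 5}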
# Used by loadData(), fills the ordered dict with information
# Input:
# 1. <pandas dataframe> df: input.csv
# 2. <dictionary> col: dictionary containing all text data for every stock
# 3. <int> years: number of years worth of data to show
# 4. <OrderedDict> masterDic: an orderedDict to populate data with
# Output:
# 1. <OrderedDict> masterDic: masterdic populated with information
def populateMasterDic(df,col,years,masterDic):
for index in df.index:
name=df['stockname'][index]
boughtprice = df['boughtprice'][index]
boughtamount = df['boughtamount'][index]
data = fix_ticker_details_request(name, datetime.now()-timedelta(days=365*years), datetime.now())
data = data[['Adj Close']]
#make 21 day moving average
data['21DayAvg'] = data['Adj Close'].rolling(21).mean().fillna(0)
#make 100 day average
data['100DayAvg'] = data['Adj Close'].rolling(100).mean().fillna(0)
if not pd.isnull(boughtprice):
bought = [boughtprice]*len(data.index)
data = pd.DataFrame(data)
data['bought'] = bought
else:
data = pd.DataFrame(data)
content = serialize(data,render_to='chart'+str(index), title=name.upper()+' Stock',output_type='json')
content = changeChartOptions(content)
# get total purchase cost, etc
stockPerformance = getStockPerformance(data,boughtamount,boughtprice)
masterDic['chart'+str(index)] = {'stockmetrics':col[name],'highChartsDic':content,
'performance':stockPerformance}
return masterDic
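# Minimal, self-contained sketch of the moving-average step used above (synthetic prices):
# _prices = pd.Series([10.0, 11.0, 12.0, 13.0, 14.0])
# _prices.rolling(3).mean().fillna(0)  # -> 0.0, 0.0, 11.0, 12.0, 13.0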
def getStockPerformance(data,boughtamount,boughtprice):
latestSellPrice = float(data['Adj Close'].tail(1))
latestSellPrice = round(latestSellPrice,4)
if pd.isnull(boughtprice) or
|
pd.isnull(boughtamount)
|
pandas.isnull
|
#%%
import numpy as np
import pandas as pd
from collections import defaultdict
import datatable as dt
import lightgbm as lgb
from matplotlib import pyplot as plt
import random
from sklearn.metrics import roc_auc_score
import gc
import pickle
import zipfile
import os
import sys
# HOME = "/home/scao/Documents/kaggle-riiid-test/"
HOME = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
sys.path.append(HOME)
from utils import *
get_system()
pd.set_option('display.max_rows', 150)
|
pd.set_option('display.max_columns', 50)
|
pandas.set_option
|
from sklearn.datasets import load_breast_cancer, fetch_california_housing
import pandas as pd
import numpy as np
import pickle
import os
import collections
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
def handle_categorical_feat(X_df):
''' It moves the categorical features to the last '''
original_columns = []
one_hot_columns = []
for col_name, dtype in zip(X_df.dtypes.index, X_df.dtypes):
if dtype == object:
one_hot_columns.append(col_name)
else:
original_columns.append(col_name)
X_df = X_df[original_columns + one_hot_columns]
return X_df, one_hot_columns
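# Hedged usage sketch for handle_categorical_feat; column names and values are illustrative.
# _demo = pd.DataFrame({'age': [25, 40], 'city': ['NYC', 'LA'], 'income': [50.0, 80.0]})
# _reordered, _cat_cols = handle_categorical_feat(_demo)
# # _reordered.columns -> ['age', 'income', 'city'];  _cat_cols -> ['city']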
def load_breast_data():
breast = load_breast_cancer()
feature_names = list(breast.feature_names)
X, y = pd.DataFrame(breast.data, columns=feature_names), pd.Series(breast.target)
dataset = {
'problem': 'classification',
'full': {
'X': X,
'y': y,
},
'd_name': 'breast',
'search_lam': np.logspace(-1, 2.5, 15),
}
return dataset
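# Minimal usage sketch of the dataset dictionary returned above:
# _ds = load_breast_data()
# _X, _y = _ds['full']['X'], _ds['full']['y']   # 569 samples x 30 features, binary target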
def load_adult_data():
# https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
df = pd.read_csv("./datasets/adult.data", header=None)
df.columns = [
"Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
"MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
"CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"
]
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols].copy()
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy()
# Make it as 0 or 1
y_df.loc[y_df == ' >50K'] = 1.
y_df.loc[y_df == ' <=50K'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'adult',
'search_lam': np.logspace(-2, 2, 15),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
return dataset
def load_credit_data():
# https://www.kaggle.com/mlg-ulb/creditcardfraud
df = pd.read_csv(r'./datasets/creditcard.csv')
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'credit',
'search_lam': np.logspace(-0.5, 2.5, 8),
}
return dataset
def load_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv(r'./datasets/WA_Fn-UseC_-Telco-Customer-Churn.csv')
train_cols = df.columns[1:-1] # First column is an ID
label = df.columns[-1]
X_df = df[train_cols].copy()
# Handle special case of TotalCharges wrongly assigned as object
X_df.loc[X_df['TotalCharges'] == ' ', 'TotalCharges'] = 0.
X_df.loc[:, 'TotalCharges'] = pd.to_numeric(X_df['TotalCharges'])
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy() # 'Yes, No'
# Make it as 0 or 1
y_df[y_df == 'Yes'] = 1.
y_df[y_df == 'No'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'churn',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_pneumonia_data(folder='/media/intdisk/medical/RaniHasPneumonia/'):
featurename_file = os.path.join(folder, 'featureNames.txt')
col_names = pd.read_csv(featurename_file, delimiter='\t', header=None, index_col=0).iloc[:, 0].values
def read_data(file_path='pneumonia/RaniHasPneumonia/medis9847c.data'):
df =
|
pd.read_csv(file_path, delimiter='\t', header=None)
|
pandas.read_csv
|
"""
Rank summarization results.
"""
import os
import sys
import time
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import sem
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from experiments import util as exp_util
from postprocess import util as pp_util
from config import rank_args
from rank.remove import get_mean_df
def process(args, exp_hash, out_dir, logger):
begin = time.time()
color, line, label = pp_util.get_plot_dicts()
df_loss_list = []
df_li_loss_list = []
df_acc_list = []
df_li_acc_list = []
df_auc_list = []
df_li_auc_list = []
for tree_type in args.tree_type:
in_dir = os.path.join(args.in_dir,
tree_type,
f'exp_{exp_hash}',
'summary')
for ckpt in args.ckpt:
ckpt_dir = os.path.join(in_dir, f'ckpt_{ckpt}')
# define paths
fp_loss = os.path.join(ckpt_dir, 'loss_rank.csv')
fp_li_loss = os.path.join(ckpt_dir, 'loss_rank_li.csv')
fp_acc = os.path.join(ckpt_dir, 'acc_rank.csv')
fp_li_acc = os.path.join(ckpt_dir, 'acc_rank_li.csv')
fp_auc = os.path.join(ckpt_dir, 'auc_rank.csv')
fp_li_auc = os.path.join(ckpt_dir, 'auc_rank_li.csv')
# check paths
assert os.path.exists(fp_loss), f'{fp_loss} does not exist!'
assert os.path.exists(fp_li_loss), f'{fp_li_loss} does not exist!'
assert os.path.exists(fp_acc), f'{fp_acc} does not exist!'
assert os.path.exists(fp_li_acc), f'{fp_li_acc} does not exist!'
assert os.path.exists(fp_auc), f'{fp_auc} does not exist!'
assert os.path.exists(fp_li_auc), f'{fp_li_auc} does not exist!'
# read results
df_loss_list.append(pd.read_csv(fp_loss))
df_li_loss_list.append(pd.read_csv(fp_li_loss))
df_acc_list.append(pd.read_csv(fp_acc))
df_li_acc_list.append(pd.read_csv(fp_li_acc))
df_auc_list.append(pd.read_csv(fp_auc))
df_li_auc_list.append(pd.read_csv(fp_li_auc))
# compile results
df_loss_all = pd.concat(df_loss_list)
df_li_loss_all =
|
pd.concat(df_li_loss_list)
|
pandas.concat
|
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import matplotlib.dates as md
import sklearn
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from textblob import TextBlob, Word, Blobber
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop = stopwords.words('english')
#nltk.download('names')
#from nltk.corpus import names
#male_names = names.words('male.txt')
#female_names = names.words('female.txt')
import textstat
from lexicalrichness import LexicalRichness
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import random
import plotly
plotly.tools.set_credentials_file(username='feupos', api_key='<KEY>')
#Plotly Tools
from plotly.offline import init_notebook_mode, iplot
#init_notebook_mode(connected=False)
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import seaborn as sns
sns.set()
from itertools import groupby
import time
from datetime import datetime
import sys
import csv
import ctypes
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
from statistics import mean
import itertools
#from multiprocessing.pool import ThreadPool
#pool = ThreadPool(20) # However many you wish to run in parallel
from tqdm import tqdm
import glob
import os.path
import sys
from os import getcwd
from sentistrength import PySentiStr
senti = PySentiStr()
#senti.setSentiStrengthPath('C:\\SentiStrength\\SentiStrength.jar') # e.g. 'C:\Documents\SentiStrength.jar'
#senti.setSentiStrengthLanguageFolderPath('C:\\SentiStrength') # e.g. 'C:\Documents\SentiStrengthData\'
senti.setSentiStrengthPath(os.path.join(getcwd(),"SentiStrengthData/SentiStrength.jar"))
senti.setSentiStrengthLanguageFolderPath(os.path.join(getcwd(),"SentiStrengthData/"))
def preprocess_data(data):
data_out = pd.DataFrame()
data_out = data[['type','content']]
data_out.dropna(inplace=True)
return data_out
def count_words(text):
try:
return len(TextBlob(text).words)
except:
return 0
def calc_ttr(text):
try:
return LexicalRichness(text).ttr
except:
return 0
def count_nouns(text):
try:
tags = TextBlob(text).tags
tags = [i[1] for i in tags]
return sum(map(lambda x : 1 if 'NN' in x else 0, tags))
except:
return 0
def count_proper_nouns(text):
try:
tags = TextBlob(text).tags
tags = [i[1] for i in tags]
return sum(map(lambda x : 1 if 'NNP' in x else 0, tags))
except:
return 0
def count_quotes(text):
try:
return sum(map(lambda x : 1 if '"' in x else 0, text))/2
except:
return 0
def count_per_stop(text):
try:
words = TextBlob(text).words
return sum(map(lambda x : 1 if x in stop else 0, words))/len(words)
except:
return 0
def avg_wlen(text):
try:
words = TextBlob(text).words
return sum(map(lambda x : len(x), words))/len(words)
except:
return 0
def fk_grade(text):
try:
return textstat.flesch_kincaid_grade(text)
except:
return 0
#blob = TextBlob(text)
#return 0.39 * (len(blob.words)/len(blob.sentences)) + 11.8 * (total_syllables/len(blob.words)) - 15.59  # standard Flesch-Kincaid grade formula; syllable counting is left to textstat above
def get_bow(text):
tfidf_transformer = TfidfTransformer()
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform([text])
return tfidf_transformer.fit_transform(X_train_counts)
#bow = CountVectorizer(max_features=1000, lowercase=True, ngram_range=(1,1),analyzer = "word")
#return bow.fit_transform([text])
def get_polarity(text):
polarity = 0
try:
polarity = TextBlob(text).sentiment.polarity
except:
#print("invalid content for polarity")
pass
return polarity
def get_pos_str(text):
try:
return senti.getSentiment(text, score='binary')[0][1]
except:
return 0
def get_neg_str(text):
try:
return senti.getSentiment(text, score='binary')[0][0]
except:
return 0
def count_names(text):
try:
print(sum(map(lambda x : 1 if x in male_names else 0, TextBlob(text).words)))
return 0
except:
return 0
def count_JJS(text):
try:
tags = TextBlob(text).tags
tags = [i[1] for i in tags]
return sum(map(lambda x : 1 if 'JJS' in x else 0, tags))
except:
return 0
def count_JJR(text):
try:
tags = TextBlob(text).tags
tags = [i[1] for i in tags]
return sum(map(lambda x : 1 if 'JJR' in x else 0, tags))
except:
return 0
def count_RBR(text):
try:
tags = TextBlob(text).tags
tags = [i[1] for i in tags]
return sum(map(lambda x : 1 if 'RBR' in x else 0, tags))
except:
return 0
def count_RBS(text):
try:
tags = TextBlob(text).tags
tags = [i[1] for i in tags]
return sum(map(lambda x : 1 if 'RBS' in x else 0, tags))
except:
return 0
def parse_features(data, title_features, body_features):
new_body_features = pd.DataFrame({'type':data['type'],
#'BoW':data['content'].map(get_bow),
'per_stop':data['content'].map(count_per_stop),
'WC':data['content'].map(count_words),
'TTR':data['content'].map(calc_ttr),
'NN':data['content'].map(count_nouns),
'avg_wlen':data['content'].map(avg_wlen),
'quote':data['content'].map(count_quotes),
'FK':data['content'].map(fk_grade),
#'polarity':data['content'].map(get_polarity),
'NNP':data['content'].map(count_proper_nouns),
'str_neg':data['content'].map(get_neg_str),
'str_pos':data['content'].map(get_pos_str),
'JJR':data['content'].map(count_JJR),
'JJS':data['content'].map(count_JJS),
'RBR':data['content'].map(count_RBR),
'RBS':data['content'].map(count_RBS)
})
#print(new_body_features)
body_features = body_features.append(new_body_features)
#need this for some reason
body_features['WC'] = body_features['WC'].astype(int)
new_title_features = pd.DataFrame({'type':data['type'],
#'BoW':data['title'].map(get_bow),
'per_stop':data['title'].map(count_per_stop),
'WC':data['title'].map(count_words),
'TTR':data['title'].map(calc_ttr),
'NN':data['title'].map(count_nouns),
'avg_wlen':data['title'].map(avg_wlen),
'quote':data['title'].map(count_quotes),
'FK':data['title'].map(fk_grade),
'polarity':data['title'].map(get_polarity),
'NNP':data['title'].map(count_proper_nouns),
'str_neg':data['title'].map(get_neg_str),
'str_pos':data['title'].map(get_pos_str),
'JJR':data['title'].map(count_JJR),
'JJS':data['title'].map(count_JJS),
'RBR':data['title'].map(count_RBR),
'RBS':data['title'].map(count_RBS)
})
title_features = title_features.append(new_title_features)
#need this for some reason
title_features['WC'] = title_features['WC'].astype(int)
return title_features, body_features
def parse_full_dataset():
start = time.time()
print("start")
filename = 'FakeNewsAnalysis/Data/news_full.csv'
#filename = 'FakeNewsAnalysis/Data/news_sample.csv'
sample_size = 0.001 # up to 1
n_rows = 9408908#20000000#84999000000
chunk_size = int(n_rows/1000)
df_chunk = pd.read_csv( filename, chunksize = chunk_size, header = 0, nrows = n_rows,
engine='python', skip_blank_lines=True, error_bad_lines = False)
#skiprows=lambda i: i>0 and random.random() > sample_size)
title_features = pd.DataFrame()
body_features =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import tweepy
from tweepy import API
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import twitter_credentials
# # # # TWITTER CLIENT # # # #
class TwitterClient():
def __init__(self, twitter_user=None):
self.auth = TwitterAuthenticator().authenticate_twitter_app()
self.twitter_client = API(self.auth)
self.twitter_user = twitter_user
def get_twitter_client_api(self):
return self.twitter_client
# # # # TWITTER AUTHENTICATER # # # #
class TwitterAuthenticator():
def authenticate_twitter_app(self):
auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)
auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)
return auth
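# Hedged usage sketch (requires valid keys in twitter_credentials.py and a v1.1-era tweepy);
# 'user_timeline' and its arguments follow the classic tweepy API and are not part of this file.
# client = TwitterClient()
# api = client.get_twitter_client_api()
# tweets = api.user_timeline(screen_name="twitter", count=20)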
class TweetAnalyzer():
"""
Functionality for analyzing and categorizing content from tweets.
"""
def tweets_to_data_frame(self, tweets):
df =
|
pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
|
pandas.DataFrame
|
"""Atom Mapping.
Module that automatically performs atom-atom mapping of reactions,
when provided a metabolic model with suitable annotations.
Relies heavily on:
Reaction Decoder Tool (RDT) (https://github.com/asad/ReactionDecoder)
to map the reactions;
CADD Group Chemoinformatics Tools and User Services
(https://cactus.nci.nih.gov/chemical/structure)
to resolve InChI strings from given InChI keys;
Java, to run RDT"""
import requests
import os
import glob
import platform
import subprocess
import pandas as pd
from pymatgen.symmetry import analyzer
from pymatgen.core import structure
try:
from rdkit import Chem, RDLogger
except ModuleNotFoundError:
print(
'RDKit is not installed. Please install '
'RDKit to use the atom mapping module.'
)
__version__ = "0.0.1"
class MolfileDownloader:
def __init__(self, metabolite_data, db_preference=(0, 1, 2, 3)):
""" Class to find and download metabolite structures in
Molfile format.
Parameters
----------
metabolite_data : pandas.DataFrame
Dataframe that contains information about metabolites
in the model. Obtained from the INCA_input_parser module.
db_preference : tuple of int, optional
Four integers specify the order of preference of
databases to obtain the metabolite structures from:
0: Using InChI key > InChI string conversion
1: KEGG Compound database
2: HMDB database
3: CHEBI database
"""
self.database_dict = {0: 'get_from_inchi_key',
1: 'get_from_kegg',
2: 'get_from_hmdb',
3: 'get_from_chebi'}
self.metabolite_data = metabolite_data
self.db_preference = db_preference
def generate_molfile_database(self):
""" Main method that calls other methods and performs
most sanity checks on obtained files.
Outputs
-------
Molfiles : .mol files
"""
print('Fetching metabolite structures...')
if not os.path.isdir('metabolites'):
os.mkdir('metabolites')
# Disable RDKit warnings
RDLogger.DisableLog('rdApp.*')
for i, met in self.metabolite_data.iterrows():
self.filename = met.met_id + '.mol'
get_from_inchi_keyBool = False
get_from_keggBool = False
get_from_hmdbBool = False
get_from_chebiBool = False
# Check for annotations and set bool values for those
# present.
if 'inchi_key' in met.annotations:
self.inchi_key = met.annotations['inchi_key'][0]
get_from_inchi_keyBool = True # noqa: F841
if 'kegg.compound' in met.annotations:
self.keggIDs = [
keggID for keggID in met.annotations['kegg.compound']]
get_from_keggBool = True # noqa: F841
if 'hmdb' in met.annotations:
self.hmdbIDnums = ['0' * (7 - len(hmdbID[4:])) + hmdbID[4:]
for hmdbID in met.annotations['hmdb']]
get_from_hmdbBool = True # noqa: F841
if 'chebi' in met.annotations:
self.chebiIDs = [
chebiID for chebiID in met.annotations['chebi']]
get_from_chebiBool = True # noqa: F841
# Call helper functions according to order of preference
# and available references (specified by previous bool values).
for opt in self.db_preference:
if eval(self.database_dict.get(opt) + 'Bool'):
getattr(self, self.database_dict.get(opt))()
else:
continue
try:
# Rewrites the .mol file. This removes explicit hydrogens
# from the structure, lightly standardizes the molecule, and
# acts as a fail-safe check that the file is in a valid format.
m = Chem.MolFromMolFile(f'metabolites/{self.filename}')
with open(f'metabolites/{self.filename}', "w+") as f:
print(Chem.MolToMolBlock(m), file=f)
# Add metabolite ID to the first line of Molfile
with open(f'metabolites/{self.filename}', 'r') as f:
lines = f.readlines()
lines.insert(0, self.filename[:-4])
with open(f'metabolites/{self.filename}', 'w') as wf:
wf.writelines(lines)
break
except BaseException:
os.remove(f'metabolites/{self.filename}')
continue
print(
f"Successfully fetched {len(os.listdir('metabolites'))}/"
f"{self.metabolite_data.shape[0]} metabolites"
)
def get_from_inchi_key(self):
""" Helper method to obtain InChI string from InChI key,
and generate the Molfile from the string.
"""
url = (
f'https://cactus.nci.nih.gov/chemical/'
f'structure/{self.inchi_key}/stdinchi'
)
r = requests.get(url, allow_redirects=False)
inchi_string = r.text
try:
molfile = Chem.inchi.MolFromInchi(inchi_string)
with open(f'metabolites/{self.filename}', "w+") as f:
print(Chem.MolToMolBlock(molfile), file=f)
except BaseException:
return
def get_from_kegg(self):
""" Helper method to obtain Molfile from KEGG Compound database
"""
for keggID in self.keggIDs:
url = (
f'https://www.genome.jp/dbget-bin/'
f'www_bget?-f+m+compound+{keggID}'
)
r = requests.get(url, allow_redirects=False)
open(f'metabolites/{self.filename}', 'wb').write(r.content)
if os.path.getsize(f'metabolites/{self.filename}') != 0:
break
def get_from_hmdb(self):
""" Helper method to obtain Molfile from HMDB database
"""
for hmdbID in self.hmdbIDnums:
url = f'https://hmdb.ca/structures/metabolites/HMDB{hmdbID}.mol'
r = requests.get(url, allow_redirects=False)
open(f'metabolites/{self.filename}', 'wb').write(r.content)
if os.path.getsize(f'metabolites/{self.filename}') != 0:
break
def get_from_chebi(self):
""" Helper method to obtain Molfile from CHEBI database
"""
for chebiID in self.chebiIDs:
url = (
f'https://www.ebi.ac.uk/chebi/saveStructure.do'
f'?defaultImage=true&chebiId={chebiID}&imageId=0'
)
r = requests.get(url, allow_redirects=False)
open(f'metabolites/{self.filename}', 'wb').write(r.content)
if os.path.getsize(f'metabolites/{self.filename}') != 0:
break
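# Hedged usage sketch for MolfileDownloader; 'metabolite_data' is assumed to be the
# dataframe produced by the INCA_input_parser module (with met_id and annotations columns).
# downloader = MolfileDownloader(metabolite_data, db_preference=(1, 0, 2, 3))
# downloader.generate_molfile_database()   # writes one .mol file per metabolite into ./metabolites/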
def write_rxn_files(rxn_data):
""" Generates RXN files in RDT suitable format.
Requires Molfiles of all metabolites to be present
in the working directory/metabolites folder.
Parameters
----------
rxn_data : pandas.DataFrame
Dataframe that contains information about reactions
in the model. Obtained from INCA_input_parser module.
Outputs
-------
RXN files : .rxn files
"""
met_filter = ['h_e', 'h_c', 'h_p', 'h2_e', 'h2_c', 'h2_p']
biomass_filter = ['Biomass', 'biomass', 'BIOMASS']
if not os.path.isdir('unmappedRxns'):
os.mkdir('unmappedRxns')
path = os.path.join(os.getcwd(), 'unmappedRxns')
for i, rxn in rxn_data.iterrows():
# Filter out biomass reaction
if any(biomass_id in rxn.rxn_id for biomass_id in biomass_filter):
print(f'Excluded {rxn.rxn_id} reaction from mapping')
continue
rxn_filename = rxn.rxn_id + '.rxn'
# Use copies to avoid messing up the original dataframe
reactants = rxn.reactants_ids.copy()
reactants_st = [int(abs(s))
for s in rxn.reactants_stoichiometry.copy()]
products = rxn.products_ids.copy()
products_st = [int(abs(s)) for s in rxn.products_stoichiometry.copy()]
# Filter out unwanted molecules
react_indexes = []
prod_indexes = []
for i, met in enumerate(reactants):
if met in met_filter:
react_indexes.append(i)
if len(react_indexes) != 0:
for i in sorted(react_indexes, reverse=True):
del reactants[i]
del reactants_st[i]
for i, met in enumerate(products):
if met in met_filter:
prod_indexes.append(i)
if len(prod_indexes) != 0:
for i in sorted(prod_indexes, reverse=True):
del products[i]
del products_st[i]
metabolites = reactants + products
metabolites_st = reactants_st + products_st
# Check if all metabolite structures are present
if not all(
[os.path.isfile(
f'metabolites/{met}.mol'
) for met in metabolites]):
print(
f"Metabolite structures missing for reaction {rxn.rxn_id}")
continue
else:
with open(os.path.join(path, rxn_filename), "w") as f:
# Write first three lines, including reaction equation
f.write(f"$RXN\n{rxn.rxn_id}\n\n{rxn.equation}\n")
# Write export reactions (1 reactant)
if not products_st and abs(int(sum(reactants_st))) == 1:
f.write(
f'{abs(int(sum(reactants_st)))} '
f'{int(sum(reactants_st))}\n')
met = metabolites[0]
with open(f'metabolites/{met}.mol', 'r') as wf:
structure = wf.read()
f.write(f'$MOL\n{structure}')
f.write(f'$MOL\n{structure}')
# Write all the other reactions with at least 1 metabolite on
# each side
else:
f.write(
f'{abs(int(sum(reactants_st)))} '
f'{int(sum(products_st))}\n')
for s, met in zip(metabolites_st, metabolites):
with open(f'metabolites/{met}.mol', 'r') as wf:
structure = wf.read()
# Repeat structure based on stoichiometry
for i in range(s):
f.write(f'$MOL\n{structure}')
print(f"Generated {len(os.listdir('unmappedRxns'))}/{rxn_data.shape[0]}")
def obtain_atom_mappings(max_time=120):
""" Performs atom mapping by using RDT.
Only maps reactions that are available in .rxn format,
in the working_directory/unmappedRxns folder.
Parameters
----------
max_time : int, optional
Specifies time limit for single reaction mapping
in seconds. Default: 120s.
Outputs
-------
mapped RXN files : .rxn files
mapped TXT files : .txt files
Mappings in SMILES format
pictures of mappings : .png files
"""
# Check if Java is installed
if os.system('java -version') != 0:
raise RuntimeError('Java installation not found')
print('Mapping reactions...')
# Set the original working dir
owd = os.getcwd()
# Check if RDT is present in working dir, download if not
if not os.path.isfile('RDT.jar'):
url = (
'https://github.com/asad/ReactionDecoder/'
'releases/download/v2.4.1/rdt-2.4.1-jar-with-dependencies.jar'
)
r = requests.get(url)
open(os.getcwd() + '/RDT.jar', 'wb').write(r.content)
# Check if required directories are present
if not os.path.isdir('mappedRxns'):
os.makedirs('mappedRxns/rxnFiles')
os.makedirs('mappedRxns/txtFiles')
os.makedirs('mappedRxns/pngFiles')
rxn_list = os.listdir('unmappedRxns')
# Change working dir to keep the output organized
os.chdir('mappedRxns')
try:
for rxnFile in rxn_list:
# Check if reaction is mapped, and run RDT with specified time
# limit if not
try:
if not os.path.isfile(f'rxnFiles/{rxnFile}'):
subprocess.run(['java', '-jar',
'../RDT.jar', '-Q',
'RXN', '-q',
f'../unmappedRxns/{rxnFile}',
'-g', '-j',
'AAM', '-f',
'TEXT'], timeout=max_time)
except BaseException:
continue
# Obtain filenames of generated files and simplify them to respective
# reaction IDs
for name in glob.glob('ECBLAST*'):
os.rename(name, name[8:-8] + name[-4:])
# Move all generated files to different directories, in respect to
# their filetype
if platform.system() == 'Windows':
os.system('move *.png pngFiles')
os.system('move *.rxn rxnFiles')
os.system('move *.txt txtFiles')
else:
os.system('mv *.png ./pngFiles')
os.system('mv *.rxn ./rxnFiles')
os.system('mv *.txt ./txtFiles')
except BaseException:
# Make sure that wd is back to normal no matter what
os.chdir(owd)
print(
f"Reactions mapped in total: "
f"{len(os.listdir('rxnFiles'))}/{len(rxn_list)}")
# Change working dir back to original
os.chdir(owd)
# Remove RDT.jar from working dir
os.remove('RDT.jar')
def parse_reaction_mappings():
""" Parses reaction mappings from mapped RXN files
to a dataframe in suitable format for INCA. Requires
all mapped RXN files to be present in the working_dir/
mappedRxns/rxnFiles folder. For unmapped reactions,
data is picked from working_dir/unmappedRxns folder
and all mapping data is represented as blanks.
Returns
-------
mapping_data : pandas.DataFrame
Reaction mapping data.
"""
if not os.path.isdir('mappedRxns'):
raise RuntimeError(
"'mappedRxns' directory not present in current working directory")
rxn_list = sorted(os.listdir('unmappedRxns'))
# Compile list of reactions that do not have any mapping
unmapped_list = list(
set(os.listdir('unmappedRxns')) - set(
os.listdir('mappedRxns/rxnFiles')))
keys = ['Unnamed: 0',
'Unnamed: 0.1',
'id',
'mapping_id',
'rxn_id',
'rxn_description',
'reactants_stoichiometry_tracked',
'products_stoichiometry_tracked',
'reactants_ids_tracked',
'products_ids_tracked',
'reactants_mapping',
'products_mapping',
'rxn_equation',
'used_',
'comment_',
'reactants_elements_tracked',
'products_elements_tracked',
'reactants_positions_tracked',
'products_positions_tracked'
]
mapping_dict_tmp = {}
for i, rxn in enumerate(rxn_list):
mapping_dict = {k: [] for k in keys}
met_mapping = []
react_cnt = 0
prod_cnt = 0
productBool = False
if rxn not in unmapped_list:
# Extract info from mapped .rxn files
with open(f'mappedRxns/rxnFiles/{rxn}', 'r') as f:
lines = f.readlines()
for j, line in enumerate(lines):
if line.rstrip() == '$RXN':
# Extract number of reactants
react_lim = int(lines[j + 4].split()[0])
# Extract number of products
prod_lim = int(lines[j + 4].split()[1])
if line.rstrip() == '$MOL':
met_id = lines[j + 1].rstrip()
# Hard-coded, since 16 columns is standard for Molfile
# atom rows,
# and 15 can occur if we have >100 atoms on one side (cols
# merge)
if len(line.split()) in (15, 16):
atom_row = line.split()
if atom_row[3] == 'C':
# Split columns if they get merged
if atom_row[-3][0] == '0':
atom_row[-3] = atom_row[-3][1:]
met_mapping.append(atom_row[-3])
# Check if reached the last atom row
if len(lines[j + 1].split()) not in (15, 16):
# Check if current metabolite is reactant or
# product
if not productBool:
# Check if any carbons are present
if met_mapping:
c_tracked = ['C' for atom in met_mapping]
pos_tracked = list(range(len(met_mapping)))
mapping_dict[
'reactants_ids_tracked'].append(
met_id)
mapping_dict[
'reactants_mapping'].append(
met_mapping)
mapping_dict[
'reactants_elements_tracked'].append(
c_tracked)
mapping_dict[
'reactants_positions_tracked'].append(
pos_tracked)
react_cnt += 1
if react_cnt == react_lim:
productBool = True
# Assign metabolite to products if reached reactant
# limit
else:
if met_mapping:
c_tracked = ['C' for atom in met_mapping]
pos_tracked = list(range(len(met_mapping)))
mapping_dict[
'products_ids_tracked'].append(
met_id)
mapping_dict[
'products_mapping'].append(
met_mapping)
mapping_dict[
'products_elements_tracked'].append(
c_tracked)
mapping_dict[
'products_positions_tracked'].append(
pos_tracked)
prod_cnt += 1
if prod_cnt == prod_lim:
react_stoich = [
'-1' for met in range(len(mapping_dict[
'reactants_mapping']))]
prod_stoich = ['1' for met in range(
len(mapping_dict['products_mapping']))]
mapping_dict[
'reactants_stoichiometry_tracked'
] = react_stoich
mapping_dict[
'products_stoichiometry_tracked'
] = prod_stoich
met_mapping = []
mapping_dict['rxn_id'] = rxn[:-4]
mapping_dict['used_'] = True
# Fill all empty fields
mapping_dict['Unnamed: 0'] = 'NULL'
mapping_dict['Unnamed: 0.1'] = 'NULL'
mapping_dict['id'] = 'NULL'
mapping_dict['mapping_id'] = 'NULL'
mapping_dict['rxn_description'] = 'NULL'
mapping_dict['rxn_equation'] = 'NULL'
mapping_dict['comment_'] = 'NULL'
mapping_dict_tmp[i] = mapping_dict
mapping_data = pd.DataFrame.from_dict(mapping_dict_tmp, 'index')
# alphabet for number-letter matching. Max capacity is 63 characters,
# which is a limit set in INCA for this format.
alphabet = list(map(chr, range(97, 123))) + list(map(
chr,
range(65, 91))) + list(map(chr, range(48, 58))) + ['_']
# Loop through all reactions
for i, rxn in mapping_data.iterrows():
try:
# Convert number mappings to letters
carbons_list = [atom for met in rxn['reactants_mapping']
for atom in met]
carbon_map_dict = dict(zip(carbons_list, alphabet))
# Compile alphabetical mapping in curly bracket format
carbon_str = '{'
for j, met in enumerate(rxn['reactants_mapping']):
if j != 0:
carbon_str += ','
for atom in met:
carbon_str += carbon_map_dict[atom]
carbon_str += '}'
mapping_data.at[i, 'reactants_mapping'] = carbon_str
carbon_str = '{'
for j, met in enumerate(rxn['products_mapping']):
if j != 0:
carbon_str += ','
for atom in met:
carbon_str += carbon_map_dict[atom]
carbon_str += '}'
mapping_data.at[i, 'products_mapping'] = carbon_str
except KeyError:
if len(carbons_list) > 63:
print(
f'Reaction {rxn["rxn_id"]} contains '
f'more than 63 carbon atoms')
else:
# Mostly happens when one of the metabolites has (R)
# group in the Molfile, and other has a C in that spot
print(f'{rxn["rxn_id"]} has unmapped carbon(-s)')
mapping_data.at[i, 'reactants_mapping'] = '{}'
mapping_data.at[i, 'products_mapping'] = '{}'
# Convert metabolite lists/stoichiometries to strings in curly brackets
metabolite_str = '{%s}' % (','.join(rxn['reactants_ids_tracked']))
mapping_data.at[i, 'reactants_ids_tracked'] = metabolite_str
metabolite_str = '{%s}' % (','.join(rxn['products_ids_tracked']))
mapping_data.at[i, 'products_ids_tracked'] = metabolite_str
stoich_str = '{%s}' % (','.join(
rxn['reactants_stoichiometry_tracked']))
mapping_data.at[i, 'reactants_stoichiometry_tracked'] = stoich_str
stoich_str = '{%s}' % (','.join(rxn['products_stoichiometry_tracked']))
mapping_data.at[i, 'products_stoichiometry_tracked'] = stoich_str
return mapping_data
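# Minimal sketch of the number-to-letter carbon mapping used above (atom numbers are illustrative):
# _alphabet = list(map(chr, range(97, 123)))
# _carbon_map = dict(zip(['5', '7', '2'], _alphabet))   # {'5': 'a', '7': 'b', '2': 'c'}
# # a reactant mapped as ['5', '7', '2'] is then written as '{abc}' in the curly-bracket format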
def parse_metabolite_mappings():
""" Parses metabolite mapping and symmetry data into
INCA suitable format. Requires all Molfiles to be present
in the working_dir/metabolites directory.
Returns
-------
metabolite_data : pandas.DataFrame
Dataframe containing mapped metabolite data.
"""
metabolite_list = sorted(os.listdir('metabolites'))
keys = ['mapping_id',
'met_id',
'met_elements',
'met_atompositions',
'met_symmetry_elements',
'met_symmetry_atompositions',
'used_',
'comment_',
'met_mapping',
'base_met_ids',
'base_met_elements',
'base_met_atompositions',
'base_met_symmetry_elements',
'base_met_symmetry_atompositions',
'base_met_indices'
]
metabolite_dict_tmp = {}
# Works in a similar fashion to parse_reaction_mappings()
for i, met in enumerate(metabolite_list):
met_dict = {k: 'NULL' for k in keys}
with open(f'metabolites/{met}', 'r') as f:
lines = f.readlines()
carbon_count = 0
for j, line in enumerate(lines):
if j == 0:
met_dict['met_id'] = line.rstrip()
if len(line.split()) == 16:
atom_row = line.split()
if atom_row[3] == 'C':
carbon_count += 1
# Generate carbon atom lists/mappings
carbon_count_list = ['C' for x in range(carbon_count)]
carbon_count_string = '{%s}' % (','.join(carbon_count_list))
carbon_count_range = '{%s}' % (
','.join([str(x) for x in range(carbon_count)]))
met_dict['met_elements'] = carbon_count_string
met_dict['met_atompositions'] = carbon_count_range
# Check if the metabolite is symmetrical
if check_symmetry(met):
carbon_count_range_rev = '{%s}' % (
','.join(
[str(x) for x in range(carbon_count - 1, -1, -1)]
))
met_dict["met_symmetry_elements"] = carbon_count_string
met_dict['met_symmetry_atompositions'] = carbon_count_range_rev
metabolite_dict_tmp[i] = met_dict
metabolite_data =
|
pd.DataFrame.from_dict(metabolite_dict_tmp, 'index')
|
pandas.DataFrame.from_dict
|
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
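# The path-parsing pattern above recurs throughout this notebook; the helper below is a
# hedged refactoring sketch (not part of the original analysis) showing the shared logic.
def load_metric_files(paths, split_token):
    # Read each TSV, tag it with the unsupervised/supervised model parsed from its filename,
    # and concatenate; mirrors the inline loops used in the cells of this notebook.
    frames = []
    for p in paths:
        frame = pd.read_csv(p, sep='\t')
        stem = p[:-len('.tsv')] if p.endswith('.tsv') else p
        info = stem.split(split_token)[-1].split('.')
        if len(info) == 4:
            frame['unsupervised'], frame['supervised'] = info[0], info[1]
        else:
            frame['unsupervised'], frame['supervised'] = 'untransformed', info[0]
        frames.append(frame)
    return pd.concat(frames)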
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[64]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[65]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size vs Models (equal label counts)')
print(plot)
# In[66]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth(method='loess')
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB (lack of a) Crossover Point')
plot
# ## Small training sets without be correction
# In[67]:
in_files = glob.glob('../../results/small_subsets.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[68]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[69]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[70]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[71]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[72]:
in_files = glob.glob('../../results/small_subsets.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[73]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[74]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[75]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size by Model (equal label counts)')
print(plot)
# In[76]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB Crossover Point')
plot
# ## Large training sets without be correction
# In[6]:
in_files = glob.glob('../../results/keep_ratios.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[80]:
in_files = glob.glob('../../results/keep_ratios.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[81]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[82]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB Crossover Point')
plot
# ## Lupus Analyses
# In[83]:
in_files = glob.glob('../../results/keep_ratios.lupus*.tsv')
in_files = [file for file in in_files if 'be_corrected' in file]
print(in_files[:5])
# In[84]:
lupus_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('lupus.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
lupus_metrics = pd.concat([lupus_metrics, new_df])
lupus_metrics['train_count'] = lupus_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
lupus_metrics = lupus_metrics[~(lupus_metrics['supervised'] == 'deep_net')]
lupus_metrics['supervised'] = lupus_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
lupus_metrics
# In[85]:
plot = ggplot(lupus_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Lupus Crossover Point')
plot
# ## Lupus Not Batch Effect Corrected
# In[86]:
in_files = glob.glob('../../results/keep_ratios.lupus*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[87]:
lupus_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('lupus.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
lupus_metrics = pd.concat([lupus_metrics, new_df])
lupus_metrics['train_count'] = lupus_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
lupus_metrics = lupus_metrics[~(lupus_metrics['supervised'] == 'deep_net')]
lupus_metrics['supervised'] = lupus_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
lupus_metrics
# In[88]:
plot = ggplot(lupus_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Lupus Crossover Point')
plot
# ## Tissue Prediction
# In[2]:
in_files = glob.glob('../../results/Blood.Breast.*.tsv')
in_files = [f for f in in_files if 'be_corrected' not in f]
print(in_files[:5])
# In[3]:
tissue_metrics =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
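# Tick offsets (Hour/Minute) are not anchored to the quarterly frequency; adding them
# elementwise is expected to raise IncompatibleFrequency (and TypeError for the reflected add).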
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng =
|
pd.period_range('2014', '2024', freq='A')
|
pandas.period_range
|
# **********************************************************************************************************************
#
# brief: simple script to plot the optimizer runs
#
# author: <NAME>
# date: 25.08.2020
#
# **********************************************************************************************************************
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tikzplotlib import save as tikz_save
plt.style.use('ggplot')
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def get_trend(x, y):
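# least-squares fit of a degree-3 polynomial; the returned poly1d object can be
# evaluated on new x values to draw a smooth trend line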
z = np.polyfit(x, y, 3)
p = np.poly1d(z)
return p
sun = pd.read_csv("run-sun_rgb_train-tag-epoch_loss.csv")
coco = pd.read_csv("run-coco_train-tag-epoch_loss.csv")
none =
|
pd.read_csv("run-elevator_train-tag-epoch_loss.csv")
|
pandas.read_csv
|
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import layout, widgetbox
from bokeh.models import (ColumnDataSource, HoverTool, Text, Div, Circle,
SingleIntervalTicker, Slider, Button, Label)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()
sources = {}
region_color = regions_df['region_color']
region_color.name = 'region_color'
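# Build one frame per year: fertility, life expectancy, population size and region colour
# are concatenated column-wise (presumably sharing a country index) before being stored
# as per-year plot sources.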
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
df =
|
pd.concat([fertility, life, population, region_color], axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 9 09:23:19 2020
Decision Tree
@author: andyvillamayor
"""
# The tree is built automatically by DecisionTreeClassifier
# Parameters: entropy criterion, maximum split depth = 6
# libraries
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import export_graphviz
import pydot
from sklearn import preprocessing
# read the data
data= pd.read_csv('creditos.csv',sep =',')
# check the data
print(data.head())
# validate the dataframe
print(pd.isnull(data).sum()) # the tipo cartera column has 3777 missing NaN values
# compute the age and verify the data
data['fechaHora'] = pd.to_datetime(data['fechaHora'])
data['nacimiento'] = pd.to_datetime(data['nacimiento'])
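# age in whole years: the birth-to-record timedelta is divided by np.timedelta64(1, 'Y')
# (older pandas treats one 'Y' as roughly 365.2425 days; newer versions reject 'Y' units) and truncated to int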
data['edad'] = ((data['fechaHora']-data['nacimiento'])/np.timedelta64(1,'Y')).astype(int)
# the edad (age) column is now the last column of the dataframe
# select the feature variables and the target, discard post-approval variables
# using hold-out
df1 = data.iloc[:,2:3]
df2 = data.iloc[:,83:84]
df3 = data.iloc[:,4:68]
df4 = data.iloc[:,82:83]
# # Combine into a single filtered dataframe
df =
|
pd.concat([df1,df2,df3,df4], axis=1)
|
pandas.concat
|
import ipywidgets
import numpy as np
import pandas as pd
import pathlib
from scipy.stats import linregress
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, RangeTool, Circle, Slope, Label, Legend, LegendItem, LinearColorMapper
from bokeh.layouts import gridplot, column, row
from bokeh.transform import transform
class view_files:
def __init__(self):
self.ep_columns_filtered = ['date','time', 'H', 'qc_H', 'LE', 'qc_LE','sonic_temperature', 'air_temperature', 'air_pressure', 'air_density',
'ET', 'e', 'es', 'RH', 'VPD','Tdew', 'u_unrot', 'v_unrot', 'w_unrot', 'u_rot', 'v_rot', 'w_rot', 'wind_speed', 'max_wind_speed', 'wind_dir', 'u*', '(z-d)/L',
'un_H', 'H_scf', 'un_LE', 'LE_scf','u_var', 'v_var', 'w_var', 'ts_var','H_strg','LE_strg']
self.lf_columns_filtered = ['TIMESTAMP','Hs','u_star','Ts_stdev','Ux_stdev','Uy_stdev','Uz_stdev','Ux_Avg', 'Uy_Avg', 'Uz_Avg',
'Ts_Avg', 'LE_wpl', 'Hc','H2O_mean', 'amb_tmpr_Avg', 'amb_press_mean', 'Tc_mean', 'rho_a_mean','CO2_sig_strgth_mean',
'H2O_sig_strgth_mean','T_tmpr_rh_mean', 'e_tmpr_rh_mean', 'e_sat_tmpr_rh_mean', 'H2O_tmpr_rh_mean', 'RH_tmpr_rh_mean',
'Rn_Avg', 'albedo_Avg', 'Rs_incoming_Avg', 'Rs_outgoing_Avg', 'Rl_incoming_Avg', 'Rl_outgoing_Avg', 'Rl_incoming_meas_Avg',
'Rl_outgoing_meas_Avg', 'shf_Avg(1)', 'shf_Avg(2)', 'precip_Tot', 'panel_tmpr_Avg']
self.TOOLS="pan,wheel_zoom,box_zoom,box_select,lasso_select,reset"
output_notebook()
self.tabs = ipywidgets.Tab([self.tab00(), self.tab01(), self.tab02()])
self.tabs.set_title(0, 'EP - Master Folder')
self.tabs.set_title(1, 'LowFreq - Master Folder')
self.tabs.set_title(2, 'Plot')
self.source_ep = ColumnDataSource(data=dict(x=[], y=[], y2=[], date=[],time=[],et=[]))
self.fig_01 = figure(title='EP', plot_height=250, plot_width=700, x_axis_type='datetime', tools=self.TOOLS)
circle_ep = self.fig_01.circle(x='x', y='y', source=self.source_ep)
self.fig_02 = figure(title='LF', plot_height=250, plot_width=700, x_axis_type='datetime', x_range=self.fig_01.x_range)
circle_lf = self.fig_02.circle(x='x', y='y2', source=self.source_ep, color='red')
self.fig_03 = figure(title='EP x LF',plot_height=500, plot_width=500)
circle_teste = self.fig_03.circle(x='y2', y='y', source=self.source_ep, color='green', selection_color="green",selection_fill_alpha=0.3, selection_line_alpha=0.3,
nonselection_fill_alpha=0.1,nonselection_fill_color="grey",nonselection_line_color="grey",nonselection_line_alpha=0.1)
# self.fig_04 = figure(title='ET', plot_width=1200, plot_height=600)
# colors = ['#440154', '#404387', '#29788E', '#22A784', '#79D151', '#FDE724']
# self.colorMapper = LinearColorMapper(palette=colors)
# self.fig_04.rect(source=self.source_ep, x='date',y='time', fill_color=transform('et', self.colorMapper), line_color=None, width=1,height=1)
# self.hm = self.fig_04.rect(source=self.source_ep, x='date',y='time', line_color=None, width=1,height=1)
self.label = Label(x=1.1, y=18, text='teste', text_color='black')
self.label2 = Label(x=1.1, y=10, text='teste2', text_color='black')
self.label3 = Label(x=1.2, y=11, text='teste3', text_color='black')
self.label4 = Label(x=1,y=11, text='teste4', text_color='black')
# self.label5 = Label(x=1, y=11, text='teste5', text_color='black')
self.fig_03.add_layout(self.label)
self.fig_03.add_layout(self.label2)
self.fig_03.add_layout(self.label3)
self.fig_03.add_layout(self.label4)
# self.fig_03.add_layout(self.label5)
# self.label_teste = Label(x=0,y=0, text='fasdfasdfasdfasdfas', text_color='black')
# self.fig_03.add_layout(self.label_teste)
# self.source_ep.selected.on_change('indices', self.selection_change)
# slope11_l = self.fig_03.line(color='orange', line_dash='dashed')
slope_11 = Slope(gradient=1, y_intercept=0, line_color='orange', line_dash='dashed', line_width=3)
self.fig_03.add_layout(slope_11)
# self.slope_lin_label = self.fig_03.line(color='red', line_width=3)
self.slope_linregress = Slope(gradient=1.3, y_intercept=0,line_color='red', line_width=3)
self.fig_03.add_layout(self.slope_linregress)
c = column([self.fig_01, self.fig_02])
display(self.tabs)
show(row(c, self.fig_03), notebook_handle=True)
# def teste_apagar(self, attr, old,new):
# print(new)
def tab00(self):
self.out_00 = ipywidgets.Output()
with self.out_00:
self.path_EP = ipywidgets.Text(placeholder='Path EP output',
layout=ipywidgets.Layout(width='90%'))
self.button_path_ep = ipywidgets.Button(description='Show EP')
self.button_path_ep.on_click(self._button_Path)
self.select_meta = ipywidgets.Select(description='Configs:',
layout=ipywidgets.Layout(width='90%'),
style={'description_width':'initial'})
self.select_meta.observe(self._select_config, 'value')
return ipywidgets.VBox([ipywidgets.HBox([self.path_EP, self.button_path_ep]),
self.select_meta,
self.out_00])
def tab01(self):
self.out_01 = ipywidgets.Output()
with self.out_01:
self.path_LF = ipywidgets.Text(placeholder='Path LF output',
layout=ipywidgets.Layout(width='90%'))
self.button_path_lf = ipywidgets.Button(description='Show LF')
self.button_path_lf.on_click(self._button_Path)
self.html_lf = ipywidgets.HTML()
return ipywidgets.VBox([self.out_01,
ipywidgets.HBox([self.path_LF, self.button_path_lf]),
self.html_lf])
def tab02(self):
self.out_02 = ipywidgets.Output()
with self.out_02:
self.dropdown_yAxis_ep = ipywidgets.Dropdown(description='EP Y-Axis', options=self.ep_columns_filtered)
self.dropdown_yAxis_lf = ipywidgets.Dropdown(description='LF Y-Axis', options=self.lf_columns_filtered)
self.checkBox_EnergyBalance = ipywidgets.Checkbox(value=False, description='Energy Balance')
# self.intSlider_flagFilter = ipywidgets.IntSlider(value=2, min=0, max=2, step=1, description='Flag Filter')
self.selectionSlider_flagFilter = ipywidgets.SelectionSlider(options=[0,1,2,'All'], value='All', description='Flag Filter')
self.checkBox_rainfallFilter = ipywidgets.Checkbox(value=False, description='Rainfall Filter')
self.floatSlider_signalStrFilter = ipywidgets.FloatSlider(value=0, min=0, max=1, step=0.01, description='Signal Str Filter')
self.selectionRangeSlider_date = ipywidgets.SelectionRangeSlider(options=[0,1], description='Date Range', layout=ipywidgets.Layout(width='500px'))
self.selectionRangeSlider_hour = ipywidgets.SelectionRangeSlider(options=[0,1], description='Hour Range', layout=ipywidgets.Layout(width='500px'))
self.button_plot = ipywidgets.Button(description='Plot')
# self.button_plot.on_click(self.update_ep)
self.button_plot.on_click(self._button_plot)
controls_ep = [self.dropdown_yAxis_ep,
self.selectionSlider_flagFilter,
self.checkBox_rainfallFilter,
self.floatSlider_signalStrFilter,
self.checkBox_EnergyBalance,
self.selectionRangeSlider_date,
self.selectionRangeSlider_hour]
for control in controls_ep:
control.observe(self.update_ep, 'value')
controls_lf = [self.dropdown_yAxis_lf]
for control in controls_lf:
# control.observe(self.update_lf, 'value')
control.observe(self.update_ep, 'value')
return ipywidgets.VBox([ipywidgets.HBox([self.dropdown_yAxis_ep, self.dropdown_yAxis_lf, self.checkBox_EnergyBalance]),
ipywidgets.HBox([self.selectionSlider_flagFilter, self.checkBox_rainfallFilter, self.floatSlider_signalStrFilter]),
self.selectionRangeSlider_date,
self.selectionRangeSlider_hour,
self.button_plot])
def _button_Path(self, *args):
if self.tabs.selected_index == 0:
with self.out_00:
try:
self.folder_path_ep = pathlib.Path(self.path_EP.value)
readme = self.folder_path_ep.rglob('Readme.txt')
readme_df = pd.read_csv(list(readme)[0], delimiter=',')
temp_list = [row.to_list() for i,row in readme_df[['rotation', 'lowfrequency','highfrequency','wpl','flagging','name']].iterrows()]
a = []
self.config_name = []
for i in temp_list:
self.config_name.append(i[5])
a.append('Rotation:{} |LF:{} |HF:{} |WPL:{} |Flag:{}'.format(i[0],i[1],i[2],i[3],i[4]))
self.select_meta.options = a
except:
print('Error')
if self.tabs.selected_index == 1:
with self.out_01:
try:
self.folder_path_lf = pathlib.Path(self.path_LF.value)
lf_files = self.folder_path_lf.rglob('TOA5*.flux.dat')
self.dfs_02_01 = []
for file in lf_files:
# print(file)
self.dfs_02_01.append(pd.read_csv(file, skiprows=[0,2,3], parse_dates=['TIMESTAMP'],na_values='NAN', usecols=self.lf_columns_filtered))
self.dfs_concat_02_01 = pd.concat(self.dfs_02_01)
# self.dropdown_yAxis_lf.options = self.lf_columns_filtered
self.html_lf.value = "<table> <tr><td><span style='font-weight:bold'>Number of Files:</spam></td> <td>{}</td></tr><tr><td><span style='font-weight:bold'>Begin:</span></td> <td>{}</td></tr> <tr> <td><span style='font-weight:bold'>End:</span></td><td>{}</td> </tr>".format(len(self.dfs_02_01), self.dfs_concat_02_01['TIMESTAMP'].min(),self.dfs_concat_02_01['TIMESTAMP'].max())
except:
print('error')
def _select_config(self, *args):
with self.out_00:
# self.dfs_01_01 = []
# for i in self.select_meta.index:
full_output_files = self.folder_path_ep.rglob('*{}*_full_output*.csv'.format(self.config_name[self.select_meta.index]))
dfs_single_config = []
for file in full_output_files:
dfs_single_config.append(pd.read_csv(file, skiprows=[0,2], na_values=-9999, parse_dates={'TIMESTAMP':['date', 'time']},keep_date_col=True, usecols=self.ep_columns_filtered))
# self.df_ep = pd.read_csv(file, skiprows=[0,2], na_values=-9999, parse_dates={'TIMESTAMP':['date', 'time']}, usecols=self.ep_columns_filtered)
self.df_ep = pd.concat(dfs_single_config)
# try:
# self.dropdown_yAxis_ep.options = self.ep_columns_filtered
# self.dropdown_yAxis_ep.value = 'H'
# except:
# pass
def filter_flag_ep(self):
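# Apply the UI filters: minimum H2O signal strength, date and hour ranges, optional
# rainfall == 0, and the QC-flag slider (both qc_H and qc_LE must match when the
# energy-balance checkbox is on, otherwise only the flag of the selected EP variable).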
try:
flag = self.dfs_compare[
(self.dfs_compare['H2O_sig_strgth_mean'] >= self.floatSlider_signalStrFilter.value) &
(self.dfs_compare['TIMESTAMP'].dt.date >= self.selectionRangeSlider_date.value[0]) &
(self.dfs_compare['TIMESTAMP'].dt.date <= self.selectionRangeSlider_date.value[1]) &
(self.dfs_compare['TIMESTAMP'].dt.time >= self.selectionRangeSlider_hour.value[0]) &
(self.dfs_compare['TIMESTAMP'].dt.time <= self.selectionRangeSlider_hour.value[1])
]
except:
flag = self.dfs_compare[
(self.dfs_compare['H2O_sig_strgth_mean'] >= self.floatSlider_signalStrFilter.value)
]
if self.checkBox_rainfallFilter.value == True:
flag = flag[flag['precip_Tot']==0]
if self.checkBox_EnergyBalance.value == True:
if self.selectionSlider_flagFilter.value in [0,1,2]:
flag = flag[flag[['qc_H', 'qc_LE']].isin([self.selectionSlider_flagFilter.value]).sum(axis=1)==2]
if self.selectionSlider_flagFilter.value == 'All':
pass
if self.checkBox_EnergyBalance.value == False:
if self.selectionSlider_flagFilter.value in [0,1,2]:
flag = flag[flag['qc_{}'.format(self.dropdown_yAxis_ep.value)]==self.selectionSlider_flagFilter.value]
if self.selectionSlider_flagFilter.value == 'All':
pass
return flag
def _button_plot(self, *args):
with self.out_02:
self.dfs_compare = pd.merge(left=self.dfs_concat_02_01, right=self.df_ep, how='outer', on='TIMESTAMP', suffixes=("_lf","_ep"))
self.selectionRangeSlider_date.options = self.dfs_compare['TIMESTAMP'].dt.date.unique()
self.selectionRangeSlider_hour.options = sorted(list(self.dfs_compare['TIMESTAMP'].dt.time.unique()))
# print(self.dfs_compare)
# self.update_lf()
# self.slope_linregress.gradient = 5
self.update_ep()
def update_ep(self, *args):
self.df_filter_ep = self.filter_flag_ep()
# self.source_ep.data = dict(x=self.df_filter_ep['TIMESTAMP'], y=self.df_filter_ep['{}'.format(self.dropdown_yAxis_ep.value)], y2=self.df_filter_ep['{}'.format(self.dropdown_yAxis_lf.value)])
# self.fig_01.xaxis.axis_label = 'TIMESTAMP'
# self.fig_01.yaxis.axis_label = '{}'.format(self.dropdown_yAxis_ep.value)
if self.checkBox_EnergyBalance.value == True:
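# Energy-balance closure view: y is the turbulent flux sum H + LE (plus storage terms)
# and y2 is the available energy Rn minus the mean of the two soil heat flux plates.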
self.source_ep.data = dict(x=self.df_filter_ep['TIMESTAMP'],
y=self.df_filter_ep[['H', 'LE','H_strg','LE_strg']].sum(axis=1, min_count=1),
y2=self.df_filter_ep['Rn_Avg']-self.df_filter_ep[['shf_Avg(1)','shf_Avg(2)']].mean(axis=1))
# self.hm.fill_color=transform('et', self.colorMapper)
#self.df_filter_ep[['Rn_Avg', 'shf_Avg(1)']].sum(axis=1, min_count=1)
#self.df_filter_ep[['H', 'LE']].sum(axis=1, min_count=1)
self.fig_01.xaxis.axis_label = 'TIMESTAMP'
self.fig_01.yaxis.axis_label = 'H + LE'
self.fig_02.xaxis.axis_label = 'TIMESTAMP'
self.fig_02.yaxis.axis_label = 'Rn - G'
self.fig_03.yaxis.axis_label = 'H + LE'
self.fig_03.xaxis.axis_label = 'Rn - G'
# self.fig_04.x_range.factors = self.df_filter_ep['date'].unique()
# self.fig_04.y_range.factors = self.df_filter_ep['time'].unique()
# self.label.text = 'fasfdasfasfasfaf'
self.df_corr =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression
data = load_iris()
df =
|
pd.DataFrame(data.data, columns=data.feature_names)
|
pandas.DataFrame
|
from itertools import chain
from itertools import groupby
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
import oedialect # noqa: F401
import open_FRED.cli as ofr
import pandas as pd
import sqlalchemy as sqla
from geoalchemy2.elements import WKTElement as WKTE
from geoalchemy2.shape import to_shape
from pandas import DataFrame as DF
from pandas import Series
from pandas import Timedelta as TD
from pandas import to_datetime as tdt
from shapely.geometry import Point
from sqlalchemy.orm import sessionmaker
from .dedup import deduplicate
#: The type of variable selectors. A selector should always contain the
#: name of the variable to select and optionally the height to select,
#: if only a specific one is desired.
Selector = Union[Tuple[str], Tuple[str, int]]
TRANSLATIONS: Dict[str, Dict[str, List[Selector]]] = {
"windpowerlib": {
"wind_speed": [("VABS_AV",)],
"temperature": [("T",)],
"roughness_length": [("Z0",)],
"pressure": [("P",)],
},
"pvlib": {
"wind_speed": [("VABS_AV", 10)],
"temp_air": [("T", 10)],
"pressure": [("P", 10)],
"dhi": [("ASWDIFD_S", 0)],
"ghi": [("ASWDIFD_S", 0), ("ASWDIR_S", 0)],
"dni": [("ASWDIRN_S", 0)],
},
}
def defaultdb():
engine = getattr(defaultdb, "engine", None) or sqla.create_engine(
"postgresql+oedialect://openenergy-platform.org"
)
defaultdb.engine = engine
session = (
getattr(defaultdb, "session", None) or sessionmaker(bind=engine)()
)
defaultdb.session = session
metadata = sqla.MetaData(schema="climate", bind=engine, reflect=False)
return {"session": session, "db": ofr.mapped_classes(metadata)}
class Weather:
"""
Load weather measurements from an openFRED conforming database.
Note that you need a database storing weather data using the openFRED
schema in order to use this class. There is one publicly available at
https://openenergy-platform.org
Now you can simply instantiate a `Weather` object via e.g.:
Examples
--------
>>> from shapely.geometry import Point
>>> point = Point(9.7311, 53.3899)
>>> weather = Weather(
... start="2007-04-05 06:00",
... stop="2007-04-05 07:31",
... locations=[point],
... heights=[10],
... variables="pvlib",
... **defaultdb()
... )
Instead of the special values `"pvlib"` and `"windpowerlib"` you can
also supply a list of variables, like e.g. `["P", "T", "Z0"]`, to
retrieve from the database.
After initialization, you can use e.g. `weather.df(point, "pvlib")`
to retrieve a `DataFrame` with weather data from the measurement
location closest to the given `point`.
Parameters
----------
start : Anything `pandas.to_datetime` can convert to a timestamp
Load weather data starting from this date.
stop : Anything `pandas.to_datetime` can convert to a timestamp
Don't load weather data before this date.
locations : list of :shapely:`Point`
Weather measurements are collected from measurement locations closest
to the given points.
location_ids : list of int
Weather measurements are collected from measurement locations having
primary keys, i.e. IDs, in this list. Use this e.g. if you know you're
using the same location(s) for multiple queries and you don't want
the overhead of doing the same nearest point query multiple times.
heights : list of numeric
Limit selected timeseries to these heights. If `variables` contains a
variable which isn't height dependent, i.e. it has only one height,
namely `0`, the corresponding timeseries is always
selected. Don't select the corresponding variable, in order to avoid
this.
Defaults to `None` which means no restriction on height levels.
variables : list of str or one of "pvlib" or "windpowerlib"
Load the weather variables specified in the given list, or the
variables necessary to calculate a feedin using `"pvlib"` or
`"windpowerlib"`.
Defaults to `None` which means no restriction on loaded variables.
regions : list of :shapely:`Polygon`
Weather measurements are collected from measurement locations
contained within the given polygons.
session : `sqlalchemy.orm.Session`
db : dict of mapped classes
"""
def __init__(
self,
start,
stop,
locations,
location_ids=None,
heights=None,
variables=None,
regions=None,
session=None,
db=None,
):
self.session = session
self.db = db
if self.session is None and self.db is None:
return
variables = {
"windpowerlib": ["P", "T", "VABS_AV", "Z0"],
"pvlib": [
"ASWDIFD_S",
"ASWDIRN_S",
"ASWDIR_S",
"P",
"T",
"VABS_AV",
],
None: variables,
}[variables if variables in ["pvlib", "windpowerlib"] else None]
self.locations = (
{(p.x, p.y): self.location(p) for p in locations}
if locations is not None
else {}
)
self.regions = (
{WKTE(r, srid=4326): self.within(r) for r in regions}
if regions is not None
else {}
)
if location_ids is None:
location_ids = []
self.location_ids = set(
[
d.id
for d in chain(self.locations.values(), *self.regions.values())
]
+ location_ids
)
self.locations = {
k: to_shape(self.locations[k].point) for k in self.locations
}
self.locations.update(
{
(p.x, p.y): p
for p in chain(
self.locations.values(),
(
to_shape(location.point)
for region in self.regions.values()
for location in region
),
)
}
)
self.regions = {
k: [to_shape(location.point) for location in self.regions[k]]
for k in self.regions
}
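# Single query for every matching (series, variable, timespan, location) row; sorting by
# location, variable name, height and timespan lets the groupby below see each
# (point, variable, height) group as one contiguous run.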
series = sorted(
session.query(
db["Series"], db["Variable"], db["Timespan"], db["Location"]
)
.join(db["Series"].variable)
.join(db["Series"].timespan)
.join(db["Series"].location)
.filter((db["Series"].location_id.in_(self.location_ids)))
.filter(
True
if variables is None
else db["Variable"].name.in_(variables)
)
.filter(
True
if heights is None
else (db["Series"].height.in_(chain([0], heights)))
)
.filter(
(db["Timespan"].stop >= tdt(start))
& (db["Timespan"].start <= tdt(stop))
)
.all(),
key=lambda p: (
p[3].id,
p[1].name,
p[0].height,
p[2].start,
p[2].stop,
),
)
self.series = {
k: [
(
segment_start.tz_localize("UTC")
if segment_start.tz is None
else segment_start,
segment_stop.tz_localize("UTC")
if segment_stop.tz is None
else segment_stop,
value,
)
for (series, variable, timespan, location) in g
for (segment, value) in zip(timespan.segments, series.values)
for segment_start in [tdt(segment[0])]
for segment_stop in [tdt(segment[1])]
if segment_start >= tdt(start) and segment_stop <= tdt(stop)
]
for k, g in groupby(
series,
key=lambda p: (
(to_shape(p[3].point).x, to_shape(p[3].point).y),
p[1].name,
p[0].height,
),
)
}
self.series = {k: deduplicate(self.series[k]) for k in self.series}
self.variables = {
k: sorted(set(h for _, h in g))
for k, g in groupby(
sorted((name, height) for _, name, height in self.series),
key=lambda p: p[0],
)
}
self.variables = {k: {"heights": v} for k, v in self.variables.items()}
@classmethod
def from_df(klass, df):
assert isinstance(df.columns, pd.MultiIndex), (
"DataFrame's columns aren't a `pandas.indexes.multi.MultiIndex`.\n"
"Got `{}` instead."
).format(type(df.columns))
assert len(df.columns.levels) == 2, (
"DataFrame's columns have more than two levels.\nGot: {}.\n"
"Should be exactly two, the first containing variable names and "
"the\n"
"second containing matching height levels."
)
variables = {
variable: {"heights": [vhp[1] for vhp in variable_height_pairs]}
for variable, variable_height_pairs in groupby(
df.columns.values,
key=lambda variable_height_pair: variable_height_pair[0],
)
}
locations = {xy: Point(xy[0], xy[1]) for xy in df.index.values}
series = {
(xy, *variable_height_pair): df.loc[xy, variable_height_pair]
for xy in df.index.values
for variable_height_pair in df.columns.values
}
instance = klass(start=None, stop=None, locations=None)
instance.locations = locations
instance.series = series
instance.variables = variables
return instance
def location(self, point: Point):
""" Get the measurement location closest to the given `point`.
"""
point = WKTE(point.to_wkt(), srid=4326)
return (
self.session.query(self.db["Location"])
.order_by(self.db["Location"].point.distance_centroid(point))
.first()
)
def within(self, region=None):
""" Get all measurement locations within the given `region`.
"""
region = WKTE(region.to_wkt(), srid=4326)
return (
self.session.query(self.db["Location"])
.filter(self.db["Location"].point.ST_Within(region))
.all()
)
def to_csv(self, path):
df = self.df()
df = df.applymap(
# Unzip, i.e. convert a list of tuples to a tuple of lists, the
# list of triples in each DataFrame cell and convert the result to
# a JSON string. Unzipping is necessary because the pandas'
# `to_json` wouldn't format the `Timestamps` correctly otherwise.
lambda s: pd.Series(pd.Series(xs) for xs in zip(*s)).to_json(
date_format="iso"
)
)
return df.to_csv(path, quotechar="'")
@classmethod
def from_csv(cls, path_or_buffer):
df = pd.read_csv(
path_or_buffer,
# This is necessary because the automatic conversion isn't precise
# enough.
converters={0: float, 1: float},
header=[0, 1],
index_col=[0, 1],
quotechar="'",
)
df.columns.set_levels(
[df.columns.levels[0], [float(c) for c in df.columns.levels[1]]],
inplace=True,
)
df = df.applymap(lambda s: pd.read_json(s, typ="series"))
# Reading the JSON string back in yields a weird format. Instead of a
# nested `Series` we get a `Series` containing three dictionaries. The
# `dict`s "emulate" a `Series` since their keys are string
# representations of integers and their values are the actual values
# that would be stored at the corresponding position in a `Series`. So
# we have to manually reformat the data we get back. Since there's no
# point in doing two conversions, we don't convert it back to nested
# `Series`, but immediately to `list`s of `(start, stop, value)`
# triples.
df = df.applymap(
lambda s: list(
zip(
*[
[
# The `Timestamp`s in the inner `Series`/`dict`s
# where also not converted, so we have to do this
# manually, too.
pd.to_datetime(v, utc=True) if n in [0, 1] else v
for k, v in sorted(
s[n].items(), key=lambda kv: int(kv[0])
)
]
for n in s.index
]
)
)
)
return cls.from_df(df)
def df(self, location=None, lib=None):
if lib is None and location is None:
columns = sorted(set((n, h) for (xy, n, h) in self.series))
index = sorted(xy for xy in set(xy for (xy, n, h) in self.series))
data = {
(n, h): [self.series[xy, n, h] for xy in index]
for (n, h) in columns
}
return DF(index=pd.MultiIndex.from_tuples(index), data=data)
if lib is None:
raise NotImplementedError(
"Arbitrary dataframes not supported yet.\n"
'Please use one of `lib="pvlib"` or `lib="windpowerlib"`.'
)
xy = (location.x, location.y)
location = (
self.locations[xy]
if xy in self.locations
else to_shape(self.location(location).point)
if self.session is not None
else min(
self.locations.values(),
key=lambda point: location.distance(point),
)
)
point = (location.x, location.y)
index = (
[
dhi[0] + (dhi[1] - dhi[0]) / 2
for dhi in self.series[point, "ASWDIFD_S", 0]
]
if lib == "pvlib"
else [
wind_speed[0]
for wind_speed in self.series[
point, "VABS_AV", self.variables["VABS_AV"]["heights"][0]
]
]
if lib == "windpowerlib"
else []
)
def to_series(v, h):
s = self.series[point, v, h]
return
|
Series([p[2] for p in s], index=[p[0] for p in s])
|
pandas.Series
|
import os
import warnings
import numpy as np
import pandas as pd
from typing import Iterable, Optional, List
def sets_match(a: Iterable, b: Iterable):
return a is None or set(a) == set(b)
def sub_set(self: pd.DataFrame, new: pd.DataFrame):
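# Grow `self` in place so it contains every row/column of `new` (new cells start as NaN),
# then overwrite the overlapping block with `new`'s values.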
new_index = set(new.index).difference(self.index)
new_columns = set(new.columns).difference(self.columns)
for column in new_columns: self[column] = np.nan
if len(self.columns) > 0:
for key in new_index: self.loc[key] = np.nan
if len(new.columns) == 0 or len(new.index) == 0: return
self.loc[list(new.index), list(new.columns)] = new
def sub_get(self: pd.DataFrame, cols: Optional[List[str]], index: Optional[List[str]]):
if cols is None: cols = self.columns
if index is None: index = self.index
if len(cols) == 0 or len(index) == 0: return
|
pd.DataFrame()
|
pandas.DataFrame
|
import ast
import multiprocessing
import pickle
import pandas as pd
import numpy as np
import joblib
import re
import os
from collections import namedtuple
from sklearn.model_selection import train_test_split
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import konlpy
from konlpy.tag import Kkma, Okt, Hannanum, Twitter
from datetime import datetime
#tokenize & word2vec packages
from soynlp import DoublespaceLineCorpus
from soynlp.word import WordExtractor
from soynlp.tokenizer import LTokenizer
from soynlp.noun import NewsNounExtractor
from gensim.models import Word2Vec
import gensim, logging
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.summarization import keywords
from newspaper import Article
from newspaper import fulltext
from news.task_module.news_crawler import NaverNewsCrawler
import platform
import os
# from news.task_module.professor import ProfessorNews
class ProfessorNews:
def __init__(self):
path = os.getcwd()
# self.new_small = pd.read_excel('./nlp_data/new_small_class.xlsx')
if platform.system() == 'Linux':
# path = '/home/ubuntu/sunbo_django/recommender/models/'
# path2 = '/home/ubuntu/sunbo_django/news/task_module'
self.ko_stopwords = pd.read_csv(path + '/news/task_module/nlp_data/korean_stopwords.txt')
self.token_stops = pd.read_csv(path + '/news/task_module/nlp_data/token_stopwords.csv', engine='python', encoding='cp949')['stopwords'].tolist()
self.doc_vectorizer = Doc2Vec.load(path + '/recommender/models/Doc2vec1.model')
self.doc_set =
|
pd.read_excel(path + '/news/task_module/nlp_data/doc_set.xlsx')
|
pandas.read_excel
|
import requests as req
from pathlib import Path
import pandas as pd
import pprint as pp
import json
from bs4 import BeautifulSoup
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
# Import API importers
import musicbrainzngs
def hot100(iterator, path, proxies, timeout):
'''
Add your custom Scrape function here. As an example you can find the scrape function to get Walmart Stores across the US.
This example will scrape all Walmarts (does not include Sam's Club). You can fully customize this function.
IMPORTANT: It's necessary that you name your function the same as your `target` keyword (e.g. in this case the target=walmart).
For return statements make sure you return `False` for a failed or skipped scraping and `True` for a successful scraping.
'''
hits = []
this = Path(path)
if this.is_file():
# Iterator exists
return False
response = req.get(
url='https://www.billboard.com/charts/hot-100/' + iterator
)
def get_lyric_links(divs):
links = []
for div in divs:
href = ''
try:
href = div.find('div',{"class": "chart-list-item__lyrics"}).find('a').get('href')
except:
pass
links.append(href)
return links
try:
html = response.text
soup = BeautifulSoup(html,'html.parser')
date = soup.findAll("button", {"class": "chart-detail-header__date-selector-button"})
hot100s = []
date = date[0].get_text().strip()
ranks = [rank.get_text().strip() for rank in soup.findAll("div", {"class": "chart-list-item__rank"})]
titles = [title.get_text().strip() for title in soup.findAll("span", {"class": "chart-list-item__title-text"})]
artists = [artist.get_text().strip() for artist in soup.findAll("div",{"class": "chart-list-item__artist"})]
links = get_lyric_links(soup.findAll("div",{"class": "chart-list-item__text"}))
equal_length = len(ranks) == len(titles) == len(artists) == len(links)
array_len = len(ranks)
dates = [date for i in range(array_len)]
if not equal_length:
print('not equal length')
df =
|
pd.DataFrame({'date': dates,'rank': ranks, 'title': titles, 'artist': artists, 'link': links})
|
pandas.DataFrame
|
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from src.preprocessing import add_derived_title, categorize_column, add_is_alone_column, impute_nans
class TestProcessing(unittest.TestCase):
def test_add_derived_title(self):
df = pd.DataFrame({
'Name': ['Smith, Mr. <NAME> ', 'Heikkinen, M<NAME> ', '<NAME>',
'Allen, Ms. Maisie', 'Allen, Mme. Maisie',
# rare titles
'<NAME> ', 'Heikkinen, Countess. X ', 'Allen, Capt. Maisie',
'<NAME> ', '<NAME> ', 'Allen, Dr. Maisie',
'<NAME> ', 'Heikkinen, Rev. Laina ', 'Allen, Sir. Maisie',
'<NAME>. <NAME> ', '<NAME> '
],
})
expected = pd.DataFrame({
'Name': ['<NAME> ', '<NAME> ', '<NAME>',
'Allen, Ms. Maisie', 'Allen, Mme. Maisie',
'<NAME> ', 'Heikkinen, Countess. X ', 'Allen, Capt. Maisie',
'<NAME>. <NAME> ', '<NAME> ', 'Allen, Dr. Maisie',
'<NAME> ', 'Heikkinen, Rev. Laina ', 'Allen, Sir. Maisie',
'<NAME>. <NAME> ', '<NAME> '
],
'Title': ['Mr', 'Miss', 'Miss',
'Miss', 'Mrs',
'Rare', 'Rare', 'Rare',
'Rare', 'Rare', 'Rare',
'Rare', 'Rare', 'Rare',
'Rare', 'Rare']
})
assert_frame_equal(expected, add_derived_title(df))
def test_categorize_column_into_2_categories(self):
series =
|
pd.Series([5, 20, 10, 25])
|
pandas.Series
|
"""Misc utilities for MODFLOW."""
__all__ = [
"geotransform_from_flopy",
"transform_data_to_series_or_frame",
]
from collections.abc import Iterable
import pandas as pd
from swn.util import abbr_str
def sfr_rec_to_df(sfr):
"""Convert flopy rec arrays for ds2 and ds6 to pandas dataframes."""
d = sfr.segment_data
# multi index: keys are (stress period key, field name) pairs, so the frame below gets MultiIndex columns
reform = {(i, j): d[i][j] for i in d.keys() for j in d[i].dtype.names}
segdatadf =
|
pd.DataFrame.from_dict(reform)
|
pandas.DataFrame.from_dict
|
import pandas as pd
import time
import sys
def df_parser(file) -> tuple:
"""
Returns df and name depending on file type
"""
extention = file.split('.')[1]
name = file.split('.')[0]
if extention == 'csv':
return pd.read_csv(file), name
elif extention == 'xlsx':
return pd.read_excel(file), name
elif extention == 'sql':
return pd.read_sql(file), name
else:
return None
def data_lst(df) -> dict:
"""
Stores input data into a dictionary
"""
data = {}
value = ''
for row in df.index:
value = ','.join(str(x) for x in df.iloc[row].values)
if data.get(value):
data[value]['rows'].append(row)
else:
data[value] = {'rows': [row]}
return data
def val_list_count(lst1, lst2) -> list:
"""
Computes the total matched and unmatched counts and returns the matched/unmatched data lists along with those counts.
"""
match_count = 0
unmatch_count = 0
match_lst = []
unmatch_lst = []
for key in lst1:
if lst2.get(key):
match_count += len(lst1[key]['rows'])
match_lst.append(key.split(','))
else:
unmatch_count += len(lst1[key]['rows'])
unmatch_lst.append(key.split(','))
return match_lst, unmatch_lst, match_count, unmatch_count
def validator(workbook1, workbook2) -> None:
"""
Auto validator that will return the percentage of correlation, either positive or negative
:param workbook1 string: path to workbook 1
:param workbook2 string: path to workbook 2
:param corr boolean: choose whether you would like to search for positive or negative correlation
:return: percentages
"""
start_time = time.time()
df1, name1 = df_parser(workbook1)
df2, name2 = df_parser(workbook2)
val_list_1 = data_lst(df1)
val_list_2 = data_lst(df2)
mlst1, ulst1, mcount1, ucount1 = val_list_count(val_list_1, val_list_2)
mlst2, ulst2, mcount2, ucount2 = val_list_count(val_list_2, val_list_1)
pd.DataFrame(mlst1).to_csv(f'reports/Matching_{name1}.csv')
|
pd.DataFrame(mlst2)
|
pandas.DataFrame
|
# coding: utf-8
# ## Integrating LSTM model with Azure Machine Learning Package for Forecasting
#
# In this notebook, learn how to integrate an LSTM model into the framework provided by Azure Machine Learning Package for Forecasting (AMLPF) to quickly build a forecasting model.
# We will use the Dow Jones dataset to build a model that forecasts quarterly revenue for the 30 Dow Jones listed companies.
#
# #### Disclaimer:
# This notebook is based on the ongoing development work as part of the future release of AMLPF. Therefore, please consider this as a preview of what might become available in future as part of AMLPF.
# Further, please note that this work has currently been tested only on Windows platform.
#
# ### Prerequisites:
# If you don't have an Azure subscription, create a free account before you begin. The following accounts and application must be set up and installed:<br/>
# * Azure Machine Learning Experimentation account.
#
# If these three are not yet created or installed, follow the Azure Machine Learning Quickstart and Workbench installation article.
#
# In[18]:
import warnings
warnings.filterwarnings('ignore') # comment out this statement if you do not want to suppress the warnings.
import sys, os, inspect
import numpy as np
import pandas as pd
from datetime import datetime
import json
import requests
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import ftk
ftk_root_path = (ftk.__path__)[0] # This is the path where ftk package is installed.
from ftk.pipeline import AzureMLForecastPipeline
from ftk.operationalization.dnnscorecontext import DnnScoreContext
from ftk.operationalization.dnn_score_script_helper import score_run
from ftk.dnn_utils import create_lag_lead_features
from ftk.dnn_utils import pickle_keras_models
from keras.models import Model, Sequential
from keras.layers import Input, LSTM, Dense
from keras.models import load_model
print('imports done')
# In[2]:
np.random.seed(1000) # Set random seed for reproducibility.
# In[3]:
data_file_path = ftk_root_path + "\\data\\dow_jones\\dow_jones_data.tsv" # Change it depending upon where this file is stored.
num_lag_feats = 16 # Number of lag features to be used while training the model.
num_leads = 0 # Lead zero indicates current-time's value. forecast only one step at a time.
# Note: MAPE error computation is done considering num_leads = 0. It may need to be updated to take into account num_leads > 0. It has not been done yet.
num_test_records = 4 # Keep last four records for each company in the test data.
num_lstm_au = 50 # Number of units in single lstm layer.
num_epochs = 150 # Number of epochs to fit the model.
dj_series_freq = 'Q'
# In[4]:
# Read the dow_jones_data.
dj_df = pd.read_table(data_file_path)
print(dj_df.head())
print(dj_df.info())
# In[5]:
# Revenue has null values for some companies; 'V' has been identified as one such company.
# In this experiment, we remove the company from the dataset instead of interpolating.
dj_df = dj_df[dj_df['company_ticker'] != 'V']
# Convert quarter_start field to datetime.
dj_df['quarter_start'] = pd.to_datetime(dj_df['quarter_start'])
print(dj_df.info())
# In[6]:
# Group data by company to normalize it accordingly.
grouped_data = dj_df.groupby(by='company_ticker')
cmp_to_scaler = {}
norm_dj_df = pd.DataFrame(columns=dj_df.columns) # Dataframe with quarter_start, company_ticker, normalized-revenue information.
# In[7]:
# Normalize each company's data individually and save the scaler into a dictionary to be used later.
for grp_name, grp_data in grouped_data:
cur_grp_data = grp_data.sort_values(by=['quarter_start'])
cur_grp_data = cur_grp_data.drop(['company_ticker', 'quarter_start'], axis=1)
scaler = MinMaxScaler(feature_range=(0.000001, 1))
norm_grp_data = scaler.fit_transform(cur_grp_data)
cmp_to_scaler[grp_name] = scaler
norm_grp_df = pd.DataFrame(norm_grp_data, columns=['revenue'])
aux_data_df = grp_data.loc[:,('quarter_start', 'company_ticker')]
aux_data_df.reset_index(drop=True, inplace=True)
cur_grp_norm_df = pd.concat((aux_data_df, norm_grp_df), axis=1)
norm_dj_df = norm_dj_df.append(cur_grp_norm_df)
# In[8]:
# Create 16 lags as features for each quarterly data point (normalized revenue in previous step).
dj_reg = pd.DataFrame()
norm_grp_data = norm_dj_df.groupby(by='company_ticker')
for grp_name, grp_data in norm_grp_data:
cur_grp_data = grp_data.sort_values(by=['quarter_start'])
dj_reg_grp = create_lag_lead_features(cur_grp_data, ts_col='revenue',
aux_cols=['company_ticker', 'quarter_start'], num_lags=num_lag_feats)
dj_reg = dj_reg.append(dj_reg_grp)
# In[9]:
# Create list of feature column-names.
feat_cols = []
feat_tgt_cols = []
for i in range(num_lag_feats, 0, -1) :
feat_cols.append('revenueLag' + str(i))
feat_tgt_cols.extend(feat_cols)
# Create list of target column-names.
target_cols = ['revenueLead0']
for i in range(1, num_leads+1) :
target_cols.append('revenueLead' + str(i))
feat_tgt_cols.extend(target_cols)
# In[10]:
# Divide the data into taining and test dataset for each company.
dj_reg_grp_data = dj_reg.groupby(by='company_ticker')
train_data = pd.DataFrame(columns=dj_reg.columns)
test_data = pd.DataFrame(columns=dj_reg.columns)
for grp_name, grp_data in dj_reg_grp_data:
cur_grp_data = grp_data.sort_values(by=['quarter_start'])
num_records = cur_grp_data.shape[0]
train_data = train_data.append(pd.DataFrame(cur_grp_data.iloc[:(num_records - num_test_records),:]))
test_data = test_data.append(pd.DataFrame(cur_grp_data.iloc[(num_records - num_test_records):,:]))
# In[11]:
# Extract features and target values for training data.
train_X = train_data[feat_cols]
train_Y = train_data[target_cols]
"""
Formatting the input to be of the shape (number_of_samples, timesteps, number_of_features).
For detail explanation refer to https://keras.io/layers/recurrent/.
Note: here I consider a single timestep (set to 1) and 16 features. It could be specified in
a different way (16 timesteps instead of 1), and I plan to experiment with that in the future.
"""
train_X = train_X.values.reshape((train_X.shape[0], 1, train_X.shape[1]))
train_Y = train_Y.values.reshape((train_Y.shape[0], train_Y.shape[1]))
print(train_X.shape)
print(train_Y.shape)
# In[12]:
# Create a LSTM network.
model = Sequential()
model.add(LSTM(num_lstm_au, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1)) #dimension of the output vector
model.compile(loss='mean_squared_error', optimizer='adam')
# In[13]:
# Fit network. Currently set the batch_size=1; will add more relevant information on this later.
history = model.fit(train_X, train_Y, epochs=num_epochs, batch_size=1, verbose=2, shuffle=False)
# In[14]:
# Print model.summary.
print(model.summary())
# In[21]:
pickle_keras_models()
# In[36]:
# Initialize dataframe with column-names to hold forecasts and other relevant information.
final_test_forecasts = pd.DataFrame(columns=['company_ticker', 'quarter_start', 'actual', 'forecast'])
# Initialize dataframe with column-names to hold MAPE (Mean Absolute Percentage Error) for each company.
final_mapes = pd.DataFrame(columns=['company_ticker', 'mape'])
"""
Compute predictions on the test data one company at a time.
This simplifies scaling the results back to the original scale for each company.
"""
test_grp_data = test_data.groupby(by='company_ticker')
for grp_name, grp_data in test_grp_data:
cur_grp_data = grp_data.reset_index(drop=True)
cur_grp_data['quarter_start'] =
|
pd.to_datetime(cur_grp_data['quarter_start'])
|
pandas.to_datetime
|
import warnings
from collections import OrderedDict as odict
from glob import glob
from os.path import basename, dirname, join
from deprecated.sphinx import versionadded
from ._bed_read import read_bed
from ._util import last_replace
def read_plink(file_prefix, verbose=True):
"""
Read PLINK files into data frames.
Note
----
We suggest using :func:`read_plink1_bin` instead as it provides a clearer interface.
Examples
--------
We have shipped this package with an example so can load and inspect by doing
.. doctest::
>>> from os.path import join
>>> from pandas_plink import read_plink
>>> from pandas_plink import get_data_folder
>>> (bim, fam, bed) = read_plink(join(get_data_folder(), "data"), verbose=False)
>>> print(bim.head())
chrom snp cm pos a0 a1 i
0 1 rs10399749 0.00 45162 G C 0
1 1 rs2949420 0.00 45257 C T 1
2 1 rs2949421 0.00 45413 0 0 2
3 1 rs2691310 0.00 46844 A T 3
4 1 rs4030303 0.00 72434 0 G 4
>>> print(fam.head())
fid iid father mother gender trait i
0 Sample_1 Sample_1 0 0 1 -9.00 0
1 Sample_2 Sample_2 0 0 2 -9.00 1
2 Sample_3 Sample_3 Sample_1 Sample_2 2 -9.00 2
>>> print(bed.compute())
[[2.00 2.00 1.00]
[2.00 1.00 2.00]
[ nan nan nan]
[ nan nan 1.00]
[2.00 2.00 2.00]
[2.00 2.00 2.00]
[2.00 1.00 0.00]
[2.00 2.00 2.00]
[1.00 2.00 2.00]
[2.00 1.00 2.00]]
The values of the ``bed`` matrix denote how many alleles ``a1`` (see output of data
frame ``bim``) are in the corresponding position and individual. Notice the column
``i`` in ``bim`` and ``fam`` data frames. It maps to the corresponding position of
the bed matrix:
.. doctest::
>>> chrom1 = bim.query("chrom=='1'")
>>> X = bed[chrom1.i.values, :].compute()
>>> print(X)
[[2.00 2.00 1.00]
[2.00 1.00 2.00]
[ nan nan nan]
[ nan nan 1.00]
[2.00 2.00 2.00]
[2.00 2.00 2.00]
[2.00 1.00 0.00]
[2.00 2.00 2.00]
[1.00 2.00 2.00]
[2.00 1.00 2.00]]
It also allows the use of the wildcard character ``*`` for mapping
multiple BED files at
once: ``(bim, fam, bed) = read_plink("chrom*")``.
In this case, only one of the FAM files will be used to define
sample information. Data from BIM and BED files are concatenated to
provide a single view of the files.
Parameters
----------
file_prefix : str
Path prefix to the set of PLINK files. It supports loading many BED files at
once using globstrings wildcard.
verbose : bool
``True`` for progress information; ``False`` otherwise.
Returns
-------
alleles : :class:`pandas.DataFrame`
Alleles.
samples : :class:`pandas.DataFrame`
Samples.
genotypes : :class:`numpy.ndarray`
Genotype.
"""
from tqdm import tqdm
import pandas as pd
from dask.array import concatenate
file_prefixes = sorted(glob(file_prefix))
if len(file_prefixes) == 0:
file_prefixes = [file_prefix.replace("*", "")]
file_prefixes = sorted(_clean_prefixes(file_prefixes))
fn = []
for fp in file_prefixes:
fn.append({s: "%s.%s" % (fp, s) for s in ["bed", "bim", "fam"]})
pbar = tqdm(desc="Mapping files", total=3 * len(fn), disable=not verbose)
bim = _read_file(fn, lambda fn: _read_bim(fn["bim"]), pbar)
if len(file_prefixes) > 1:
if verbose:
msg = "Multiple files read in this order: {}"
print(msg.format([basename(f) for f in file_prefixes]))
nmarkers = dict()
index_offset = 0
for i, bi in enumerate(bim):
nmarkers[fn[i]["bed"]] = bi.shape[0]
bi["i"] += index_offset
index_offset += bi.shape[0]
bim = pd.concat(bim, axis=0, ignore_index=True)
fam = _read_file([fn[0]], lambda fn: _read_fam(fn["fam"]), pbar)[0]
nsamples = fam.shape[0]
bed = _read_file(
fn, lambda f: _read_bed(f["bed"], nsamples, nmarkers[f["bed"]]), pbar
)
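    # Stack the per-file genotype matrices along the variant (marker) axis so the rows
    # line up with the concatenated BIM data frame.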
bed = concatenate(bed, axis=0)
pbar.close()
return (bim, fam, bed)
@versionadded(version="2.0.0")
def read_plink1_bin(bed, bim=None, fam=None, verbose=True):
"""
Read PLINK 1 binary files [1]_ into a data array.
A PLINK 1 binary file set consists of three files:
- BED: containing the genotype.
- BIM: containing variant information.
- FAM: containing sample information.
The user might provide a single file path to a BED file, from which this function
will try to infer the file path of the other two files.
This function also allows the user to provide file path to multiple BED and
BIM files, as it is common to have a data set split into multiple files, one per
chromosome.
This function returns a samples-by-variants matrix. This is a special kind of matrix
with rows and columns having multiple coordinates each. Those coordinates have the
metainformation contained in the BIM and FAM files.
Examples
--------
    The following example reads two BED files and two BIM files corresponding to
chromosomes 11 and 12, and read a single FAM file whose filename is inferred from
the BED filenames.
.. doctest::
>>> from os.path import join
>>> from pandas_plink import read_plink1_bin
>>> from pandas_plink import get_data_folder
>>> G = read_plink1_bin(join(get_data_folder(), "chr*.bed"), verbose=False)
>>> print(G)
<xarray.DataArray 'genotype' (sample: 14, variant: 1252)>
dask.array<concatenate, shape=(14, 1252), dtype=float64, chunksize=(14, 779), chunktype=numpy.ndarray>
Coordinates:
* sample (sample) object 'B001' 'B002' 'B003' ... 'B012' 'B013' 'B014'
* variant (variant) object '11_316849996' '11_316874359' ... '12_373081507'
fid (sample) <U4 'B001' 'B002' 'B003' 'B004' ... 'B012' 'B013' 'B014'
iid (sample) <U4 'B001' 'B002' 'B003' 'B004' ... 'B012' 'B013' 'B014'
father (sample) <U1 '0' '0' '0' '0' '0' '0' ... '0' '0' '0' '0' '0' '0'
mother (sample) <U1 '0' '0' '0' '0' '0' '0' ... '0' '0' '0' '0' '0' '0'
gender (sample) <U1 '0' '0' '0' '0' '0' '0' ... '0' '0' '0' '0' '0' '0'
trait (sample) float64 -9.0 -9.0 -9.0 -9.0 -9.0 ... -9.0 -9.0 -9.0 -9.0
chrom (variant) <U2 '11' '11' '11' '11' '11' ... '12' '12' '12' '12' '12'
snp (variant) <U9 '316849996' '316874359' ... '372918788' '373081507'
cm (variant) float64 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0
pos (variant) int64 157439 181802 248969 ... 27163741 27205125 27367844
a0 (variant) <U1 'C' 'G' 'G' 'C' 'C' 'T' ... 'A' 'A' 'G' 'A' 'T' 'G'
a1 (variant) <U1 'T' 'C' 'C' 'T' 'T' 'A' ... 'T' 'G' 'A' 'T' 'C' 'A'
>>> print(G.shape)
(14, 1252)
Suppose we want the genotypes of the chromosome 11 only:
.. doctest::
>>> G = G.where(G.chrom == "11", drop=True)
>>> print(G)
<xarray.DataArray 'genotype' (sample: 14, variant: 779)>
dask.array<where, shape=(14, 779), dtype=float64, chunksize=(14, 779), chunktype=numpy.ndarray>
Coordinates:
* sample (sample) object 'B001' 'B002' 'B003' ... 'B012' 'B013' 'B014'
* variant (variant) object '11_316849996' '11_316874359' ... '11_345698259'
fid (sample) <U4 'B001' 'B002' 'B003' 'B004' ... 'B012' 'B013' 'B014'
iid (sample) <U4 'B001' 'B002' 'B003' 'B004' ... 'B012' 'B013' 'B014'
father (sample) <U1 '0' '0' '0' '0' '0' '0' ... '0' '0' '0' '0' '0' '0'
mother (sample) <U1 '0' '0' '0' '0' '0' '0' ... '0' '0' '0' '0' '0' '0'
gender (sample) <U1 '0' '0' '0' '0' '0' '0' ... '0' '0' '0' '0' '0' '0'
trait (sample) float64 -9.0 -9.0 -9.0 -9.0 -9.0 ... -9.0 -9.0 -9.0 -9.0
chrom (variant) <U2 '11' '11' '11' '11' '11' ... '11' '11' '11' '11' '11'
snp (variant) <U9 '316849996' '316874359' ... '345653648' '345698259'
cm (variant) float64 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0
pos (variant) int64 157439 181802 248969 ... 28937375 28961091 29005702
a0 (variant) <U1 'C' 'G' 'G' 'C' 'C' 'T' ... 'T' 'A' 'C' 'A' 'A' 'T'
a1 (variant) <U1 'T' 'C' 'C' 'T' 'T' 'A' ... 'C' 'G' 'T' 'G' 'C' 'C'
>>> print(G.shape)
(14, 779)
Lets now print the genotype value of the sample `B003` for variant `11_316874359`:
.. doctest::
>>> print(G.sel(sample="B003", variant="11_316874359").values)
0.0
The special matrix we return is of type :class:`xarray.DataArray`. More information
about it can be found at the `xarray documentation <http://xarray.pydata.org>`_.
Parameters
----------
bed : str
Path to a BED file. It can contain shell-style wildcards to indicate multiple
BED files.
bim : str, optional
Path to a BIM file. It can contain shell-style wildcards to indicate multiple
BIM files. It defaults to ``None``, in which case it will try to be inferred.
fam : str, optional
Path to a FAM file. It defaults to ``None``, in which case it will try to be
inferred.
verbose : bool
``True`` for progress information; ``False`` otherwise.
Returns
-------
G : :class:`xarray.DataArray`
Genotype with metadata.
References
----------
.. [1] PLINK 1 binary. https://www.cog-genomics.org/plink/2.0/input#bed
"""
from numpy import int64, float64
from tqdm import tqdm
from xarray import DataArray
import pandas as pd
import dask.array as da
bed_files = sorted(glob(bed))
if len(bed_files) == 0:
raise ValueError("No BED file has been found.")
if bim is None:
bim_files = [last_replace(f, ".bed", ".bim") for f in bed_files]
else:
bim_files = sorted(glob(bim))
if len(bim_files) == 0:
raise ValueError("No BIM file has been found.")
if fam is None:
fam_files = [last_replace(f, ".bed", ".fam") for f in bed_files]
else:
fam_files = sorted(glob(fam))
if len(fam_files) == 0:
raise ValueError("No FAM file has been found.")
if len(bed_files) != len(bim_files):
raise ValueError("The numbers of BED and BIM files must match.")
if len(fam_files) > 1:
msg = "More than one FAM file has been specified. Only the first one will be "
msg += "considered."
if verbose:
warnings.warn(msg, UserWarning)
fam_files = fam_files[:1]
nfiles = len(bed_files) + len(bim_files) + 1
pbar = tqdm(desc="Mapping files", total=nfiles, disable=not verbose)
bims = _read_file(bim_files, lambda f: _read_bim(f), pbar)
nmarkers = {bed_files[i]: b.shape[0] for i, b in enumerate(bims)}
bim =
|
pd.concat(bims, axis=0, ignore_index=True)
|
pandas.concat
|
#!/usr/bin/env python
import os
import dill as pickle
import sys
import emcee
import matplotlib
import matplotlib.pyplot as plt
from astropy.table import Table
from emcee.mpi_pool import MPIPool
import src.globals as glo
import pandas as pd
import numpy as np
import corner
from src.objects import SpecCandidate, TargetDataFrame, load_training_data
from src.utils import fits_pandas, parallelize, Str
from pyspark.sql import SparkSession
def process(func, items, hpc=False, sc=None, multi=True, n_proc=2):
if multi:
if hpc:
sc.parallelize(items).foreach(lambda i: func(*i))
else:
parallelize(lambda i: func(*i), items, n_proc=n_proc)
else:
for c in items:
func(*c)
def norm(z, n3, n2, n1, n0):
return n3 * z**3 + n2 * z**2 + n1 * z + n0
# return n1 * z + n0
# return n1 * z + n0
def slope(z, s3, s2, s1, s0):
return s3 * z**3 + s2 * z**2 + s1 * z + s0
def model(mag, z, params):
slope_args, norm_args = params[:4], params[4:]
col = (slope(z, *slope_args) * mag) + norm(z, *norm_args)
return col
def log_likelihood(theta, x, y, z, xerr, yerr, zerr):
params, lnf = theta[:-1], theta[-1]
mdl = model(x, z, params)
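    # Total variance per point: the measurement errors added in quadrature plus a
    # fractional scatter term f = exp(lnf) applied to the model colour.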
inv_sigma2 = 1.0 / (xerr**2 + yerr**2 + zerr**2 + mdl**2 * np.exp(2 * lnf))
return -0.5 * (np.sum((y - mdl)**2 * inv_sigma2 - np.log(inv_sigma2)))
def log_prior(theta):
n = 100
if all([(-n < param < n) for param in theta]):
return 0.0
return -np.inf
def log_probability(theta, x, y, z, xerr, yerr, zerr):
lg_prior = log_prior(theta)
if not np.isfinite(lg_prior):
return -np.inf
return lg_prior + log_likelihood(theta, x, y, z, xerr, yerr, zerr)
def main(df=None, pkl_chains=True, burnin=10000, nsteps=100000):
with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit(0)
# pool = None
# if True:
# print('here')
df = load_training_data(from_pkl=True)
for col in glo.col_options:
key_mag, key_col = 'MAG_AUTO_I', f'MAG_AUTO_{col:u}'
key_mag_err = 'MAGERR_DETMODEL_I'
key_mag_err_0 = f'MAGERR_DETMODEL_{col[0]:u}'
key_mag_err_1 = f'MAGERR_DETMODEL_{col[1]:u}'
magnitude = df[key_mag].values
magnitude_err = df[key_mag_err].values
colour = df[key_col].values
colour_err = np.hypot(df[key_mag_err_0], df[key_mag_err_1]).values
redshift = df['Z'].values
redshift_err = df['Z_ERR'].values # pos=pos
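            # Nine free parameters: four slope coefficients, four norm coefficients and
            # the log fractional scatter lnf (see log_likelihood).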
ndim, nwalkers = 9, 100
pos = [1e-4 * np.random.randn(ndim) for _ in range(nwalkers)]
args = magnitude, colour, redshift, magnitude_err, colour_err, redshift_err
settings = nwalkers, ndim, log_probability
sampler = emcee.EnsembleSampler(*settings, args=args, pool=pool)
sampler.run_mcmc(pos, nsteps)
if pkl_chains is True:
name = f'rs.model.{col:l}.chain.pkl'
data = os.path.join(glo.DIR_INTERIM, name)
with open(data, 'wb') as data_out:
pickle.dump(sampler.chain, data_out)
plt.clf()
fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(8, 9))
labels = [f's{i}' for i in np.arange(0, 4, 1)] + [
# 'c'
f'n{i}' for i in np.arange(0, 4, 1)
] + ['f']
for i, param in enumerate(labels):
steps = sampler.chain[:, :, i].T
if param == 'f':
steps = np.exp(steps)
axes[i].plot(steps, color='k', alpha=0.4)
# axes[i].yaxis.set_major_locator(MaxNLocator(5))
axes[i].axhline(param, color='#888888', lw=3)
axes[i].set_ylabel(f'{param}', fontsize=16)
axes[i].tick_params(
'both', length=3, width=1.5, which='major', labelsize=16)
fig.subplots_adjust(wspace=0, hspace=0)
fig.tight_layout(h_pad=0.0)
fig.savefig(
'rs.model.{0:l}.chain.png'.format(col), format='png', dpi=300)
plt.close()
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
fig = corner.corner(samples, labels=labels)
fig.savefig(
'rs.model.{0:l}.corner.png'.format(col), format='png', dpi=300)
def plot_red_sequence(col, z, group, burnin=10000):
title = 'COL: {0:u}, Z: {1:.4f}, LEN: {2}'.format(col, z, len(group))
print(title)
plt.clf()
ax = plt.subplot(111)
try:
key_mag, key_col = 'MAG_AUTO_I', f'MAG_AUTO_{col:u}'
key_mag_err = 'MAGERR_DETMODEL_I'
key_mag_err_0 = f'MAGERR_DETMODEL_{col[0]:u}'
key_mag_err_1 = f'MAGERR_DETMODEL_{col[1]:u}'
# These detmodel errors are required for calculating the weights.
# Combine errors from multiple bands to calculate the error on the cols.
mag_err = group[key_mag_err]
col_err = np.hypot(group[key_mag_err_0], group[key_mag_err_1])
mag, colour = group[key_mag], group[key_col]
ax.errorbar(mag, colour, xerr=mag_err, yerr=col_err, fmt='.k')
except Exception as e:
# No observed data for this
print(e)
ax.set_xlim([16, 22])
ax.set_ylim([-2, 3])
ax.set_xlabel('i')
ax.set_ylabel(f'{col:l}')
name = f'rs.model.{col:l}.chain.pkl'
data = os.path.join(glo.DIR_INTERIM, name)
with open(data, 'rb') as data_in:
samples = pickle.load(data_in)[:, burnin:, :].reshape((-1, 9))
xl = np.array([16, 22])
for theta in samples[np.random.randint(len(samples), size=100)]:
params = theta[:-1]
plt.plot(xl, model(xl, z, params), color='k', alpha=0.1)
plt.suptitle(title)
name = f'rs.model.{col:l}.{z:.5f}.png'
plt.savefig(os.path.join(glo.DIR_FIGS, 'models', name), dpi=300)
def plot_everything(df=None, hpc=True):
df = load_training_data(from_pkl=True)
if hpc is True:
# Set up spark context and parallelize over the objects
sc = SparkSession.builder.appName('rs_plots').getOrCreate().sparkContext
sc.addPyFile(f'file:///{glo.DIR_PROJECT}/src/misc/spark_logging.py')
else:
sc = None
def gen_items(galaxies):
for rs_colour in glo.col_options:
# rs_colour = glo.col_options[-1]
for z_bin, group in galaxies.groupby('Z_BIN'):
# z = round(z_bin.mid, 8)
z = z_bin.mid
yield rs_colour, z, group
z_bins = np.arange(0, 0.7, 0.0005)
df['Z_BIN'] = pd.cut(df['Z'], z_bins)
process(func=plot_red_sequence, items=list(gen_items(df)), hpc=hpc, sc=sc)
# def plot_col_z(df, hpc=True):
def rs_norm_slope(z, burnin=2000):
# title = 'COL: {0:u}, Z: {1:.5f}'.format(col, z)
# print(title)
# plt.clf()
# ax = plt.subplot(111)
data_dict = {}
for col in glo.col_options:
name = f'rs.model.{col:l}.chain.pkl'
data = os.path.join(glo.DIR_INTERIM, name)
with open(data, 'rb') as data_in:
samples = pickle.load(data_in)[:, burnin:, :].reshape((-1, 9))
# samples = np.exp(samples)
results = zip(*np.percentile(samples, [16, 50, 84], axis=0))
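        # Each entry of params becomes [median, +error, -error], taken from the
        # 16th/50th/84th percentiles of the posterior samples.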
params = [np.array([v[1], v[2] - v[1], v[1] - v[0]]) for v in results]
params = np.array(params)
data_dict['REDSHIFT'] = z
data_dict[f'SLOPE_{col:u}'] = slope(z, *params[:4, 0])
data_dict[f'NORM_{col:u}'] = norm(z, *params[4:-1, 0])
return
|
pd.Series(data_dict)
|
pandas.Series
|
import mysql.connector
import base64
from datetime import date, timedelta, timezone
import pandas as pd
import requests
from datetime import datetime
from data_management import settings as config
from urllib.parse import urlencode
import data_processing_app.models as models
from django.db.models.functions import ExtractWeek, ExtractYear
from django.db.models import Sum
import data_processing_app.toggl_data_processing as data_processing
from plotly.offline import plot
from plotly.graph_objs import Scatter
import plotly.graph_objs as go
def connect_to_toggl(api_token):
"""Connect to toggl and get response containing information to the
:param api_token: Token for you user profile, you can find the token at
Toggl.com at the end of the profile settings page
"""
string = api_token + ':api_token'
headers = {
'Authorization': 'Basic ' + base64.b64encode(string.encode('ascii')).decode("utf-8")}
url = 'https://www.toggl.com/api/v8/me'
response = requests.get(url, headers=headers)
response = response.json()
email = response['data']['email']
workspaces = [{'name': item['name'], 'id': item['id']} for item in response['data']['workspaces'] if
item['admin'] == True]
return email, workspaces, headers
def get_all_clients_and_projects(my_workspace, headers):
'''Gets all clients and projects for your workspace id'''
url = 'https://www.toggl.com/api/v8/workspaces/' + str(my_workspace) + '/clients'
clients = requests.get(url, headers=headers).json()
url = 'https://www.toggl.com/api/v8/workspaces/' + str(my_workspace) + '/projects'
projects = requests.get(url, headers=headers).json()
return clients, projects
def get_all_time_entries(headers, start_date, end_date):
'''Finds all time entries in the time frame [start_date - end_date]'''
start_date = datetime.combine(start_date, datetime.min.time())
end_date = datetime.combine(end_date, datetime.min.time())
start_date = start_date.replace(tzinfo=timezone.utc).isoformat()
end_date = end_date.replace(tzinfo=timezone.utc).isoformat()
url = 'https://api.track.toggl.com/api/v8/time_entries?'
params = {'start_date': start_date, 'end_date': end_date}
url = url + '{}'.format(urlencode(params))
time_entries = requests.get(url, headers=headers).json()
return time_entries
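# Possible usage of the helpers above (the token and dates are placeholders):
# email, workspaces, headers = connect_to_toggl('your_api_token')
# clients, projects = get_all_clients_and_projects(workspaces[0]['id'], headers)
# time_entries = get_all_time_entries(headers, date(2021, 1, 1), date(2021, 1, 31))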
def data_processing(clients, projects, time_entries):
    '''Join clients, projects and time entries into a data frame with all time entries
    and the corresponding client and project information'''
projects_filtered = [{'pid': item['id'],
'cid': item['cid'],
'project_name': item['name']} for item in projects]
clients_filtered = [{'cid': item['id'],
'client_name': item['name']} for item in clients]
projects_df =
|
pd.DataFrame(data=projects_filtered)
|
pandas.DataFrame
|
"""
Performance testing of the Shapley module based on processing times
"""
from modules import shapley
from pprint import pprint
from pandas import pandas
from time import time
DATASET_SIMPLE = pandas.read_csv('./test_datasets/simple.csv', sep=',')
DATASET_MEDIUM = pandas.read_csv('./test_datasets/medium.csv', sep=',')
DATASET_BIG =
|
pandas.read_csv('./test_datasets/big.csv', sep=',')
|
pandas.pandas.read_csv
|
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score, confusion_matrix
from keras.callbacks import ModelCheckpoint
import seaborn as sns
from keras.optimizers import Adam
import pickle
import matplotlib.pyplot as plt
import lime
import lime.lime_tabular
from lime.lime_tabular import LimeTabularExplainer
# fix random seed for reproducibility
np.random.seed(0)
# Read dataset
db =
|
pd.read_csv("covid_filtered_1-5_allMin3.csv")
|
pandas.read_csv
|
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with non-category results in a ValueError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
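        # Each case: (level to unstack, flat positions of the data within the expected
        # frame, resulting column level values, resulting index level values).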
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
        # GH 32624: Error when using a lot of indices to unstack.
# The error occurred only, if a lot of indices are used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
        # GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
        # GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"],
|
date_range("2012-01-01", periods=5)
|
pandas.date_range
|
import os
import audiofile
import audiofile as af
import numpy as np
import pandas as pd
import pytest
import audinterface
import audformat
def signal_duration(signal, sampling_rate):
return signal.shape[1] / sampling_rate
def signal_max(signal, sampling_rate):
return np.max(signal)
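# SEGMENT keeps only the first half of each processed signal: an index from 0 s up to
# half of the signal duration.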
SEGMENT = audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
)
def signal_modification(signal, sampling_rate, subtract=False):
if subtract:
signal -= 0.1 * signal
else:
signal += 0.1 * signal
return signal
@pytest.mark.parametrize(
'process_func, segment, signal, sampling_rate, start, end, keep_nat, '
'channels, mixdown, expected_output',
[
(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
SEGMENT,
np.ones((1, 8000)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
None,
np.ones(3),
8000,
None,
None,
False,
0,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
1,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
None,
True,
0.5,
),
(
signal_max,
None,
np.array([[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
[1, 2],
True,
0.5,
),
# invalid channel selection
pytest.param(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
1,
False,
1,
marks=pytest.mark.xfail(raises=ValueError),
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
None,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
True,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
None,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.NaT,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1000ms',
'2000ms',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1,
2,
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1.0,
2.0,
False,
None,
False,
1.0,
),
],
)
def test_process_file(
tmpdir,
process_func,
segment,
signal,
sampling_rate,
start,
end,
keep_nat,
channels,
mixdown,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
channels=channels,
mixdown=mixdown,
segment=segment,
keep_nat=keep_nat,
verbose=False,
)
# create test file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# test absolute path
y = process.process_file(
path,
start=start,
end=end,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
# test relative path
y = process.process_file(
file,
start=start,
end=end,
root=root,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
@pytest.mark.parametrize(
'process_func, num_files, signal, sampling_rate, starts, ends, '
'expected_output',
[
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
None,
None,
[3.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
1,
2,
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, 1],
[None, 2],
[3.0, 1.0],
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s'],
[3.0, 1.0],
),
(
signal_duration,
3,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s', None],
[3.0, 1.0],
),
(
signal_duration,
1,
np.zeros((1, 24000)),
8000,
[None],
[None, '2s'],
[3.0],
),
],
)
def test_process_files(
tmpdir,
process_func,
num_files,
signal,
sampling_rate,
starts,
ends,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
verbose=False,
)
# create files
files = []
paths = []
root = tmpdir
for idx in range(num_files):
file = f'file{idx}.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
files.append(file)
paths.append(path)
# test absolute paths
output = process.process_files(
paths,
starts=starts,
ends=ends,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
# test relative paths
output = process.process_files(
files,
starts=starts,
ends=ends,
root=root,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
@pytest.mark.parametrize(
'num_files, segment, num_workers, multiprocessing',
[
(3, None, 1, False, ),
(3, None, 2, False, ),
(3, None, None, False, ),
(3, SEGMENT, 1, False, ),
]
)
def test_process_folder(
tmpdir,
num_files,
segment,
num_workers,
multiprocessing,
):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
segment=segment,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
path = str(tmpdir.mkdir('wav'))
files = [
os.path.join(path, f'file{n}.wav') for n in range(num_files)
]
for file in files:
signal = np.random.uniform(-1.0, 1.0, (1, sampling_rate))
af.write(file, signal, sampling_rate)
y = process.process_folder(path)
pd.testing.assert_series_equal(
y,
process.process_files(files),
)
def test_process_func_args():
def process_func(s, sr, arg1, arg2):
assert arg1 == 'foo'
assert arg2 == 'bar'
audinterface.Process(
process_func=process_func,
process_func_args={
'arg1': 'foo',
'arg2': 'bar',
}
)
with pytest.warns(UserWarning):
audinterface.Process(
feature_names=('o1', 'o2', 'o3'),
process_func=process_func,
arg1='foo',
arg2='bar',
)
@pytest.mark.parametrize(
'num_workers, multiprocessing',
[
(1, False, ),
(2, False, ),
(None, False, ),
]
)
def test_process_index(tmpdir, num_workers, multiprocessing):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
signal = np.random.uniform(-1.0, 1.0, (1, 3 * sampling_rate))
# create file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# empty index
index = audformat.segmented_index()
y = process.process_index(index)
assert y.empty
# segmented index with absolute paths
index = audformat.segmented_index(
[path] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# filewise index with absolute paths
index = audformat.filewise_index(path)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# segmented index with relative paths
index = audformat.segmented_index(
[file] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
# filewise index with relative paths
index = audformat.filewise_index(path)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
@pytest.mark.parametrize(
'process_func, process_func_args, segment, signal, '
'sampling_rate, file, start, end, keep_nat, expected_signal',
[
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
'file',
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
None,
None,
None,
False,
np.array([1., 2.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
'file',
None,
None,
False,
np.array([1., 2.]),
),
(
signal_max,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
3,
None,
None,
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('2s'),
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.to_timedelta('1s'),
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.NaT,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.NaT,
True,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
1,
2,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
1.0,
2.0,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
'1s',
'2s',
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
'file',
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
1.0,
),
(
signal_modification,
{},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[1.1, 1.1, 1.1]]),
),
(
signal_modification,
{'subtract': False},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[1.1, 1.1, 1.1]]),
),
(
signal_modification,
{'subtract': True},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[0.9, 0.9, 0.9]]),
),
],
)
def test_process_signal(
process_func,
process_func_args,
segment,
signal,
sampling_rate,
file,
start,
end,
keep_nat,
expected_signal,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=None,
resample=False,
segment=segment,
keep_nat=keep_nat,
verbose=False,
process_func_args=process_func_args,
)
x = process.process_signal(
signal,
sampling_rate,
file=file,
start=start,
end=end,
)
signal = np.atleast_2d(signal)
if start is None or pd.isna(start):
start = pd.to_timedelta(0)
elif isinstance(start, (int, float)):
start = pd.to_timedelta(start, 's')
elif isinstance(start, str):
start = pd.to_timedelta(start)
if end is None or (
|
pd.isna(end)
|
pandas.isna
|
import pandas as pd
import numpy as np
import requests
import re
import os
from bs4 import BeautifulSoup
import datetime
# You should use this source!
# https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_Germany
RKI_url = 'https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html'
requestRKI = requests.get(RKI_url)
soup = BeautifulSoup(requestRKI.content)
table = soup.find("table")
allP = list(soup.find_all("p"))
dateP = [p for p in allP if str(p).__contains__("online aktualisiert um")]
# old way to find date
# date = soup.find_all("div", class_="dateOfIssue")
date = re.findall('\\d+', str(dateP))
date = [int(part) for part in date]
date = datetime.date(date[2], date[1], date[0])
date = date.strftime("%d-%m-%Y")
output_rows = []
for table_row in table.findAll('tr'):
columns = table_row.findAll('td')
output_row = []
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
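# Drop the first two rows and the trailing row (presumably the table header rows and
# the "Gesamt" totals row).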
output_rows = list(np.delete(output_rows, [0, 1, len(output_rows) - 1]))
headers = ['Bundesland', 'Anzahl', 'Differenz zum Vortag', 'Fälle/100.000 Einw.', 'Todesfälle']
outputTable =
|
pd.DataFrame(output_rows, columns=headers)
|
pandas.DataFrame
|
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
import numpy as np
import pandas as pd
def combine_hist(hist_list, method='sum', nbins=64):
"""
Performs the combination of multiple Histograms.
Parameters
----------
hist_list: list
list of histograms with all histograms (saved as DataFrames in pyLife format)
method: str
        one of 'sum', 'min', 'max', 'mean' or 'std'; the default is 'sum'
nbins: int
number of bins of the combined histogram
Returns
-------
DataFrame:
Combined histogram
list:
list with the reindexed input histograms
"""
hist_combined =
|
pd.concat(hist_list)
|
pandas.concat
|
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
        result.iloc[i] = (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
return result
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
    :return: a pandas DataFrame with the rolling standard deviation over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
    Wrapper function to estimate rolling correlation.
    :param x, y: pandas DataFrames.
    :param window: the rolling window.
    :return: a pandas DataFrame with the rolling correlation of x and y over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
    :param x, y: pandas DataFrames.
    :param window: the rolling window.
    :return: a pandas DataFrame with the rolling covariance of x and y over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
def rolling_prod(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
    Wrapper function to estimate rolling max.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
try:
df = df.to_frame() #Series is not supported for the calculations below.
except:
pass
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])
def highday(df, n):  # number of periods since the maximum of the previous n values of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmax()
return result
def lowday(df, n):  # number of periods since the minimum of the previous n values of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmin()
return result
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.drop("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = pd.DataFrame(stock_list.stack())
dataset.reset_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=pd.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average vwap data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average vwap data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = VWAP
result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data across the industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_close():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average close data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average close data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data across the industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_low():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average low data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average low data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data across the industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_volume():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average volume data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average volume data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data across the industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_adv(num):
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average adv{num} data needs not to be updated.".format(num=num))
return result_industryaveraged_df
else:
print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
first_date_update = date_list_update[0]
except:
print("The corresponding industry average adv{num} data is missing.".format(num=num))
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating per-stock (not yet industry-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data across the industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df =
|
pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
|
pandas.read_csv
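# Illustrative aside: expressed with the helper functions defined at the top of this
# file, the per-stock quantity that alpha#048 industry-averages (see the formula in the
# comment above the function definition) looks roughly like the sketch below. CLOSE is
# assumed to be a pandas Series of daily close prices for a single stock; this is a
# sketch of the formula, not the truncated function body itself.
def alpha048_raw_sketch(CLOSE):
    return (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250)
            * delta(CLOSE, 1)) / CLOSE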
|
'''
--------------------------------------------------------------------------------
Description:
Roadmap:
Written by <NAME> <<EMAIL>>, DAMP Lab 2020
--------------------------------------------------------------------------------
'''
import glob
import os
from pathlib import Path
import xml.etree.ElementTree as ET
from typing import (
List,
)
import numpy as np
import pandas as pd
import tqdm
from beholder.signal_processing.sigpro_utility import (
get_channel_data_from_xml_metadata,
ingress_tiff_file,
)
from beholder.utils import (
get_analysis_location,
)
def enqueue_wide_analysis(
input_datasets: List[str],
runlist_fp: str,
calibration_rpu_dataset_fp: str,
calibration_autofluoresence_dataset_fp: str,
signal_channel_label: str = 'm-Cherry'
):
# Generate a directory in the analysis results to hold all of the
# outputted observations.
    # The name of each of the files will be the passed-in name of each of the
    # directories.
# - Tab 1
# Create a baseline DF, iterating over all of the different panels.
# Generate a Column for each of the different panels encapsulated within
# that ND2. Iterate over each of the frames in that TIFF file, and
# calculate the summation of each of the signals in these.
rpu_df = pd.read_csv(calibration_rpu_dataset_fp)
af_df = pd.read_csv(calibration_autofluoresence_dataset_fp)
output_path = get_analysis_location(runlist_fp)
final_dest = os.path.join(
output_path,
'wide_analysis'
)
if not os.path.exists(final_dest):
os.makedirs(final_dest)
for nd2_index, dataset_fp in tqdm.tqdm(
enumerate(input_datasets),
desc=f'Enumerating over datasets and parsing tiffs...',
total=len(input_datasets),
):
tiff_dir_root = os.path.join(dataset_fp, 'raw_tiffs')
metadata_root = os.path.join(dataset_fp, 'metadata.xml')
tree = ET.parse(metadata_root)
channels = get_channel_data_from_xml_metadata(tree)
raw_sum_df =
|
pd.DataFrame()
|
pandas.DataFrame
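# Illustrative aside (an assumption about the intended bookkeeping, not the actual
# beholder implementation): given a hypothetical stack of frames as a 3-D numpy array
# (frame, y, x), the per-frame signal sums described in the comments above could be
# collected into one column per panel like this.
import numpy as np
import pandas as pd
raw_sum_sketch = pd.DataFrame()
fake_panels = {'panel_0': np.random.rand(5, 64, 64),  # hypothetical pixel data
               'panel_1': np.random.rand(5, 64, 64)}
for panel_name, frames in fake_panels.items():
    raw_sum_sketch[panel_name] = frames.sum(axis=(1, 2))  # one summed signal per frame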
|
import pandas as pd
import urllib.request
import numpy as np
import shapefile
from datetime import datetime
from zipfile import ZipFile
import pandasql as ps
import requests
import json
import pkg_resources
def softmax(x):
if np.max(x) > 1:
e_x = np.exp(x/np.max(x))
else:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
## getProvinceBoundaryBox gets the bounding-box coordinates of an Italian province from the Mapbox API
## Parameter Needed - Province Name
def getProvinceBoundaryBox(provinceName):
Place_Details = requests.get(
'http://api.mapbox.com/geocoding/v5/mapbox.places/' + provinceName + '%20province%20Italy.json?access_token=<KEY>').json()[
'features']
for eachPlace in Place_Details:
try:
if eachPlace['context'][0]['text'] == 'Italy' or eachPlace['context'][1]['text'] == 'Italy':
getBbox = eachPlace['bbox']
except:
continue
return getBbox
# The function below automatically fetches USA patient data from the Harvard Dataverse COVID-19 database and creates a time-series patient file with county-level population, plus a USA county file
## Parameter Needed - Target Directory to save the File
def fetch_us_patientdata(tgtdir):
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/7NWUDK'
urllib.request.urlretrieve(url,tgtdir+'/us_county_confirmed_cases.tab')
latest_data = pd.read_csv(tgtdir+'/us_county_confirmed_cases.tab',sep='\t')
allcols = list(latest_data.columns)
datecols = allcols[allcols.index('HHD10')+1:]
latest_data = latest_data[['COUNTY', 'NAME']+datecols]
datecolsmod=[datetime.strptime(i,'%m/%d/%Y').strftime('%Y%m%d') for i in datecols]
latest_data.columns = ['cfips', 'county']+datecolsmod
latest_data = latest_data.melt(id_vars=['cfips', 'county'], var_name='data_date', value_name='no_pat')
latest_data['county']=latest_data['county'].apply(lambda x : x.split(' County')[0])
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/OFVFPY'
urllib.request.urlretrieve(url,tgtdir+'/COUNTY_MAP.zip')
zip = ZipFile(tgtdir+'/COUNTY_MAP.zip')
zip.extractall(tgtdir)
sf = shapefile.Reader(tgtdir+"/CO_CARTO")
shape_df = pd.DataFrame()
shapes = sf.shapes()
records = sf.records()
for eachrec in range(len(records)):
eachRec = {}
shapebbbox = shapes[eachrec].bbox
shapelat = (shapebbbox[1] + shapebbbox[3]) / 2
shapelong = (shapebbbox[0] + shapebbbox[2]) / 2
eachRec['lat'] = [shapelat]
eachRec['long'] = [shapelong]
eachRec['county_fips'] = [records[eachrec][0]]
eachRec['county_name'] = [records[eachrec][1]]
eachRec['POP'] = [records[eachrec][10]]
eachRec['HHD'] = [records[eachrec][11]]
shape_df = shape_df.append(pd.DataFrame.from_dict(eachRec))
us_counties = shape_df
us_counties['county_name'] = us_counties['county_name'].apply(lambda x: x.split(' County')[0])
us_counties['county_fips'] = us_counties['county_fips'].apply(lambda x: int(x))
us_counties.columns = ['lat','long', 'cfips', 'county', 'pop', 'HHD']
full_data = pd.merge(latest_data, us_counties, on=['cfips', 'county'])
if sum(full_data['no_pat']) != sum(latest_data['no_pat']):
print("fetch failed")
raise
full_data['no_pat'] = full_data.groupby(['cfips'])['no_pat'].apply(lambda x: x.cummax())
full_data['new_pat'] = full_data.groupby(['lat','long'])['no_pat'].diff()
full_data = full_data.dropna()
    us_counties.to_csv(tgtdir+'/USA_counties.csv', index=False)
    full_data.to_csv(tgtdir+'/USA_covid_data_final.csv', index=False)
print(' USA Patient Data Created under Directory :'+tgtdir)
## The function below builds the China COVID-19 time-series patient file from the Harvard Dataverse data and creates a county file with population data by county/province
## Parameter Needed - Target Directory to save the File
def fetch_china_patientdata(tgtdir):
url = 'https://dataverse.harvard.edu/api/access/datafile/3781338?format=original&gbrecs=true'
urllib.request.urlretrieve(url, tgtdir+'/City_Confirmed_Map_China.csv')
latest_data = pd.read_csv(tgtdir+'/City_Confirmed_Map_China.csv')
latest_data = latest_data[
['GbCity', 'GbProv', 'City_EN', 'Prov_EN', 'N_C_0115', 'N_C_0116', 'N_C_0117', 'N_C_0118', 'N_C_0119',
'N_C_0120', 'N_C_0121', 'N_C_0122', 'N_C_0123', 'N_C_0124', 'N_C_0125', 'N_C_0126', 'N_C_0127', 'N_C_0128',
'N_C_0129', 'N_C_0130', 'N_C_0131', 'N_C_0201', 'N_C_0202', 'N_C_0203', 'N_C_0204', 'N_C_0205', 'N_C_0206',
'N_C_0207', 'N_C_0208', 'N_C_0209', 'N_C_0210', 'N_C_0211', 'N_C_0212', 'N_C_0213', 'N_C_0214', 'N_C_0215',
'N_C_0216', 'N_C_0217', 'N_C_0218', 'N_C_0219', 'N_C_0220', 'N_C_0221', 'N_C_0222', 'N_C_0223', 'N_C_0224',
'N_C_0225', 'N_C_0226', 'N_C_0227', 'N_C_0228', 'N_C_0229', 'N_C_0301', 'N_C_0302', 'N_C_0303', 'N_C_0304',
'N_C_0305', 'N_C_0306', 'N_C_0307', 'N_C_0308', 'N_C_0309', 'N_C_0310', 'N_C_0311', 'N_C_0312', 'N_C_0313',
'N_C_0314', 'N_C_0315', 'N_C_0316', 'N_C_0317', 'N_C_0318', 'T_C_0115', 'T_C_0116', 'T_C_0117', 'T_C_0118',
'T_C_0119', 'T_C_0120', 'T_C_0121', 'T_C_0122', 'T_C_0123', 'T_C_0124', 'T_C_0125', 'T_C_0126', 'T_C_0127',
'T_C_0128', 'T_C_0129', 'T_C_0130', 'T_C_0131', 'T_C_0201', 'T_C_0202', 'T_C_0203', 'T_C_0204', 'T_C_0205',
'T_C_0206', 'T_C_0207', 'T_C_0208', 'T_C_0209', 'T_C_0210', 'T_C_0211', 'T_C_0212', 'T_C_0213', 'T_C_0214',
'T_C_0215', 'T_C_0216', 'T_C_0217', 'T_C_0218', 'T_C_0219', 'T_C_0220', 'T_C_0221', 'T_C_0222', 'T_C_0223',
'T_C_0224', 'T_C_0225', 'T_C_0226', 'T_C_0227', 'T_C_0228', 'T_C_0229', 'T_C_0301', 'T_C_0302', 'T_C_0303',
'T_C_0304', 'T_C_0305', 'T_C_0306', 'T_C_0307', 'T_C_0308', 'T_C_0309', 'T_C_0310', 'T_C_0311', 'T_C_0312',
'T_C_0313', 'T_C_0314', 'T_C_0315', 'T_C_0316', 'T_C_0317', 'T_C_0318']]
latest_data['City_EN'] = latest_data['City_EN'].apply(lambda x: x.split('(')[0])
latest_data.columns = ['GbCity', 'GbProv', 'city', 'Province', 'N_C_0115', 'N_C_0116', 'N_C_0117', 'N_C_0118',
'N_C_0119', 'N_C_0120', 'N_C_0121', 'N_C_0122', 'N_C_0123', 'N_C_0124', 'N_C_0125',
'N_C_0126', 'N_C_0127', 'N_C_0128', 'N_C_0129', 'N_C_0130', 'N_C_0131', 'N_C_0201',
'N_C_0202', 'N_C_0203', 'N_C_0204', 'N_C_0205', 'N_C_0206', 'N_C_0207', 'N_C_0208',
'N_C_0209', 'N_C_0210', 'N_C_0211', 'N_C_0212', 'N_C_0213', 'N_C_0214', 'N_C_0215',
'N_C_0216', 'N_C_0217', 'N_C_0218', 'N_C_0219', 'N_C_0220', 'N_C_0221', 'N_C_0222',
'N_C_0223', 'N_C_0224', 'N_C_0225', 'N_C_0226', 'N_C_0227', 'N_C_0228', 'N_C_0229',
'N_C_0301', 'N_C_0302', 'N_C_0303', 'N_C_0304', 'N_C_0305', 'N_C_0306', 'N_C_0307',
'N_C_0308', 'N_C_0309', 'N_C_0310', 'N_C_0311', 'N_C_0312', 'N_C_0313', 'N_C_0314',
'N_C_0315', 'N_C_0316', 'N_C_0317', 'N_C_0318', 'T_C_0115', 'T_C_0116', 'T_C_0117',
'T_C_0118', 'T_C_0119', 'T_C_0120', 'T_C_0121', 'T_C_0122', 'T_C_0123', 'T_C_0124',
'T_C_0125', 'T_C_0126', 'T_C_0127', 'T_C_0128', 'T_C_0129', 'T_C_0130', 'T_C_0131',
'T_C_0201', 'T_C_0202', 'T_C_0203', 'T_C_0204', 'T_C_0205', 'T_C_0206', 'T_C_0207',
'T_C_0208', 'T_C_0209', 'T_C_0210', 'T_C_0211', 'T_C_0212', 'T_C_0213', 'T_C_0214',
'T_C_0215', 'T_C_0216', 'T_C_0217', 'T_C_0218', 'T_C_0219', 'T_C_0220', 'T_C_0221',
'T_C_0222', 'T_C_0223', 'T_C_0224', 'T_C_0225', 'T_C_0226', 'T_C_0227', 'T_C_0228',
'T_C_0229', 'T_C_0301', 'T_C_0302', 'T_C_0303', 'T_C_0304', 'T_C_0305', 'T_C_0306',
'T_C_0307', 'T_C_0308', 'T_C_0309', 'T_C_0310', 'T_C_0311', 'T_C_0312', 'T_C_0313',
'T_C_0314', 'T_C_0315', 'T_C_0316', 'T_C_0317', 'T_C_0318']
latest_data = latest_data.melt(id_vars=['GbCity', 'GbProv', 'city', 'Province'], var_name='Date',
value_name='No of Patient')
New_Patients = ps.sqldf(
''' select GbCity,GbProv,city,Province,Date,"No of Patient" from latest_data where Date like "N_C_%" ''',
locals())
New_Patients['Date'] = New_Patients['Date'].apply(lambda x: '2020' + x.split('N_C_')[1])
New_Patients.columns = ['GbCity', 'GbProv', 'city', 'Province', 'Date', 'New Patient Count']
Total_Patients = ps.sqldf(
''' select GbCity,GbProv,city,Province,Date,"No of Patient" from latest_data where Date like "T_C_%" ''',
locals())
Total_Patients['Date'] = Total_Patients['Date'].apply(lambda x: '2020' + x.split('T_C_')[1])
Total_Patients.columns = ['GbCity', 'GbProv', 'city', 'Province', 'Date', 'Total Patient Count']
latest_data_Normalized = pd.merge(New_Patients, Total_Patients, on=['GbCity', 'GbProv', 'city', 'Province', 'Date'])
latest_data_Normalized['GbCity'] = latest_data_Normalized['GbCity'].apply(lambda x: str(x))
latest_data_Normalized['GbProv'] = latest_data_Normalized['GbProv'].apply(lambda x: str(x))
url='https://dvn-cloud.s3.amazonaws.com/10.7910/DVN/MR5IJN/1710944b44b-ce6a2df0b32e?response-content-disposition=attachment%3B%20filename%2A%3DUTF-8%27%27china_city_basemap.zip&response-content-type=application%2Fzipped-shapefile&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200408T040239Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=AKIAIEJ3NV7UYCSRJC7A%2F20200408%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=ed0cbb34d3e1a129167cbd353afc469d13ddaf4dc14520366df279219b422957'
urllib.request.urlretrieve(url,tgtdir+'/china_city_basemap.zip')
zip = ZipFile(tgtdir+'/china_city_basemap.zip')
    zip.extractall(tgtdir)
sf = shapefile.Reader(tgtdir+"/china_city_basemap")
shape_df = pd.DataFrame()
shapes = sf.shapes()
records = sf.records()
for eachrec in range(len(records)):
eachRec = {}
shapebbbox = shapes[eachrec].bbox
shapelat = (shapebbbox[1] + shapebbbox[3]) / 2
shapelong = (shapebbbox[0] + shapebbbox[2]) / 2
eachRec['lat'] = [shapelat]
eachRec['long'] = [shapelong]
eachRec['GbCity'] = [records[eachrec][0]]
eachRec['city'] = [records[eachrec][2]]
eachRec['GbProv'] = [records[eachrec][3]]
eachRec['Province'] = [records[eachrec][5]]
eachRec['Shape_Area'] = [records[eachrec][6]]
shape_df = shape_df.append(pd.DataFrame.from_dict(eachRec))
china_provinces = shape_df
china_provinces['GbProv'] = china_provinces['GbProv'].apply(lambda x: str(x))
full_data = pd.merge(latest_data_Normalized, china_provinces, on=['city', 'Province'])
full_data = full_data[['city', 'Province', 'Date', 'Total Patient Count', 'lat', 'long']]
full_data.columns = ['city', 'Province', 'data_date', 'no_pat', 'lat', 'long']
china_pop_data = load_support_data('China_Population_Data.xlsx','xl')
china_pop_data['Province'] = china_pop_data['Province'].apply(lambda x: x.split('[')[0])
full_data = ps.sqldf(
''' select a.*,b.Population pop from full_data a left join china_pop_data b on a.Province = b.Province ''',
locals())
full_data['no_pat'] = full_data.groupby(['city'])['no_pat'].apply(lambda x: x.cummax())
full_data['new_pat'] = full_data.groupby(['lat','long'])['no_pat'].diff()
full_data = full_data.dropna()
    china_provinces.to_csv(tgtdir+'/China_provinces_data.csv', index=False)
    full_data.to_csv(tgtdir+'/China_covid_data_final.csv', index=False)
print(' China Patient Data Created under Directory :' + tgtdir)
def load_support_data(filename,type = 'xl'):
# This is a stream-like object. If you want the actual info, call
# stream.read()
stream = pkg_resources.resource_stream(__name__, 'data/'+filename)
if type == 'xl':
return pd.read_excel(stream)
elif type == 'csv':
return pd.read_csv(stream)
## The function below produces time-series patient counts with population for Italian provinces, plus a county file
## Parameter Needed - Target Directory to save the File
def fetch_italy_patientdata(tgtdir):
url = 'https://github.com/pcm-dpc/COVID-19/archive/master.zip'
urllib.request.urlretrieve(url, tgtdir+'/IT_covid19.zip')
zip = ZipFile(tgtdir+'/IT_covid19.zip')
zip.extractall(tgtdir)
latest_data = pd.read_csv(tgtdir+'/COVID-19-master/dati-province/dpc-covid19-ita-province.csv')
latest_data = ps.sqldf(
''' select Date(data) as data_date,denominazione_regione as "RegionName",denominazione_provincia as "ProvinceName", lat, long,totale_casi as "no_pat" from latest_data ''',
locals())
latest_data_Area_Regions = latest_data[['RegionName', 'ProvinceName']].drop_duplicates()
Unique_Provinces = latest_data_Area_Regions['ProvinceName'].unique()
lat_long_df = pd.DataFrame()
for i in range(len(Unique_Provinces)):
if Unique_Provinces[i] != 'In fase di definizione/aggiornamento':
each_lat_long_df = {}
each_lat_long_df['ProvinceName'] = [Unique_Provinces[i]]
Cordinates = getProvinceBoundaryBox(Unique_Provinces[i])
each_lat_long_df['minlong'] = [Cordinates[0]]
each_lat_long_df['minlat'] = [Cordinates[1]]
each_lat_long_df['maxlong'] = [Cordinates[2]]
each_lat_long_df['maxlat'] = [Cordinates[3]]
each_lat_long_df = pd.DataFrame.from_dict(each_lat_long_df)
lat_long_df = lat_long_df.append(each_lat_long_df)
full_data = ps.sqldf(
''' select a.*, b.* from latest_data a left join lat_long_df b on a."ProvinceName" = b."ProvinceName" ''',
locals())
#full_data = latest_data
Dates_in_Data = full_data['data_date'].unique()
Regions_in_Data = full_data['RegionName'].unique()
final_Data =
|
pd.DataFrame()
|
pandas.DataFrame
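# Illustrative aside: a toy demonstration (made-up numbers) of the cummax/diff pattern
# used above to turn a cumulative, occasionally-corrected case count into daily new
# cases per location.
import pandas as pd
toy = pd.DataFrame({'cfips': [1, 1, 1, 1],
                    'no_pat': [3, 5, 4, 9]})               # the 4 is a downward correction
toy['no_pat'] = toy.groupby('cfips')['no_pat'].cummax()    # 3, 5, 5, 9
toy['new_pat'] = toy.groupby('cfips')['no_pat'].diff()     # NaN, 2, 0, 4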
|
from pathlib import Path
import pandas as pd
|
pd.set_option('display.max_colwidth', 250)
|
pandas.set_option
|
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import NearestNeighbors
import pandas as pd
import dill
# Data Pre-Processing
# load the dataset
df =
|
pd.read_csv('../data/stars.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 23:28:55 2021
@author: dv516
"""
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(1)
from algorithms.PyBobyqa_wrapped.Wrapper_for_pybobyqa import PyBobyqaWrapper
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from algorithms.nesterov_random.nesterov_random import nesterov_random
from algorithms.simplex.simplex_method import simplex_method
from algorithms.CUATRO.CUATRO import CUATRO
from algorithms.Finite_differences.Finite_differences import finite_Diff_Newton
from algorithms.Finite_differences.Finite_differences import Adam_optimizer
from algorithms.Finite_differences.Finite_differences import BFGS_optimizer
from algorithms.SQSnobfit_wrapped.Wrapper_for_SQSnobfit import SQSnobFitWrapper
from algorithms.DIRECT_wrapped.Wrapper_for_Direct import DIRECTWrapper
from case_studies.Controller_tuning.Control_system import reactor_phi_2st, reactor_phi_2stNS
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
import pickle
def average_from_list(solutions_list):
N = len(solutions_list)
f_best_all = np.zeros((N, 100))
for i in range(N):
f_best = np.array(solutions_list[i]['f_best_so_far'])
x_ind = np.array(solutions_list[i]['samples_at_iteration'])
for j in range(100):
ind = np.where(x_ind <= j+1)
if len(ind[0]) == 0:
f_best_all[i, j] = f_best[0]
else:
f_best_all[i, j] = f_best[ind][-1]
f_median = np.median(f_best_all, axis = 0)
# f_av = np.average(f_best_all, axis = 0)
# f_std = np.std(f_best_all, axis = 0)
f_min = np.min(f_best_all, axis = 0)
f_max = np.max(f_best_all, axis = 0)
return f_best_all, f_median, f_min, f_max
def cost_control_noise(x, bounds_abs, noise, N_SAA, x0 = [.116, 368.489], \
N = 200, T = 20, NS = False):
f_SAA = 0 ; g_SAA = -np.inf
if not NS:
f = lambda x: reactor_phi_2st(x, bounds_abs, noise, x0 = x0, N = N, \
T = T, return_sys_resp = False)
else:
f = lambda x: reactor_phi_2stNS(x, noise, x0 = x0, N = N, \
T = T, return_sys_resp = False)
for i in range(N_SAA):
f_sample = f(x)
f_SAA += f_sample[0]/N_SAA
g_SAA = np.maximum(g_SAA, float(f_sample[1][0]))
return f_SAA, [g_SAA]
max_it = 100
pi = [.8746, .0257, -1.43388, -0.00131, 0.00016, 55.8692, 0.7159, .0188, .00017]
pi_init = [.8746, .0257, -1.43388, -0.00131, 0.00016, 0, 0, 0, 0, 0]
bounds_abs = np.zeros((10, 2))
for i in range(5):
if pi[i] > 0:
bounds_abs[i] = [pi[i]/2, pi[i]*2]
bounds_abs[i+5] = [-pi[i]*10, pi[i]*10]
else:
bounds_abs[i] = [pi[i]*2, pi[i]/2]
bounds_abs[i+5] = [pi[i]*10, -pi[i]*10]
x0 = (np.array(pi_init) - bounds_abs[:,0]) / (bounds_abs[:,1]-bounds_abs[:,0])
noise_previous = [.001, 1]
n_noise = 6
noise_mat = np.zeros((n_noise, 2))
for i in range(n_noise):
noise_mat[i] = 1/3*np.array(noise_previous)*i
bounds = np.array([[0, 1]]*10)
x0 = (np.array(pi_init) - bounds_abs[:,0]) / (bounds_abs[:,1]-bounds_abs[:,0])
x0_abs = np.array(pi_init)
max_f_eval = 100 ; N_SAA = 1
# max_f_eval = 50 ; N_SAA = 2
N_samples = 20
# ContrSynNoise_list_DIRECT = []
# for i in range(n_noise):
# print('Iteration ', i+1, ' of DIRECT')
# best = []
# best_constr = []
# f = lambda x: cost_control_noise(x, bounds_abs, noise_mat[i], N_SAA, \
# x0 = [.116, 368.489], N = 200, T = 20)
# ContrSynNoise_DIRECT_f = lambda x, grad: f(x)
# for j in range(N_samples):
# sol = DIRECTWrapper().solve(ContrSynNoise_DIRECT_f, x0, bounds, \
# maxfun = max_f_eval, constraints=1)
# best.append(sol['f_best_so_far'][-1])
# ContrSynNoise_list_DIRECT.append(best)
# with open('DIRECTContrSyn_listNoiseConv.pickle', 'wb') as handle:
# pickle.dump(ContrSynNoise_list_DIRECT, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('DIRECTContrSyn_listNoiseConv.pickle', 'rb') as handle:
ContrSynNoise_list_DIRECT = pickle.load(handle)
# # N_SAA = 1
# N_samples = 20
# ContrSynNoise_list_CUATROg = []
# for i in range(n_noise):
# print('Iteration ', i+1, ' of CUATRO_g')
# best = []
# best_constr = []
# f = lambda x: cost_control_noise(x, bounds_abs, noise_mat[i], N_SAA, \
# x0 = [.116, 368.489], N = 200, T = 20)
# for j in range(N_samples):
# print(j+1)
# sol = CUATRO(f, x0, 0.5, bounds = bounds, max_f_eval = max_f_eval, \
# N_min_samples = 6, tolerance = 1e-10,\
# beta_red = 0.9, rnd = j, method = 'global', \
# constr_handling = 'Discrimination')
# best.append(sol['f_best_so_far'][-1])
# ContrSynNoise_list_CUATROg.append(best)
# with open('CUATROgContrSyn_listNoiseConv.pickle', 'wb') as handle:
# pickle.dump(ContrSynNoise_list_CUATROg, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('CUATROgContrSyn_listNoiseConv.pickle', 'rb') as handle:
ContrSynNoise_list_CUATROg = pickle.load(handle)
# N_samples = 20
# ContrSynNoise_list_simplex = []
# for i in range(n_noise):
# print('Iteration ', i+1, ' of Simplex')
# best = []
# best_constr = []
# f = lambda x: cost_control_noise(x, bounds_abs, noise_mat[i], N_SAA, \
# x0 = [.116, 368.489], N = 200, T = 20)
# for j in range(N_samples):
# sol = simplex_method(f, x0, bounds, max_iter = 50, \
# constraints = 1, rnd_seed = j, mu_con = 1e6)
# best.append(sol['f_best_so_far'][-1])
# ContrSynNoise_list_simplex.append(best)
# with open('simplexContrSyn_listNoiseConv.pickle', 'wb') as handle:
# pickle.dump(ContrSynNoise_list_simplex, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('simplexContrSyn_listNoiseConv.pickle', 'rb') as handle:
ContrSynNoise_list_simplex = pickle.load(handle)
noise = ['%.3f' % noise_mat[i][1] for i in range(n_noise)]
noise_labels = [[noise[i]]*N_samples for i in range(n_noise)]
min_list = np.array([np.min([np.min(ContrSynNoise_list_DIRECT[i]),
np.min(ContrSynNoise_list_CUATROg[i]),
np.min(ContrSynNoise_list_simplex[i])]) for i in range(n_noise)])
convergence = list(itertools.chain(*np.array(ContrSynNoise_list_DIRECT) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(ContrSynNoise_list_CUATROg)- min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(ContrSynNoise_list_simplex) - min_list.reshape(6,1)))
noise = list(itertools.chain(*noise_labels))*3
method = ['DIRECT']*int(len(noise)/3) + ['CUATRO_g']*int(len(noise)/3) + \
['Simplex']*int(len(noise)/3)
data = {'Best function evaluation': convergence, \
"Noise standard deviation": noise, \
'Method': method}
df = pd.DataFrame(data)
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
'legend.handlelength': 1.2}
ax = sns.boxplot(x = "Noise standard deviation", y = "Best function evaluation", hue = "Method", data = df, palette = "muted")
plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0, ncol=3)
plt.tight_layout()
plt.ylabel(r'$f_{best, sample}$ - $f_{opt, noise}$')
plt.savefig('ContrSyn_publication_plots/ContrSyn_feval100Convergence.svg', format = "svg")
# ax.set_ylim([0.1, 10])
# ax.set_yscale("log")
plt.show()
plt.clf()
# max_f_eval = 100 ; N_SAA = 1
max_f_eval = 50 ; N_SAA = 2
N_samples = 20
# ContrSynNoiseSAA_list_DIRECT = []
# for i in range(n_noise):
# print('Iteration ', i+1, ' of DIRECT')
# best = []
# best_constr = []
# f = lambda x: cost_control_noise(x, bounds_abs, noise_mat[i], N_SAA, \
# x0 = [.116, 368.489], N = 200, T = 20)
# ContrSynNoise_DIRECT_f = lambda x, grad: f(x)
# for j in range(N_samples):
# sol = DIRECTWrapper().solve(ContrSynNoise_DIRECT_f, x0, bounds, \
# maxfun = max_f_eval, constraints=1)
# best.append(sol['f_best_so_far'][-1])
# ContrSynNoiseSAA_list_DIRECT.append(best)
# with open('DIRECTContrSynSAA_listNoiseConv.pickle', 'wb') as handle:
# pickle.dump(ContrSynNoiseSAA_list_DIRECT, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('DIRECTContrSynSAA_listNoiseConv.pickle', 'rb') as handle:
ContrSynNoiseSAA_list_DIRECT = pickle.load(handle)
# N_SAA = 1
N_samples = 20
# ContrSynNoiseSAA_list_CUATROg = []
# for i in range(4):
# print('Iteration ', i+1, ' of CUATRO_g')
# best = []
# best_constr = []
# f = lambda x: cost_control_noise(x, bounds_abs, noise_mat[i], N_SAA, \
# x0 = [.116, 368.489], N = 200, T = 20)
# for j in range(N_samples):
# print(j+1)
# sol = CUATRO(f, x0, 0.5, bounds = bounds, max_f_eval = max_f_eval, \
# N_min_samples = 6, tolerance = 1e-10,\
# beta_red = 0.9, rnd = j, method = 'global', \
# constr_handling = 'Discrimination')
# best.append(sol['f_best_so_far'][-1])
# ContrSynNoiseSAA_list_CUATROg.append(best)
# with open('CUATROgContrSynSAA_listNoiseConv.pickle', 'wb') as handle:
# pickle.dump(ContrSynNoiseSAA_list_CUATROg, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('CUATROgContrSynSAA_listNoiseConv.pickle', 'rb') as handle:
ContrSynNoiseSAA_list_CUATROg = pickle.load(handle)
N_samples = 20
# ContrSynNoiseSAA_list_simplex = []
# for i in range(n_noise):
# print('Iteration ', i+1, ' of Simplex')
# best = []
# best_constr = []
# f = lambda x: cost_control_noise(x, bounds_abs, noise_mat[i], N_SAA, \
# x0 = [.116, 368.489], N = 200, T = 20)
# for j in range(N_samples):
# sol = simplex_method(f, x0, bounds, max_iter = 50, \
# constraints = 1, rnd_seed = j, mu_con = 1e6)
# best.append(sol['f_best_so_far'][-1])
# ContrSynNoiseSAA_list_simplex.append(best)
# with open('simplexContrSynSAA_listNoiseConv.pickle', 'wb') as handle:
# pickle.dump(ContrSynNoiseSAA_list_simplex, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('simplexContrSynSAA_listNoiseConv.pickle', 'rb') as handle:
ContrSynNoiseSAA_list_simplex = pickle.load(handle)
min_list = np.zeros(n_noise)
for i in range(n_noise):
if i <4:
min_list[i] = np.array([np.min([np.min(ContrSynNoiseSAA_list_DIRECT[i]),
np.min(ContrSynNoiseSAA_list_CUATROg[i]),
np.min(ContrSynNoiseSAA_list_simplex[i])])])
else:
min_list[i] = np.array([np.min([np.min(ContrSynNoiseSAA_list_DIRECT[i]),
np.min(ContrSynNoiseSAA_list_simplex[i])])])
noise = ['%.3f' % noise_mat[i][1] for i in range(n_noise)]
noise_labels = [[noise[i]]*N_samples for i in range(n_noise)]
convergence = list(itertools.chain(*np.array(ContrSynNoiseSAA_list_DIRECT) - min_list.reshape(6,1))) + \
list(itertools.chain(*np.array(ContrSynNoiseSAA_list_CUATROg[:4])- min_list.reshape(6,1)[:4])) + \
list(itertools.chain(*np.array(ContrSynNoiseSAA_list_simplex) - min_list.reshape(6,1)))
noise = list(itertools.chain(*noise_labels)) + list(itertools.chain(*noise_labels[:4])) + \
list(itertools.chain(*noise_labels))
method = ['DIRECT']*N_samples*n_noise + ['CUATRO_g*']*N_samples*(n_noise-2) + \
['Simplex']*N_samples*n_noise
data = {'Best function evaluation': convergence, \
"Noise standard deviation": noise, \
'Method': method}
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
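# Illustrative aside: average_from_list (defined near the top of this file) reduces a
# list of solver runs to a median curve and a min/max band over 100 evaluations. A
# minimal sketch with made-up solution dictionaries (the real runs are loaded from the
# pickles above in a different, already-reduced format):
fake_runs = [{'f_best_so_far': list(np.linspace(10, 1, 20)),
              'samples_at_iteration': list(range(1, 21))} for _ in range(3)]
f_all, f_med, f_lo, f_hi = average_from_list(fake_runs)
# plt.plot(np.arange(1, 101), f_med); plt.fill_between(np.arange(1, 101), f_lo, f_hi, alpha=0.3)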
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import pandas as pd  # data loading
import numpy as np  # matrix computation
import matplotlib.pyplot as plt
from math import sin as sin
from math import cos as cos
from math import exp as exp
from pandas import Series,DataFrame
import csv
import sklearn.preprocessing as preprocessing  # normalization
import operator
from os import listdir
from numpy import *
import warnings  # gensim import issue
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import word2vec
import gensim
import logging
import jieba
import re
import multiprocessing
# header=0 makes row 0 of the file the column index; index_col sets the row index
data_train = pd.read_csv(u"F:/answers.csv", header=0,index_col=None, encoding='gb18030')
data_train1 =
|
pd.read_csv(u"F:/questions.csv", header=0,index_col=None, encoding='gb18030')
|
pandas.read_csv
|
import argparse
from pathlib import Path
import pandas as pd
from finetune_v2.fmt_output import ResultFormatter
dir_struct = ["model_name", "experiment", "checkpoint", "task", "run"]
def is_path_ok(args, path):
n = len(Path(args.results_dir).parts)
assert len(path.parts[n:]) == len(dir_struct)
if not (path / "output.csv").exists():
return False
is_ok = True
for key, part in zip(dir_struct, path.parts[n:]):
if hasattr(args, key):
value = getattr(args, key)
if value is not None:
is_ok &= part == value
return is_ok
def listdir(args):
results_dir = args.results_dir
for path in Path(results_dir).glob("*/" * len(dir_struct)):
if is_path_ok(args, path):
yield path
def get_meta(args, path):
n = len(Path(args.results_dir).parts)
assert len(path.parts[n:]) == len(dir_struct)
return {key: part for key, part in zip(dir_struct, path.parts[n:])}
def collect_data(args, path):
formatter = ResultFormatter(path / "output.csv")
return get_meta(args, path), formatter.read()
if __name__=="__main__":
parser = argparse.ArgumentParser("show results")
parser.add_argument("--results_dir", default="saved")
parser.add_argument("--checkpoint", type=str, default=None)
parser.add_argument("--experiment", type=str, default=None)
parser.add_argument("--model_name", type=str, default=None)
parser.add_argument("--task", type=str, default=None)
parser.add_argument("--run", type=str, default=None)
parser.add_argument("--groupby", type=str, default=None)
parser.add_argument("--to_save", type=str, default="saved/final_result.csv")
args = parser.parse_args()
dfs = []
for path in listdir(args):
meta, data = collect_data(args, path)
for key in meta:
data[key] = [meta[key]] * len(data)
dfs.append(data)
final_df =
|
pd.concat(dfs)
|
pandas.concat
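    # Illustrative sketch, not the original continuation: one plausible way to honor the
    # --groupby and --to_save arguments defined above (an assumption about the intended
    # post-processing).
    summary_df = final_df
    if args.groupby is not None:
        summary_df = final_df.groupby(args.groupby).mean(numeric_only=True).reset_index()
    summary_df.to_csv(args.to_save, index=False)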
|