| prompt (string lengths 19–1.03M) | completion (string lengths 4–2.12k) | api (string lengths 8–90) |
|---|---|---|
# ********************************************************************************** #
# #
# Project: FastClassAI workbench #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is part of the Skin AnaliticAI development kit, created #
# for the evaluation of public datasets used for skin cancer detection with #
# a large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allows changing and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import pickle
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advanced plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from src.utils.model_summary_plots import visual_model_summary
# from src.utils.method_comparison_tools import method_comparison_boxplot # copied here for any potential changes,
# Function ..........................................................................
def create_class_colors_dict(*,
list_of_unique_names,
cmap_name="tab20",
cmap_colors_from=0,
cmap_colors_to=1
):
'''Returns a dictionary that maps each class name in list_of_unique_names
to a distinct RGB color.
. list_of_unique_names : list with unique, full names of classes, groups, etc.
. cmap_name : standard mpl colormap name.
. cmap_colors_from, cmap_colors_to : values between 0 and 1,
used to select a range of colors in the cmap.
'''
# create cmap
mycmap = plt.cm.get_cmap(cmap_name, len(list_of_unique_names)*10000)
newcolors = mycmap(np.linspace(cmap_colors_from, cmap_colors_to, len(list_of_unique_names)))
class_color_dict = dict()
for i, un in enumerate(list_of_unique_names):
class_color_dict[un] = newcolors[i]
return class_color_dict
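# A minimal usage sketch (not part of the original notebook; the class names
# below are made-up examples). The returned values are RGBA tuples taken from
# the chosen colormap and can be passed directly to matplotlib color arguments:
# example_classes = ["melanoma", "nevus", "keratosis"]
# class_colors = create_class_colors_dict(list_of_unique_names=example_classes)
# plt.bar(example_classes, [10, 20, 15], color=[class_colors[c] for c in example_classes])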
# Function .............................................................................
def load_summary_files(*,
dataset_name,
dataset_variants,
module_names,
ai_methods,
keywords,
path_results,
verbose=False
):
# ensure that you have proper data structures
if isinstance(dataset_variants, str):
dataset_variants = [dataset_variants]
else:
pass
if isinstance(module_names, str):
module_names = [module_names]
else:
pass
if isinstance(ai_methods, str):
ai_methods = [ai_methods]
else:
pass
if isinstance(keywords, str):
keywords = [keywords]
else:
pass
# collect names of files that will be loaded
file_counter=0
for ai_method in ai_methods:
for dataset_variant in dataset_variants:
for module_name in module_names:
if verbose==True:
print("Loading files for: ", ai_method, dataset_variant, module_name, "Found: ", end="")
else:
pass
# path
rpath = os.path.join(path_results, f"{ai_method}__{dataset_name}__{dataset_variant}")
os.chdir(rpath)
# find all files in rpath
files = []
for file in glob.glob("*"):
files.append(file)
# select all with keywords,
files_s =
| pd.Series(files) | pandas.Series |
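Read together, the truncated prompt and its completion resolve to the line below; the keyword-filtering step that presumably follows is sketched only as an illustration and is not part of the dataset row:
files_s = pd.Series(files)
# illustrative continuation, assuming keywords are matched as substrings:
# for k in keywords:
#     files_s = files_s.loc[files_s.str.contains(k)]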
#!/usr/bin/env python
"""This script is used to run the pipeline in a distributed computing system
Instead of running each test with a different number of samples per subject
one at a time, this script allows for parallel submission of multiple jobs each
with a different number of samples.
The notebooks and featlib are intended to provide the needed
context/information that may have been omitted in this file.
"""
import click
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import qiime2 as q2
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, auc
from scipy import interp
from skbio import TreeNode
from biom import load_table, Table
from featlib import Sculptor, load_mf
@click.command()
@click.option('--otu-fp', help='A rarefied OTU table (a BIOM file).')
@click.option('--metadata-fp', help='Metadata mapping file (a tab-delimited '
'file).')
@click.option('--tree-fp', help='Phylogenetic tree (a QIIME 2 artifact).')
@click.option('--samples-per-subject', help='Number of samples per subject',
type=click.IntRange(0, 11))
@click.option('--method', type=click.Choice(['gg', 'gg-smaller', 'sepp']),
help='Data to use: gg (Greengenes 97% OTUs and clade names), '
'gg-smaller (Greengenes 97% OTUs limited to the OTU IDs found '
'by phylofactor), and sepp (deblurred OTUs using clade names).')
def main(otu_fp, metadata_fp, tree_fp, samples_per_subject, method):
# rarefy before renaming
bt = load_table(otu_fp)
if method == 'sepp':
bt.update_ids({i:i.replace('.fastq', '').split('.')[0]
for i in bt.ids('sample')}, 'sample', inplace=True)
mf = load_mf(metadata_fp)
# we only keep the samples that have sequences in the table
mf = mf.loc[bt.ids()].copy()
tree = q2.Artifact.load(tree_fp).view(TreeNode)
for n in tree.traverse():
if n.length is None:
n.length = 0
mf['days_since_epoch'] = pd.to_numeric(mf['days_since_epoch'],
errors='coerce')
if method == 'gg-smaller':
# this file was provided by Alex
otus = pd.read_csv('Proteos_Lachnos_Phylofactored_in_Terminal_Ileum.csv')
pts_lchns = {str(int(i)) for i in set(otus.Proteos) | set(otus.Lachnos) if not np.isnan(i)}
think = Sculptor(biom_table=bt, mapping_file=mf, tree=tree,
gradient='days_since_epoch', trajectory='host_subject_id',
name=method)
lw = 2
N = 100
N_samples = samples_per_subject
plt.figure()
i = 0
while i < N:
# taken from:
# http://scikit-learn.org/stable/auto_examples/model_selection/
# plot_roc_crossval.html
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
## BEGIN feature creation
think.randomly_select(N_samples)
alpha = think.alpha_table(['faith_pd', 'chao1', 'brillouin_d'])
beta = think.beta_table()
features_to_keep = []
if method in {'gg', 'sepp'}:
for _, id_, md in think.biom_table.iter(axis='observation',
dense=False):
t = md['taxonomy']
if (t[4].lower() == 'f__lachnospiraceae'
or t[2].lower() == 'c__gammaproteobacteria'
or t[2].lower() == 'c__betaproteobacteria'):
features_to_keep.append(id_)
else:
features_to_keep = set(think.biom_table.ids('observation')) & pts_lchns
# more than one sample
if N_samples > 1:
alpha = think.alpha_table(['faith_pd', 'chao1', 'brillouin_d'])
beta = think.beta_table()
features = think.microbes_over_time(ids=features_to_keep)
# combine the data
combined_features =
| pd.concat([features, alpha, beta], axis=1) | pandas.concat |
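The completion closes the feature-combination step of the truncated prompt. A minimal sketch of how the combined table could feed the classifier imported above is shown as comments; the label vector and split parameters are assumptions, not part of the dataset row:
combined_features = pd.concat([features, alpha, beta], axis=1)
# illustrative only, assuming a per-sample label vector `labels` is available:
# X_train, X_test, y_train, y_test = train_test_split(combined_features, labels, test_size=0.25)
# clf = RandomForestClassifier(n_estimators=500).fit(X_train, y_train)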
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
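# A small illustrative call (not from the original test suite): with
# sids=[0, 1] and a single knowledge-date tuple for sid 0, the helper pivots
# the tuples into one column per sid, forward-fills through the date range,
# and adds sid 1 as an all-NaN column:
#   create_expected_df_for_factor_compute(
#       pd.Timestamp('2015-01-05'), [0, 1],
#       [(0, 100., pd.Timestamp('2015-01-06'))], pd.Timestamp('2015-01-08'))
#   -> index (at_date=2015-01-08 UTC, knowledge_date 2015-01-05..08 UTC),
#      column 0 = [NaN, 100.0, 100.0, 100.0], column 1 all NaN.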
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that the expected estimates are returned when a single day and
multiple columns are requested.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7,
|
pd.Timestamp('2015-02-10')
|
pandas.Timestamp
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 18:29:16 2020
@author: POI-PC
"""
from PyQt5.QtWidgets import*
from PyQt5.QtCore import pyqtSlot
from PyQt5 import QtGui
from PyQt5 import QtCore, QtWidgets
import sys
from selenium import webdriver
import time
import pandas as pd
import numpy as np
from xlrd import open_workbook
import os
from openpyxl import *
import io
from zipfile import ZipFile
import xlrd
import codecs
import shutil
from selenium.common.exceptions import NoSuchElementException
import html5lib
from os import path
from pathlib import Path
from itertools import product
import xlwings as xw
from datetime import date
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1489, 901)
font = QtGui.QFont()
font.setPointSize(9)
MainWindow.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/bilanco.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
MainWindow.setIconSize(QtCore.QSize(50, 50))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(380, 10, 711, 51))
font = QtGui.QFont()
font.setFamily("Tw Cen MT")
font.setPointSize(24)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.sirketGetir = QtWidgets.QPushButton(self.centralwidget)
self.sirketGetir.setGeometry(QtCore.QRect(760, 120, 241, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
font.setStrikeOut(False)
self.sirketGetir.setFont(font)
self.sirketGetir.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.sirketGetir.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("icons/sirketler.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.sirketGetir.setIcon(icon1)
self.sirketGetir.setIconSize(QtCore.QSize(50, 50))
self.sirketGetir.setObjectName("sirketGetir")
self.yedekleSil = QtWidgets.QPushButton(self.centralwidget)
self.yedekleSil.setGeometry(QtCore.QRect(50, 120, 241, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.yedekleSil.setFont(font)
self.yedekleSil.setLayoutDirection(QtCore.Qt.LeftToRight)
self.yedekleSil.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("icons/clear.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.yedekleSil.setIcon(icon2)
self.yedekleSil.setIconSize(QtCore.QSize(50, 50))
self.yedekleSil.setObjectName("yedekleSil")
self.anaExcel = QtWidgets.QPushButton(self.centralwidget)
self.anaExcel.setGeometry(QtCore.QRect(1080, 120, 251, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.anaExcel.setFont(font)
self.anaExcel.setLocale(QtCore.QLocale(QtCore.QLocale.Turkish, QtCore.QLocale.Turkey))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("icons/excel2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.anaExcel.setIcon(icon3)
self.anaExcel.setIconSize(QtCore.QSize(50, 50))
self.anaExcel.setObjectName("anaExcel")
self.sirketler = QtWidgets.QListWidget(self.centralwidget)
self.sirketler.setGeometry(QtCore.QRect(290, 290, 261, 301))
self.sirketler.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.sirketler.setObjectName("sirketler")
self.gosterSirket = QtWidgets.QPushButton(self.centralwidget)
self.gosterSirket.setGeometry(QtCore.QRect(880, 330, 271, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.gosterSirket.setFont(font)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("icons/show.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.gosterSirket.setIcon(icon4)
self.gosterSirket.setIconSize(QtCore.QSize(40, 40))
self.gosterSirket.setObjectName("gosterSirket")
self.secilenIndr = QtWidgets.QPushButton(self.centralwidget)
self.secilenIndr.setGeometry(QtCore.QRect(880, 490, 271, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.secilenIndr.setFont(font)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("icons/download.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.secilenIndr.setIcon(icon5)
self.secilenIndr.setIconSize(QtCore.QSize(40, 40))
self.secilenIndr.setObjectName("secilenIndr")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(60, 240, 191, 21))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.tumSirketler = QtWidgets.QListView(self.centralwidget)
self.tumSirketler.setGeometry(QtCore.QRect(20, 290, 256, 301))
self.tumSirketler.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.tumSirketler.setLineWidth(0)
self.tumSirketler.setResizeMode(QtWidgets.QListView.Fixed)
self.tumSirketler.setObjectName("tumSirketler")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(280, 240, 291, 16))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(290, 270, 261, 16))
self.label_5.setObjectName("label_5")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(1150, 330, 20, 211))
self.line.setLineWidth(3)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setGeometry(QtCore.QRect(20, 200, 1381, 16))
self.line_2.setLineWidth(2)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.bildirim = QtWidgets.QLineEdit(self.centralwidget)
self.bildirim.setGeometry(QtCore.QRect(10, 800, 701, 41))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.bildirim.setFont(font)
self.bildirim.setObjectName("bildirim")
self.genelGetir = QtWidgets.QPushButton(self.centralwidget)
self.genelGetir.setGeometry(QtCore.QRect(1190, 330, 261, 61))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.genelGetir.setFont(font)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("icons/geneldown.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.genelGetir.setIcon(icon6)
self.genelGetir.setIconSize(QtCore.QSize(35, 35))
self.genelGetir.setObjectName("genelGetir")
self.devamEt = QtWidgets.QPushButton(self.centralwidget)
self.devamEt.setGeometry(QtCore.QRect(1190, 480, 261, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.devamEt.setFont(font)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap("icons/continue.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.devamEt.setIcon(icon13)
self.devamEt.setIconSize(QtCore.QSize(35, 35))
self.devamEt.setObjectName("devamEt")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(610, 70, 271, 20))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(980, 230, 331, 20))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(1050, 570, 251, 20))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setGeometry(QtCore.QRect(880, 550, 531, 20))
self.line_3.setLineWidth(2)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.zipAktar = QtWidgets.QPushButton(self.centralwidget)
self.zipAktar.setGeometry(QtCore.QRect(610, 660, 241, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.zipAktar.setFont(font)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("icons/zip.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zipAktar.setIcon(icon7)
self.zipAktar.setIconSize(QtCore.QSize(40, 40))
self.zipAktar.setObjectName("zipAktar")
self.aktarExcel = QtWidgets.QPushButton(self.centralwidget)
self.aktarExcel.setGeometry(QtCore.QRect(1160, 710, 241, 61))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.aktarExcel.setFont(font)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap("icons/excel3.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.aktarExcel.setIcon(icon8)
self.aktarExcel.setIconSize(QtCore.QSize(50, 50))
self.aktarExcel.setObjectName("aktarExcel")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(1220, 640, 55, 16))
font = QtGui.QFont()
font.setPointSize(7)
font.setBold(True)
font.setWeight(75)
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(1190, 610, 191, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(1300, 660, 51, 22))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setGeometry(QtCore.QRect(1310, 640, 21, 16))
font = QtGui.QFont()
font.setPointSize(7)
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setGeometry(QtCore.QRect(1220, 660, 51, 22))
self.lineEdit_4.setObjectName("lineEdit_4")
self.bosZipler = QtWidgets.QListWidget(self.centralwidget)
self.bosZipler.setGeometry(QtCore.QRect(350, 670, 241, 91))
self.bosZipler.setObjectName("bosZipler")
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setGeometry(QtCore.QRect(630, 740, 181, 21))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_14.setFont(font)
self.label_14.setObjectName("label_14")
self.secHepsini = QtWidgets.QPushButton(self.centralwidget)
self.secHepsini.setGeometry(QtCore.QRect(420, 600, 131, 28))
font = QtGui.QFont()
font.setPointSize(10)
self.secHepsini.setFont(font)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap("icons/selectall.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.secHepsini.setIcon(icon9)
self.secHepsini.setObjectName("secHepsini")
self.yedekle = QtWidgets.QPushButton(self.centralwidget)
self.yedekle.setGeometry(QtCore.QRect(390, 120, 241, 61))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.yedekle.setFont(font)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap("icons/backup.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.yedekle.setIcon(icon10)
self.yedekle.setIconSize(QtCore.QSize(30, 30))
self.yedekle.setObjectName("yedekle")
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
self.listWidget.setGeometry(QtCore.QRect(880, 640, 256, 192))
self.listWidget.setObjectName("listWidget")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(1150, 790, 251, 20))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(970, 290, 231, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(1260, 290, 113, 22))
self.lineEdit.setObjectName("lineEdit")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(570, 260, 301, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.listWidget_2 = QtWidgets.QListWidget(self.centralwidget)
self.listWidget_2.setGeometry(QtCore.QRect(580, 290, 256, 301))
self.listWidget_2.setObjectName("listWidget_2")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(880, 410, 271, 61))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap("icons/checkbox.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton.setIcon(icon11)
self.pushButton.setIconSize(QtCore.QSize(40, 40))
self.pushButton.setObjectName("pushButton")
self.label_15 = QtWidgets.QLabel(self.centralwidget)
self.label_15.setGeometry(QtCore.QRect(10, 770, 171, 16))
font = QtGui.QFont()
font.setItalic(True)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setGeometry(QtCore.QRect(1470, 100, 20, 731))
self.line_4.setLineWidth(3)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.label_16 = QtWidgets.QLabel(self.centralwidget)
self.label_16.setGeometry(QtCore.QRect(940, 260, 261, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_16.setFont(font)
self.label_16.setObjectName("label_16")
self.donem = QtWidgets.QLineEdit(self.centralwidget)
self.donem.setGeometry(QtCore.QRect(1260, 260, 113, 22))
self.donem.setObjectName("donem")
self.tumGetir = QtWidgets.QPushButton(self.centralwidget)
self.tumGetir.setGeometry(QtCore.QRect(1170, 400, 291, 61))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.tumGetir.setFont(font)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap("icons/all.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tumGetir.setIcon(icon12)
self.tumGetir.setIconSize(QtCore.QSize(35, 35))
self.tumGetir.setObjectName("tumGetir")
self.label.raise_()
self.sirketGetir.raise_()
self.yedekleSil.raise_()
self.anaExcel.raise_()
self.sirketler.raise_()
self.secilenIndr.raise_()
self.label_3.raise_()
self.tumSirketler.raise_()
self.label_4.raise_()
self.label_5.raise_()
self.line.raise_()
self.line_2.raise_()
self.bildirim.raise_()
self.gosterSirket.raise_()
self.genelGetir.raise_()
self.devamEt.raise_()
self.label_7.raise_()
self.label_8.raise_()
self.label_9.raise_()
self.line_3.raise_()
self.zipAktar.raise_()
self.aktarExcel.raise_()
self.label_12.raise_()
self.label_6.raise_()
self.lineEdit_3.raise_()
self.label_13.raise_()
self.lineEdit_4.raise_()
self.bosZipler.raise_()
self.label_14.raise_()
self.secHepsini.raise_()
self.yedekle.raise_()
self.listWidget.raise_()
self.label_2.raise_()
self.label_10.raise_()
self.lineEdit.raise_()
self.label_11.raise_()
self.listWidget_2.raise_()
self.pushButton.raise_()
self.label_15.raise_()
self.line_4.raise_()
self.label_16.raise_()
self.donem.raise_()
self.tumGetir.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1489, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label_5.setBuddy(self.sirketler)
self.retranslateUi(MainWindow)
self.sirketGetir.clicked.connect(MainWindow.sirketlerKap)
self.anaExcel.clicked.connect(MainWindow.bilancoExcel)
self.yedekleSil.clicked.connect(MainWindow.silYedekle)
self.gosterSirket.clicked.connect(self.sirketler.doItemsLayout)
self.genelGetir.clicked.connect(MainWindow.genelYukle)
self.anaExcel.released.connect(self.tumSirketler.doItemsLayout)
self.devamEt.clicked.connect(MainWindow.devamEttir)
self.zipAktar.clicked.connect(MainWindow.zipeAktar)
self.aktarExcel.clicked.connect(MainWindow.hepsiExcel)
self.zipAktar.released.connect(self.bosZipler.doItemsLayout)
self.secilenIndr.clicked.connect(MainWindow.cekSecilen)
self.secHepsini.clicked.connect(MainWindow.selectHepsi)
self.secHepsini.clicked.connect(self.sirketler.selectAll)
self.yedekle.clicked.connect(MainWindow.excelYedekle)
self.aktarExcel.clicked.connect(self.listWidget.doItemsLayout)
self.pushButton.clicked.connect(self.listWidget_2.doItemsLayout)
self.tumGetir.clicked.connect(MainWindow.donemselTum)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Bilanco Programı"))
self.label.setText(_translate("MainWindow", "Otomatik Bilanço Veri Çekme Programı V. 1.6"))
self.sirketGetir.setText(_translate("MainWindow", "Şirketleri Getir"))
self.yedekleSil.setText(_translate("MainWindow", "Sil ve Yedekle"))
self.anaExcel.setText(_translate("MainWindow", "Bilanco.xlsx Sisteme Al"))
self.gosterSirket.setText(_translate("MainWindow", "İndirilmemiş Verileri Göster"))
self.secilenIndr.setText(_translate("MainWindow", "Seçileni İndir"))
self.label_3.setText(_translate("MainWindow", "Tüm Şirketlerin Listesi"))
self.label_4.setText(_translate("MainWindow", "Sistemde Çekilmemiş Şirketler Listesi"))
self.label_5.setText(_translate("MainWindow", "(Burda tıkladıkların sisteme çekilecektir.)"))
self.bildirim.setText(_translate("MainWindow", "Bildirimler !"))
self.genelGetir.setText(_translate("MainWindow", "Tüm Şirketleri İndir"))
self.devamEt.setText(_translate("MainWindow", "Kaldığı Yerden Devam Ettir"))
self.label_7.setText(_translate("MainWindow", "Veriler İçin Ön Hazırlık"))
self.label_8.setText(_translate("MainWindow", "Verilerin İnternetten Çekildiği Yer"))
self.label_9.setText(_translate("MainWindow", "Verilerin Excel\'e Aktarılması"))
self.zipAktar.setText(_translate("MainWindow", "Zip Dosyalarını Aç"))
self.aktarExcel.setText(_translate("MainWindow", "Excel\'e Aktar"))
self.label_12.setText(_translate("MainWindow", "Dönem"))
self.label_6.setText(_translate("MainWindow", "Çekmek İstediğin Dönem"))
self.lineEdit_3.setText(_translate("MainWindow", "2019"))
self.label_13.setText(_translate("MainWindow", "Yıl"))
self.lineEdit_4.setText(_translate("MainWindow", "0"))
self.label_14.setText(_translate("MainWindow", "<-- Zip\'leri Boş Olanlar"))
self.secHepsini.setText(_translate("MainWindow", "Hepsini Seç"))
self.yedekle.setText(_translate("MainWindow", "Bilanco Yedekle"))
self.label_2.setText(_translate("MainWindow", " <-- Excel\'e Aktarılmamış Olanlar"))
self.label_10.setText(_translate("MainWindow", "İndirmek İstediğin Yılı Gir ->"))
self.lineEdit.setText(_translate("MainWindow", "2020"))
self.label_11.setText(_translate("MainWindow", "Seçilmiş (İndirilecek) Şirketler Listesi"))
self.pushButton.setText(_translate("MainWindow", "Seçilmişleri Göster"))
self.label_15.setText(_translate("MainWindow", "Writed by SVS © (2020)"))
self.label_16.setText(_translate("MainWindow", "İndirmek İstediğin Dönemi Gir ->"))
self.donem.setText(_translate("MainWindow", "5"))
self.tumGetir.setText(_translate("MainWindow", "Tüm Şirketleri Dönemsel İndir"))
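# A minimal helper sketch (not part of the original program flow): the parsing
# code below repeats the same "strip thousands dots, then int()" conversion for
# every KAP cell, so the pattern could be written once like this.
def to_int(value):
    # KAP cells are either 0 or dotted strings such as "1.234.567".
    if value == 0:
        return 0
    return int(str(value).replace(".", ""))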
class Bilanco(QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.sirketler.setSelectionMode(
QAbstractItemView.ExtendedSelection
)
self.ui.sirketler.setEditTriggers(QAbstractItemView.DoubleClicked|QAbstractItemView.EditKeyPressed)
self.ui.sirketler.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.sirketler.setViewMode(QListView.ListMode)
self.ui.listWidget.setSelectionMode(
QAbstractItemView.ExtendedSelection
)
self.ui.listWidget.setEditTriggers(QAbstractItemView.DoubleClicked|QAbstractItemView.EditKeyPressed)
self.ui.listWidget.setSelectionMode(QAbstractItemView.MultiSelection)
self.ui.listWidget.setViewMode(QListView.ListMode)
self.ui.anaExcel.released.connect(self.listeyeDok)
self.ui.sirketGetir.released.connect(self.bildirim1)
#self.ui.anaExcel.released.connect(self.bildirim2)
self.ui.yedekleSil.released.connect(self.bildirim3)
self.ui.gosterSirket.clicked.connect(self.widgetListele)
self.ui.gosterSirket.released.connect(self.bildirim4)
self.ui.pushButton.clicked.connect(self.widgetSelectedShow)
self.ui.pushButton.released.connect(self.bildirim8)
#self.ui.sirketGetir.released.connect(self.listeyeDok)
self.ui.zipAktar.released.connect(self.bildirim7)
self.ui.sirketler.itemClicked.connect(self.seciliSec)
def bildirim1(self):
self.ui.bildirim.setText("Sirket Verileri Cekildi!")
def bildirim2(self):
self.ui.bildirim.setText("Excel Datası Cekildi!")
def bildirim3(self):
self.ui.bildirim.setText("Eski Veriler silindi ve Bilanco yedeklendi!")
def bildirim4(self):
self.ui.bildirim.setText("Çekilen şirketler gösterildi!")
def bildirim5(self):
self.ui.bildirim.setText("Tum veriler CEKILEMEDI!")
def bildirim6(self):
self.ui.bildirim.setText("Tum veriler basariyla cekildi!")
def bildirim7(self):
self.ui.bildirim.setText("Dosyadaki tum Zip'ler açıldı!")
def bildirim8(self):
self.ui.bildirim.setText("Secilmis sirketler gösterildi!")
def selectHepsi(self):
print("ok")
def excelYedekle(self):
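        # Copy Bilanco.xlsm into BilancoYedek/ with today's date in the backup file name.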
today = date.today()
shutil.copy('Bilanco-Excel/Bilanco.xlsm', 'BilancoYedek/BilancoBackUp-'+str(today)+'.xlsm')
self.ui.bildirim.setText("Bilanco excel'i yedeklendi!")
def donemselTum(self):
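        # Download every listed company's financial tables for the year/period typed in the UI,
        # letting Chrome save KAP's zip into a period-specific folder under Veriler/.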
yil = int(self.ui.lineEdit.text())
donem = int(self.ui.donem.text())
yilDonem = str(yil) + "+" + str(donem)
options = webdriver.ChromeOptions()
adres = fileName + "\Veriler\-"
#options.add_argument("download.default_directory="+ adres ")
prefs = {
"download.default_directory": adres+yilDonem,
"download.prompt_for_download": False,
"download.directory_upgrade": True
}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get("https://www.kap.org.tr/tr/")
time.sleep(5)
ftablolar = browser.find_element_by_xpath("//*[@id='financialTablesTab']/div")
ftablolar.click()
time.sleep(5)
fyil = int(browser.find_element_by_xpath("//*[@id='email-form']/div[3]/div[2]/div[1]/div[1]/div").text)
time.sleep(2)
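        # Step the year slider left or right until the page shows the requested year.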
if(fyil != yil):
flager = fyil - yil
if flager > 0:
for i in range(flager):
cyil = browser.find_element_by_xpath('//*[@id="rightFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
else:
for i in range(abs(flager)):
cyil = browser.find_element_by_xpath('//*[@id="leftFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
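        # Adjust the period slider: for periods 3-4 click the left arrow (5 - donem) times,
        # otherwise click the right arrow donem times.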
fdonem = 5 - donem
print(fdonem)
if(donem == 3 or donem == 4):
while(fdonem > 0):
cdonem = browser.find_element_by_xpath('//*[@id="leftFinancialTablePeriodSliderButton"]')
cdonem.click()
time.sleep(2)
fdonem = fdonem - 1
else:
while(donem > 0):
cdonem = browser.find_element_by_xpath('//*[@id="rightFinancialTablePeriodSliderButton"]')
cdonem.click()
time.sleep(2)
donem = donem - 1
getir = browser.find_element_by_xpath("//*[@id='Getir']")
getir.click()
time.sleep(5)
try:
dosyaBulunamadi = browser.find_element_by_xpath("/html/body/div[10]/div/div/div[2]/div/div[2]")
if dosyaBulunamadi:
self.ui.bildirim.setText("Istedigin tarih ve doneme ait veriler bulunamadi!")
except:
self.ui.bildirim.setText("Istenilen tarih ve donemdeki tum sirketler cekildi!")
def sirketlerKap(self):
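        # Download KAP's BIST company-list export into Sirketler/ and fill the
        # tumSirketler view with the company codes parsed from it.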
options = webdriver.ChromeOptions()
adres = fileName2 +"\\Sirketler"
#options.add_argument("download.default_directory="+ adres ")
prefs = {
"download.default_directory": adres,
"download.prompt_for_download": False,
"download.directory_upgrade": True
}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get("https://www.kap.org.tr/tr/api/exportCompanyPages/bist-sirketler/xls")
time.sleep(20)
browser.close()
df_sirket = pd.read_html('Sirketler/Sirketler.xls')
print(df_sirket)
sirketler = []
for i in range(len(df_sirket)):
temp = df_sirket[i][1][1:]
temp = temp.to_list()
for k in range(len(temp)):
s = temp[k]
sirketler.append(s)
model = QtGui.QStandardItemModel()
self.ui.tumSirketler.setModel(model)
for i in sirketler:
item = QtGui.QStandardItem(i)
model.appendRow(item)
def widgetSelectedShow(self):
self.ui.listWidget_2.clear()
# items1 = self.ui.sirketler.selectedItems()
# print(items1)
items1 = [item.text() for item in self.ui.sirketler.selectedItems()]
print(items1)
self.ui.listWidget_2.addItems(items1)
def cekSecilen(self):
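        # For every company picked in the selection list, open KAP in Chrome and download
        # that company's financial-table zip for the year entered in the UI.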
lw = self.ui.listWidget_2
items = []
for x in range(lw.count()):
items.append(str(lw.item(x).text()))
print(items)
a = 0
for sirketisim in items:
passYap = False
print(a)
a = a + 1
options = webdriver.ChromeOptions()
adres = fileName + "\Veriler\-"
#options.add_argument("download.default_directory="+ adres ")
prefs = {
"download.default_directory": adres+sirketisim,
"download.prompt_for_download": False,
"download.directory_upgrade": True
}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get("https://www.kap.org.tr/tr/")
time.sleep(5)
ftablolar = browser.find_element_by_xpath("//*[@id='financialTablesTab']/div")
ftablolar.click()
time.sleep(5)
yilx = int(self.ui.lineEdit.text())
fyil = int(browser.find_element_by_xpath("//*[@id='email-form']/div[3]/div[2]/div[1]/div[1]/div").text)
print(fyil)
print(sirketisim)
time.sleep(2)
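            # Move the year slider until the requested year (yilx) is displayed.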
if fyil == yilx:
print(yilx)
else:
flager = fyil - yilx
if flager > 0:
for i in range(flager):
cyil = browser.find_element_by_xpath('//*[@id="rightFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
else:
for i in range(abs(flager)):
cyil = browser.find_element_by_xpath('//*[@id="leftFinancialTableYearSliderButton"]/div')
cyil.click()
time.sleep(2)
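            # Type the company code into the KAP filter box; if that fails, retry
            # with the last character dropped.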
try:
sirket = browser.find_element_by_id("Sirket-6")
sirket.send_keys(sirketisim)
time.sleep(5)
ftablolar2 = browser.find_element_by_xpath("//*[@id='calendarFilterInputFinancialTable']/div/a")
ftablolar2.click()
time.sleep(5)
except:
try:
sirket = browser.find_element_by_id("Sirket-6")
sirket.clear()
sirket.send_keys(sirketisim[:-1])
time.sleep(1)
ftablolar2 = browser.find_element_by_xpath("//*[@id='calendarFilterInputFinancialTable']/div/a")
ftablolar2.click()
time.sleep(1)
except:
sirket = browser.find_element_by_id("Sirket-6")
sirket.clear()
sirket.send_keys(sirketisim)
time.sleep(1)
getir = browser.find_element_by_xpath("//*[@id='Getir']")
getir.click()
time.sleep(5)
try:
dosyaBulunamadi = browser.find_element_by_xpath("/html/body/div[10]/div/div/div[2]/div/div[2]")
if dosyaBulunamadi:
try:
solKaydir = browser.find_element_by_xpath('//*[@id="leftFinancialTablePeriodSliderButton"]/div')
solKaydir.click()
solKaydir = browser.find_element_by_xpath('//*[@id="leftFinancialTablePeriodSliderButton"]/div')
solKaydir.click()
time.sleep(2)
getir = browser.find_element_by_xpath("//*[@id='Getir']")
getir.click()
time.sleep(5)
except:
                        passYap = True
                        os.mkdir(adres+sirketisim)
                        print("Successfully created the directory %s " % (adres+sirketisim))
except:
pass
time.sleep(25)
browser.close()
if (path.exists(adres+sirketisim[:-1]+"\\2019-Tum Donemler.zip") == False) or (path.exists(adres+sirketisim+"\\2019-Tum Donemler.zip") == False):
if passYap == True:
self.ui.bildirim.setText("Tum veriler CEKILEMEDI!")
break
self.ui.bildirim.setText("Seçinler sirketler basariyla indirildi!")
def seciliSec(self):
print("ok")
def bilancoExcel(self):
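        # Read the row labels from the KOZAA sheet of Bilanco.xlsm; the upper-cased
        # labels serve as the template list of balance-sheet line names.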
sheets = pd.read_excel('Bilanco-Excel/Bilanco.xlsm' ,sheet_name=['KOZAA'])
bilanco_isim = sheets['KOZAA'].iloc[:,0]
bilanco_isim = bilanco_isim.values.tolist()
        #bilanco_isim['bilanco'] = bilanco_isim['bilanco'].str.upper()
bilanco_isim_revize = []
        for i in bilanco_isim:
            # strip leading spaces from the row label and upper-case it
            bilanco_isim_revize.append(i.lstrip(' ').upper())
print("Bitti !")
def zipeAktar(self):
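        # Extract every downloaded "2019-Tum Donemler.zip" under Veriler/ into Excels/
        # and list the companies whose zip is missing or unreadable.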
self.ui.bosZipler.clear()
veriler = os.listdir(fileName + "/Veriler/")
bos_veri = []
for veri in veriler:
path_sirket = []
sirket = os.listdir(fileName2 +"\\Veriler\\"+veri)
path_sirket.append(sirket)
for zipex in veriler:
path = fileName + "\\Veriler\\"
path2 = zipex + "\\2019-Tum Donemler.zip"
pathe = path + path2
exact =fileName + "\\Excels"
try:
with ZipFile(pathe, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(exact)
print("ok")
except:
bos_veri.append(zipex)
print("fail")
self.ui.bosZipler.addItems(bos_veri)
def hepsiExcel(self):
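        # Parse each extracted KAP .xls report with pandas.read_html and write the
        # balance-sheet, income-statement and cash-flow figures into the matching
        # sheet of Bilanco.xlsm via xlwings.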
sheets = pd.read_excel('Bilanco-Excel/Bilanco.xlsm' ,sheet_name=['KOZAA'])
bilanco_isim = sheets['KOZAA'].iloc[:,0]
bilanco_isim = bilanco_isim.values.tolist()
        #bilanco_isim['bilanco'] = bilanco_isim['bilanco'].str.upper()
excel_sheets = xlrd.open_workbook('Bilanco-Excel/Bilanco.xlsm', on_demand=True)
excel_list = excel_sheets.sheet_names()
excel_list.remove('Anatablo')
excel_list.remove('HISSE-GRAFIK')
excel_list.remove('GRAFİK 2')
excel_list.remove('ÖZEL ORANLAR')
excel_list.remove('Güncel Fiyat')
excel_liste = [x.upper() for x in excel_list]
print (excel_liste)
cekSirkets = pd.read_excel('Hisseler/Hisseler.xlsx')
cekSirketler = cekSirkets[["KOD"]].values.tolist()
print(cekSirketler)
bilanco_isim_revize = []
        for i in bilanco_isim:
            # strip leading spaces from the row label and upper-case it
            bilanco_isim_revize.append(i.lstrip(' ').upper())
print(bilanco_isim_revize)
excels = os.listdir("Excels/")
matching = [s[:-4] for s in excels if '.xls' in s]
print(len(matching))
total = 0
for excel in matching:
temp = excel.split("-")
keep = len(temp)
total = total + keep
print(total)
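        # Split each extracted file name into company code(s), reporting period and year
        # (period and year are the last two underscore-separated fields).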
npgive = np.empty([total,3], dtype = object)
z = 0
for i in range(len(matching)):
temp = matching[i]
x = temp.split("_")
y = temp.split("-")
for k in range(len(y)):
if k == (len(y) - 1):
temp = y[-1].split("_")
npgive[z][0] = temp[0]
npgive[z][1] = x[-1]
npgive[z][2] = x[-2]
z += 1
else:
npgive[z][0] = y[k]
npgive[z][1] = x[-1]
npgive[z][2] = x[-2]
z += 1
sirketKod = pd.DataFrame({'Kod': npgive[:, 0], 'Donem': npgive[:, 1],'Yil': npgive[:, 2]})
print(sirketKod)
yil = self.ui.lineEdit_3.text()
donem = self.ui.lineEdit_4.text()
print(yil)
print(donem)
yil = int(yil)
donem = int(donem)
donemlik = donem * 3
is_sirketKod = sirketKod[(sirketKod.Yil == yil) & (sirketKod.Donem == donem)]
print(is_sirketKod)
olmadi = []
a = 0
b = 0
for take in excel_liste:
c = sirketKod[sirketKod.Kod == take.upper()]
if(c.empty):
print("fail")
olmadi.append(take.upper())
else:
print("ok")
b += 1
print(olmadi)
donemstr = str(donem)
yilstr = str(yil)
sonExcel = []
for exc in matching:
x = exc.split("_")
if donemstr in x[-1] and yilstr in x[-2]:
sonExcel.append(exc)
else:
continue
# print(sonExcel)
cekExcel = []
for sExc in sonExcel:
for excLi in cekSirketler:
if excLi[0] in sExc:
cekExcel.append(sExc)
cekexcel = []
[cekexcel.append(x) for x in cekExcel if x not in cekexcel]
olmadis = []
print(cekexcel)
for excs in cekexcel:
x = excs.split("-")
if len(x) < 2:
y = excs.split("_")
print(excs)
print(y[0])
excs = str(excs) + ".xls"
npsave = np.empty([len(bilanco_isim_revize),2], dtype = object)
for i in range(len(bilanco_isim_revize)):
npsave[i][0] = bilanco_isim_revize[i]
                # extract the balance-sheet figures from the selected tables
manu = pd.read_html("Excels/"+ excs)
npsave[0][1] = str(yil) + "/" + str(donemlik)
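                # Heuristic: KAP statement tables have 5-8 columns and more than two rows;
                # collect the indices of every table that matches.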
bilanchos = []
for i in range(len(manu)):
if len(manu[i].columns) >= 5 and len(manu[i].columns) <= 8:
if len(manu[i])>2:
bilanchos.append(i)
newdf = manu[bilanchos[0]]
del bilanchos[0]
newdf3 = manu[bilanchos[-1]]
del bilanchos[-1]
if len(manu[bilanchos[0]]) == 300:
newdf2 = manu[bilanchos[0]]
else:
frames = []
for i in range(len(bilanchos)):
frames.append(manu[bilanchos[i]])
                    if len(frames) == 1:
                        newdf2 = manu[bilanchos[0]]
                    elif len(frames) >= 1:
                        newdf2 = pd.concat(frames, ignore_index=True)
carpanx = manu[0]
carpany = carpanx[1][0]
carpanz = carpany.strip(' TL')
if not carpanz:
carpanz = 1
else:
oldstr = carpanz
if isinstance(oldstr, int):
carpanz = oldstr
else:
newstr = oldstr.replace(".", "")
carpanz = int(newstr)
print(carpanz)
print(len(newdf))
print(len(newdf2))
print(len(newdf3))
for a in bilanchos:
print(len(manu[a]))
                # processing for df1 (balance sheet)
df1 = newdf[[1,3]].dropna(subset = [1])
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.fillna(0)
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.rename(columns={1: "bilanco", 3: "ciro"})
df1['bilanco'] = df1['bilanco'].str.upper()
df1 = df1.replace({'İ':'I'},regex = True)
donen_varliklar = df1.loc[2:54]
ara_toplam_donenvarliklar = df1.loc[51].ciro
toplam_donen_varlıklar = df1.loc[54].ciro
duran_varliklar = df1.loc[55:127]
ozkaynak_yontemiyle_degerlenen_yatirimlar = df1.loc[68].ciro
toplam_duran_varliklar = df1.loc[127].ciro
toplam_varliklar = df1.loc[128].ciro
kisa_vadeli_yukumlulukler = df1.loc[131:190]
finansal_borclar = df1.loc[131].ciro
diger_finansal_yukumlulukler = df1.loc[184].ciro
musteri_soz_dogan_yuk = df1.loc[167].ciro
ertelenmis_gelirler = df1.loc[176].ciro
borc_karsiliklari = df1.loc[180].ciro
ara_toplam_kisavadeliy = df1.loc[187].ciro
toplam_kisa_vadeli = df1.loc[190].ciro
uzun_vadeli_yukumlulukler = df1.loc[192:240]
u_finansal_borclar = df1.loc[192].ciro
u_musteri_soz_dogan_yuk = df1.loc[217].ciro
u_ertelenmis_gelirler = df1.loc[226].ciro
calisanlara_saglanan_faydalara = df1.loc[230].ciro
toplam_uzun_vadeli = df1.loc[240].ciro
ozkaynaklar = df1.loc[243:294]
geçmis_yillar_kar_zararlari = df1.loc[291].ciro
net_donem_kar_zaralari = df1.loc[292].ciro
hisse_senedi_ihrac_primleri = df1.loc[251].ciro
azinlik_paylari = df1.loc[293].ciro
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
toplam_ozkaynaklar = df1.loc[294].ciro
toplam_kaynaklar = df1.loc[295].ciro
for find in range(1,13):
cost = donen_varliklar[donen_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
npsave[11][1] = int(ara_toplam_donenvarliklar.replace(".", ""))
npsave[1][1] = int(toplam_donen_varlıklar.replace(".", ""))
for find in range(13,30):
cost = duran_varliklar[duran_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatirimlar
if oldstr == 0:
npsave[19][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[19][1] = int(newstr)
npsave[13][1] = int(toplam_duran_varliklar.replace(".", ""))
npsave[29][1] = int(toplam_varliklar.replace(".", ""))
for find in range(30,45):
cost = kisa_vadeli_yukumlulukler[kisa_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = finansal_borclar
if oldstr == 0:
npsave[32][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[32][1] = int(newstr)
oldstr = diger_finansal_yukumlulukler
if oldstr == 0:
npsave[33][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[33][1] = int(newstr)
oldstr = musteri_soz_dogan_yuk
if oldstr == 0:
npsave[36][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[36][1] = int(newstr)
oldstr = ertelenmis_gelirler
if oldstr == 0:
npsave[39][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[39][1] = int(newstr)
oldstr = borc_karsiliklari
if oldstr == 0:
npsave[41][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[41][1] = int(newstr)
npsave[43][1] = int(ara_toplam_kisavadeliy.replace(".", ""))
npsave[31][1] = int(toplam_kisa_vadeli.replace(".", ""))
for find in range(45,58):
cost = uzun_vadeli_yukumlulukler[uzun_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = u_finansal_borclar
if oldstr == 0:
npsave[46][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[46][1] = int(newstr)
oldstr = u_musteri_soz_dogan_yuk
if oldstr == 0:
npsave[50][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[50][1] = int(newstr)
oldstr = u_ertelenmis_gelirler
if oldstr == 0:
npsave[53][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[53][1] = int(newstr)
oldstr = calisanlara_saglanan_faydalara
if oldstr == 0:
npsave[55][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[55][1] = int(newstr)
npsave[45][1] = int(toplam_uzun_vadeli.replace(".", ""))
for find in range(58,71):
cost = ozkaynaklar[ozkaynaklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = geçmis_yillar_kar_zararlari
if oldstr == 0:
npsave[66][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[66][1] = int(newstr)
oldstr = net_donem_kar_zaralari
if oldstr == 0:
npsave[67][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[67][1] = int(newstr)
oldstr = hisse_senedi_ihrac_primleri
if oldstr == 0:
npsave[62][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[62][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[69][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[69][1] = int(newstr)
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
npsave[68][1] = diger_ozsermaye_kalemleri
npsave[58][1] = int(toplam_ozkaynaklar.replace(".", ""))
npsave[70][1] = int(toplam_kaynaklar.replace(".", ""))
                # processing for df2 (income statement)
df2 = newdf2[[1,3]].dropna(subset = [1])
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.fillna(0)
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.rename(columns={1: "bilanco", 3: "ciro"})
df2['bilanco'] = df2['bilanco'].str.upper()
df2 = df2.replace({'İ':'I'},regex = True)
surdurulen_faaliyetler= df2.loc[0:148]
satis_gelirleri = df2.loc[2].ciro
satislerin_maliyetleri = df2.loc[3].ciro
f_u_p_k_diğer_ge = df2.loc[6].ciro
f_u_p_k_diğer_gi = df2.loc[17].ciro
f_sektoru_faaliyetlerinden_diger_kar = df2.loc[15].ciro
satis_diger_gelir_ve_giderler = df2.loc[27].ciro
pazarlama_satis_ve_dagıtım_gider = df2.loc[32].ciro
genel_yonetim_giderleri = df2.loc[31].ciro
arastirma_ve_gelistirme_giderleri = df2.loc[33].ciro
diger_faaliyet_gelirleri = df2.loc[34].ciro
diger_faaliyet_giderleri = df2.loc[35].ciro
faaliyet_kari_oncesi_diger_gelir_ve_giderl = df2.loc[36].ciro
faaliyet_kari_zarari = df2.loc[37].ciro
oldstr = faaliyet_kari_zarari
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a -( b + c)
yatirim_faaliyetlerinden_giderler = df2.loc[41].ciro
faaliyet_diger_gelir_ve_giderler = df2.loc[44].ciro
ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar = df2.loc[43].ciro
finansman_gideri_oncesi_faaliyet_kari_zarari = df2.loc[48].ciro
finansal_gelirler = df2.loc[49].ciro
finansal_giderler = df2.loc[50].ciro
surdurulen_faaliyetler_vergi_geliri = df2.loc[53].ciro
donem_vergi_geliri = df2.loc[54].ciro
ertelenmis_vergi_geliri = df2.loc[55].ciro
surdurulen_faaliyetler_donem_kari_zarari = df2.loc[56].ciro
durdurulan_faaliyetler_donem_kari_zarari = df2.loc[57].ciro
durdurulan_faaliyetler_vergi_sonrasi_donem = df2.loc[57].ciro
azinlik_paylari = df2.loc[60].ciro
for find in range(71,122):
cost = surdurulen_faaliyetler[surdurulen_faaliyetler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = satis_gelirleri
if oldstr == 0:
npsave[72][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[72][1] = int(newstr)
oldstr = satislerin_maliyetleri
if oldstr == 0:
npsave[73][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[73][1] = int(newstr)
oldstr = f_u_p_k_diğer_ge
if oldstr == 0:
npsave[76][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[76][1] = int(newstr)
oldstr = f_u_p_k_diğer_gi
if oldstr == 0:
npsave[77][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[77][1] = int(newstr)
oldstr = f_sektoru_faaliyetlerinden_diger_kar
if oldstr == 0:
npsave[78][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[78][1] = int(newstr)
oldstr = satis_diger_gelir_ve_giderler
if oldstr == 0:
npsave[80][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[80][1] = int(newstr)
oldstr = pazarlama_satis_ve_dagıtım_gider
if oldstr == 0:
npsave[82][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[82][1] = int(newstr)
oldstr = genel_yonetim_giderleri
if oldstr == 0:
npsave[83][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[83][1] = int(newstr)
oldstr = arastirma_ve_gelistirme_giderleri
if oldstr == 0:
npsave[84][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[84][1] = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
npsave[85][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[85][1] = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
npsave[86][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[86][1] = int(newstr)
oldstr = faaliyet_kari_oncesi_diger_gelir_ve_giderl
if oldstr == 0:
npsave[87][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[87][1] = int(newstr)
oldstr = faaliyet_kari_zarari
if oldstr == 0:
npsave[88][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[88][1] = int(newstr)
oldstr = df2.loc[37].ciro
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a -( b + c)
npsave[89][1] = net_faaliyet_kar_zarari
oldstr = yatirim_faaliyetlerinden_giderler
if oldstr == 0:
npsave[91][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[91][1] = int(newstr)
oldstr = faaliyet_diger_gelir_ve_giderler
if oldstr == 0:
npsave[92][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[92][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar
if oldstr == 0:
npsave[93][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[93][1] = int(newstr)
oldstr = finansman_gideri_oncesi_faaliyet_kari_zarari
if oldstr == 0:
npsave[94][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[94][1] = int(newstr)
oldstr = finansal_gelirler
if oldstr == 0:
npsave[95][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[95][1] = int(newstr)
oldstr = finansal_giderler
if oldstr == 0:
npsave[96][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[96][1] = int(newstr)
oldstr = surdurulen_faaliyetler_vergi_geliri
if oldstr == 0:
npsave[99][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[99][1] = int(newstr)
oldstr = donem_vergi_geliri
if oldstr == 0:
npsave[100][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[100][1] = int(newstr)
oldstr = ertelenmis_vergi_geliri
if oldstr == 0:
npsave[101][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[101][1] = int(newstr)
oldstr = surdurulen_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[103][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[103][1] = int(newstr)
oldstr = durdurulan_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[106][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[106][1] = int(newstr)
oldstr = durdurulan_faaliyetler_vergi_sonrasi_donem
if oldstr == 0:
npsave[105][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[105][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[108][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[108][1] = int(newstr)
                # processing for df3 (cash-flow statement)
df3 = newdf3[[1,3]].dropna(subset = [1])
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.fillna(0)
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.rename(columns={1: "bilanco", 3: "ciro"})
df3['bilanco'] = df3['bilanco'].astype(str).str.upper()
df3 = df3.replace({'İ':'I'},regex = True)
nakit_akislari = df3.loc[0:202]
amortisman_giderleri = df3.loc[6].ciro
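                # Cash-flow rows to extract from df3; the labels must match KAP's
                # upper-cased row names exactly.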
npsave2 = np.empty([12,2],dtype = object)
npsave2[0][0] = "IŞLETME FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[1][0] = "DÖNEM KARI (ZARARI)"
npsave2[2][0] = "AMORTISMAN VE ITFA GIDERI ILE ILGILI DÜZELTMELER"
npsave2[3][0] = "IŞLETME SERMAYESINDE GERÇEKLEŞEN DEĞIŞIMLER"
npsave2[4][0] = "FINANSAL YATIRIMLARDAKI AZALIŞ (ARTIŞ)"
npsave2[5][0] = "FAALIYETLERDEN ELDE EDILEN NAKIT AKIŞLARI"
npsave2[6][0] = "YATIRIM FAALIYETLERINDEN KAYNAKLANAN NAKIT AKIŞLARI"
npsave2[7][0] = "MADDI VE MADDI OLMAYAN DURAN VARLIKLARIN ALIMDAN KAYNAKLANAN NAKIT ÇIKIŞLARI"
npsave2[8][0] = "FINANSMAN FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[9][0] = "NAKIT VE NAKIT BENZERLERINDEKI NET ARTIŞ (AZALIŞ)"
npsave2[10][0] = "DÖNEM BAŞI NAKIT VE NAKIT BENZERLERI"
npsave2[11][0] = "DÖNEM SONU NAKIT VE NAKIT BENZERLERI"
for find in range(len(npsave2)):
cost = nakit_akislari[nakit_akislari["bilanco"] == npsave2[find][0]].ciro
if cost.empty:
npsave2[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave2[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave2[find][1] = int(newstr)
sistem_1 = pd.DataFrame({'BİLANÇO': npsave[:, 0], 'CİRO': npsave[:, 1]})
sistem_2 = pd.DataFrame({'BİLANÇO': npsave2[:, 0], 'CİRO': npsave2[:, 1]})
excel_aktar = sistem_1.append(sistem_2, ignore_index = True)
excel_aktar["CIRO"] = excel_aktar["CİRO"] * carpanz
app = xw.App(visible=False) # IF YOU WANT EXCEL TO RUN IN BACKGROUND
xlwb = xw.Book('Bilanco-Excel/Bilanco.xlsm')
try:
xlws = xlwb.sheets[y[0].upper()]
except:
try:
xlws = xlwb.sheets[y[0].lower()]
except:
xlwb.close()
app.kill()
olmadis.append(y[0])
continue
xlws.range("B:B").insert('right')
donem = list(excel_aktar.CİRO)
xlws.range('B2').value = donem[0]
ciro = list(excel_aktar.CIRO)
xlws.range('B3').options(transpose=True).value = ciro[1:]
xlwb.save()
xlwb.close()
app.kill()
else:
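                # Same parsing as above, repeated for report file names that contain a hyphen.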
y = excs.split("_")
z = y[0].split("-")
print(excs)
excs = str(excs) + ".xls"
npsave = np.empty([len(bilanco_isim_revize),2], dtype = object)
for i in range(len(bilanco_isim_revize)):
npsave[i][0] = bilanco_isim_revize[i]
                # extract the balance-sheet figures from the selected tables
manu = pd.read_html("Excels/"+ excs)
npsave[0][1] = str(yil) + "/" + str(donemlik)
bilanchos = []
for i in range(len(manu)):
if len(manu[i].columns) >= 5 and len(manu[i].columns) <= 8:
if len(manu[i])>2:
bilanchos.append(i)
newdf = manu[bilanchos[0]]
del bilanchos[0]
newdf3 = manu[bilanchos[-1]]
del bilanchos[-1]
if len(manu[bilanchos[0]]) == 300:
newdf2 = manu[bilanchos[0]]
else:
frames = []
for i in range(len(bilanchos)):
frames.append(manu[bilanchos[i]])
if len(frames) == 1:
newdf2 = manu[bilanchos[0]]
elif len(frames) >= 1 :
newdf2 = pd.concat(frames, ignore_index=True)
carpanx = manu[0]
carpany = carpanx[1][0]
carpanz = carpany.strip(' TL')
if not carpanz:
carpanz = 1
else:
oldstr = carpanz
if isinstance(oldstr, int):
carpanz = oldstr
else:
newstr = oldstr.replace(".", "")
carpanz = int(newstr)
print(carpanz)
print(len(newdf))
print(len(newdf2))
print(len(newdf3))
for a in bilanchos:
print(len(manu[a]))
                # processing for df1 (balance sheet)
df1 = newdf[[1,3]].dropna(subset = [1])
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.fillna(0)
df1 = df1.reset_index()
df1 = df1.drop("index",axis=1)
df1 = df1.rename(columns={1: "bilanco", 3: "ciro"})
df1['bilanco'] = df1['bilanco'].str.upper()
df1 = df1.replace({'İ':'I'},regex = True)
donen_varliklar = df1.loc[2:54]
ara_toplam_donenvarliklar = df1.loc[51].ciro
toplam_donen_varlıklar = df1.loc[54].ciro
duran_varliklar = df1.loc[55:127]
ozkaynak_yontemiyle_degerlenen_yatirimlar = df1.loc[68].ciro
toplam_duran_varliklar = df1.loc[127].ciro
toplam_varliklar = df1.loc[128].ciro
kisa_vadeli_yukumlulukler = df1.loc[131:190]
finansal_borclar = df1.loc[131].ciro
diger_finansal_yukumlulukler = df1.loc[184].ciro
musteri_soz_dogan_yuk = df1.loc[167].ciro
ertelenmis_gelirler = df1.loc[176].ciro
borc_karsiliklari = df1.loc[180].ciro
ara_toplam_kisavadeliy = df1.loc[187].ciro
toplam_kisa_vadeli = df1.loc[190].ciro
uzun_vadeli_yukumlulukler = df1.loc[192:240]
u_finansal_borclar = df1.loc[192].ciro
u_musteri_soz_dogan_yuk = df1.loc[217].ciro
u_ertelenmis_gelirler = df1.loc[226].ciro
calisanlara_saglanan_faydalara = df1.loc[230].ciro
toplam_uzun_vadeli = df1.loc[240].ciro
ozkaynaklar = df1.loc[243:294]
geçmis_yillar_kar_zararlari = df1.loc[291].ciro
net_donem_kar_zaralari = df1.loc[292].ciro
hisse_senedi_ihrac_primleri = df1.loc[251].ciro
azinlik_paylari = df1.loc[293].ciro
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
toplam_ozkaynaklar = df1.loc[294].ciro
toplam_kaynaklar = df1.loc[295].ciro
for find in range(1,13):
cost = donen_varliklar[donen_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
npsave[11][1] = int(ara_toplam_donenvarliklar.replace(".", ""))
npsave[1][1] = int(toplam_donen_varlıklar.replace(".", ""))
for find in range(13,30):
cost = duran_varliklar[duran_varliklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatirimlar
if oldstr == 0:
npsave[19][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[19][1] = int(newstr)
npsave[13][1] = int(toplam_duran_varliklar.replace(".", ""))
npsave[29][1] = int(toplam_varliklar.replace(".", ""))
for find in range(30,45):
cost = kisa_vadeli_yukumlulukler[kisa_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = finansal_borclar
if oldstr == 0:
npsave[32][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[32][1] = int(newstr)
oldstr = diger_finansal_yukumlulukler
if oldstr == 0:
npsave[33][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[33][1] = int(newstr)
oldstr = musteri_soz_dogan_yuk
if oldstr == 0:
npsave[36][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[36][1] = int(newstr)
oldstr = ertelenmis_gelirler
if oldstr == 0:
npsave[39][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[39][1] = int(newstr)
oldstr = borc_karsiliklari
if oldstr == 0:
npsave[41][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[41][1] = int(newstr)
npsave[43][1] = int(ara_toplam_kisavadeliy.replace(".", ""))
npsave[31][1] = int(toplam_kisa_vadeli.replace(".", ""))
for find in range(45,58):
cost = uzun_vadeli_yukumlulukler[uzun_vadeli_yukumlulukler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = u_finansal_borclar
if oldstr == 0:
npsave[46][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[46][1] = int(newstr)
oldstr = u_musteri_soz_dogan_yuk
if oldstr == 0:
npsave[50][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[50][1] = int(newstr)
oldstr = u_ertelenmis_gelirler
if oldstr == 0:
npsave[53][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[53][1] = int(newstr)
oldstr = calisanlara_saglanan_faydalara
if oldstr == 0:
npsave[55][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[55][1] = int(newstr)
npsave[45][1] = int(toplam_uzun_vadeli.replace(".", ""))
for find in range(58,71):
cost = ozkaynaklar[ozkaynaklar["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = geçmis_yillar_kar_zararlari
if oldstr == 0:
npsave[66][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[66][1] = int(newstr)
oldstr = net_donem_kar_zaralari
if oldstr == 0:
npsave[67][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[67][1] = int(newstr)
oldstr = hisse_senedi_ihrac_primleri
if oldstr == 0:
npsave[62][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[62][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[69][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[69][1] = int(newstr)
kalemler = df1.loc[245:281]
kalemler = kalemler["ciro"].unique()
diger_ozsermaye_kalemleri = 0
for value in kalemler:
if value == 0:
topla = 0
else:
topla = int(value.replace('.',''))
diger_ozsermaye_kalemleri = diger_ozsermaye_kalemleri + topla
npsave[68][1] = diger_ozsermaye_kalemleri
npsave[58][1] = int(toplam_ozkaynaklar.replace(".", ""))
npsave[70][1] = int(toplam_kaynaklar.replace(".", ""))
                # processing for df2 (income statement)
df2 = newdf2[[1,3]].dropna(subset = [1])
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.fillna(0)
df2 = df2.reset_index()
df2 = df2.drop("index",axis=1)
df2 = df2.rename(columns={1: "bilanco", 3: "ciro"})
df2['bilanco'] = df2['bilanco'].str.upper()
df2 = df2.replace({'İ':'I'},regex = True)
surdurulen_faaliyetler= df2.loc[0:148]
satis_gelirleri = df2.loc[2].ciro
satislerin_maliyetleri = df2.loc[3].ciro
f_u_p_k_diğer_ge = df2.loc[6].ciro
f_u_p_k_diğer_gi = df2.loc[17].ciro
f_sektoru_faaliyetlerinden_diger_kar = df2.loc[15].ciro
satis_diger_gelir_ve_giderler = df2.loc[27].ciro
pazarlama_satis_ve_dagıtım_gider = df2.loc[32].ciro
genel_yonetim_giderleri = df2.loc[31].ciro
arastirma_ve_gelistirme_giderleri = df2.loc[33].ciro
diger_faaliyet_gelirleri = df2.loc[34].ciro
diger_faaliyet_giderleri = df2.loc[35].ciro
faaliyet_kari_oncesi_diger_gelir_ve_giderl = df2.loc[36].ciro
faaliyet_kari_zarari = df2.loc[37].ciro
oldstr = faaliyet_kari_zarari
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a - (b + c)
yatirim_faaliyetlerinden_giderler = df2.loc[41].ciro
faaliyet_diger_gelir_ve_giderler = df2.loc[44].ciro
ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar = df2.loc[43].ciro
finansman_gideri_oncesi_faaliyet_kari_zarari = df2.loc[48].ciro
finansal_gelirler = df2.loc[49].ciro
finansal_giderler = df2.loc[50].ciro
surdurulen_faaliyetler_vergi_geliri = df2.loc[53].ciro
donem_vergi_geliri = df2.loc[54].ciro
ertelenmis_vergi_geliri = df2.loc[55].ciro
surdurulen_faaliyetler_donem_kari_zarari = df2.loc[56].ciro
durdurulan_faaliyetler_donem_kari_zarari = df2.loc[57].ciro
durdurulan_faaliyetler_vergi_sonrasi_donem = df2.loc[57].ciro
azinlik_paylari = df2.loc[60].ciro
for find in range(71,122):
cost = surdurulen_faaliyetler[surdurulen_faaliyetler["bilanco"] == npsave[find][0]].ciro
if cost.empty:
npsave[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[find][1] = int(newstr)
oldstr = satis_gelirleri
if oldstr == 0:
npsave[72][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[72][1] = int(newstr)
oldstr = satislerin_maliyetleri
if oldstr == 0:
npsave[73][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[73][1] = int(newstr)
oldstr = f_u_p_k_diğer_ge
if oldstr == 0:
npsave[76][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[76][1] = int(newstr)
oldstr = f_u_p_k_diğer_gi
if oldstr == 0:
npsave[77][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[77][1] = int(newstr)
oldstr = f_sektoru_faaliyetlerinden_diger_kar
if oldstr == 0:
npsave[78][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[78][1] = int(newstr)
oldstr = satis_diger_gelir_ve_giderler
if oldstr == 0:
npsave[80][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[80][1] = int(newstr)
oldstr = pazarlama_satis_ve_dagıtım_gider
if oldstr == 0:
npsave[82][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[82][1] = int(newstr)
oldstr = genel_yonetim_giderleri
if oldstr == 0:
npsave[83][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[83][1] = int(newstr)
oldstr = arastirma_ve_gelistirme_giderleri
if oldstr == 0:
npsave[84][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[84][1] = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
npsave[85][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[85][1] = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
npsave[86][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[86][1] = int(newstr)
oldstr = faaliyet_kari_oncesi_diger_gelir_ve_giderl
if oldstr == 0:
npsave[87][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[87][1] = int(newstr)
oldstr = faaliyet_kari_zarari
if oldstr == 0:
npsave[88][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[88][1] = int(newstr)
oldstr = df2.loc[37].ciro
if oldstr == 0:
a = oldstr
else:
newstr = oldstr.replace(".", "")
a = int(newstr)
oldstr = diger_faaliyet_giderleri
if oldstr == 0:
b = oldstr
else:
newstr = oldstr.replace(".", "")
b = int(newstr)
oldstr = diger_faaliyet_gelirleri
if oldstr == 0:
c = oldstr
else:
newstr = oldstr.replace(".", "")
c = int(newstr)
net_faaliyet_kar_zarari = a - (b + c)
npsave[89][1] = net_faaliyet_kar_zarari
oldstr = yatirim_faaliyetlerinden_giderler
if oldstr == 0:
npsave[91][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[91][1] = int(newstr)
oldstr = faaliyet_diger_gelir_ve_giderler
if oldstr == 0:
npsave[92][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[92][1] = int(newstr)
oldstr = ozkaynak_yontemiyle_degerlenen_yatırımlarin_kar_zarar
if oldstr == 0:
npsave[93][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[93][1] = int(newstr)
oldstr = finansman_gideri_oncesi_faaliyet_kari_zarari
if oldstr == 0:
npsave[94][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[94][1] = int(newstr)
oldstr = finansal_gelirler
if oldstr == 0:
npsave[95][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[95][1] = int(newstr)
oldstr = finansal_giderler
if oldstr == 0:
npsave[96][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[96][1] = int(newstr)
oldstr = surdurulen_faaliyetler_vergi_geliri
if oldstr == 0:
npsave[99][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[99][1] = int(newstr)
oldstr = donem_vergi_geliri
if oldstr == 0:
npsave[100][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[100][1] = int(newstr)
oldstr = ertelenmis_vergi_geliri
if oldstr == 0:
npsave[101][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[101][1] = int(newstr)
oldstr = surdurulen_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[103][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[103][1] = int(newstr)
oldstr = durdurulan_faaliyetler_donem_kari_zarari
if oldstr == 0:
npsave[106][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[106][1] = int(newstr)
oldstr = durdurulan_faaliyetler_vergi_sonrasi_donem
if oldstr == 0:
npsave[105][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[105][1] = int(newstr)
oldstr = azinlik_paylari
if oldstr == 0:
npsave[108][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave[108][1] = int(newstr)
# processing for df3
df3 = newdf3[[1,3]].dropna(subset = [1])
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.fillna(0)
df3 = df3.reset_index()
df3 = df3.drop("index",axis=1)
df3 = df3.rename(columns={1: "bilanco", 3: "ciro"})
df3['bilanco'] = df3['bilanco'].astype(str).str.upper()
df3 = df3.replace({'İ':'I'},regex = True)
nakit_akislari = df3.loc[0:202]
amortisman_giderleri = df3.loc[6].ciro
npsave2 = np.empty([12,2],dtype = object)
npsave2[0][0] = "IŞLETME FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[1][0] = "DÖNEM KARI (ZARARI)"
npsave2[2][0] = "AMORTISMAN VE ITFA GIDERI ILE ILGILI DÜZELTMELER"
npsave2[3][0] = "IŞLETME SERMAYESINDE GERÇEKLEŞEN DEĞIŞIMLER"
npsave2[4][0] = "FINANSAL YATIRIMLARDAKI AZALIŞ (ARTIŞ)"
npsave2[5][0] = "FAALIYETLERDEN ELDE EDILEN NAKIT AKIŞLARI"
npsave2[6][0] = "YATIRIM FAALIYETLERINDEN KAYNAKLANAN NAKIT AKIŞLARI"
npsave2[7][0] = "MADDI VE MADDI OLMAYAN DURAN VARLIKLARIN ALIMDAN KAYNAKLANAN NAKIT ÇIKIŞLARI"
npsave2[8][0] = "FINANSMAN FAALIYETLERINDEN NAKIT AKIŞLARI"
npsave2[9][0] = "NAKIT VE NAKIT BENZERLERINDEKI NET ARTIŞ (AZALIŞ)"
npsave2[10][0] = "DÖNEM BAŞI NAKIT VE NAKIT BENZERLERI"
npsave2[11][0] = "DÖNEM SONU NAKIT VE NAKIT BENZERLERI"
for find in range(len(npsave2)):
cost = nakit_akislari[nakit_akislari["bilanco"] == npsave2[find][0]].ciro
if cost.empty:
npsave2[find][1] = 0
else:
oldstr = cost.iloc[0]
if oldstr == 0:
npsave2[find][1] = oldstr
else:
newstr = oldstr.replace(".", "")
npsave2[find][1] = int(newstr)
sistem_1 = pd.DataFrame({'BİLANÇO': npsave[:, 0], 'CİRO': npsave[:, 1]})
sistem_2 = pd.DataFrame({'BİLANÇO': npsave2[:, 0], 'CİRO': npsave2[:, 1]})
excel_aktar = sistem_1.append(sistem_2, ignore_index = True)
excel_aktar["CIRO"] = excel_aktar["CİRO"] * carpanz
for items in z:
print(items)
app = xw.App(visible=False) # IF YOU WANT EXCEL TO RUN IN BACKGROUND
xlwb = xw.Book('Bilanco-Excel/Bilanco.xlsm')
try:
xlws = xlwb.sheets[items.upper()]
except:
try:
xlws = xlwb.sheets[items.lower()]
except:
xlwb.close()
app.kill()
olmadis.append(items)
continue
xlws.range("B:B").insert('right')
donem = list(excel_aktar.CİRO)
xlws.range('B2').value = donem[0]
ciro = list(excel_aktar.CIRO)
xlws.range('B3').options(transpose=True).value = ciro[1:]
xlwb.save()
xlwb.close()
app.kill()
self.ui.listWidget.addItems(olmadis)
self.ui.bildirim.setText("Veriler excel'e aktarildi!")
def listeyeDok(self):
df_sirket =
|
pd.read_html('Sirketler/Sirketler.xls')
|
pandas.read_html
|
#IMPORTS
import csv
import pandas as pd
import re
import nltk
import os
#LOADING FILES INTO ONE DF
PBP_data = "../nflscrapR-data/play_by_play_data/regular_season"
dfs = []
for season_file in os.listdir(PBP_data):
year = re.search("[0-9]{4}", season_file)
df = pd.read_csv(PBP_data + "/" + season_file, usecols=['desc', 'play_type', 'defteam', 'posteam']) #is this disjointing the lists?
df["year"] = year.group()
dfs.append(df)
print(season_file + " loaded.")
df = pd.concat(dfs)
df = df[df["play_type"] == "kickoff"]
#EXTRACTING DATA, CREATING NEW DF
def make_DF(texts, kicking, receiving, year):
#FIRST SENTENCE
kicker = [] #String
isTouchback = [] #bool
isOutOfBounds = [] #bool
isOnside = [] #bool
isFairCatch = [] #bool
kick_yards = [] #int
kick_start = [] #int
tb = re.compile("Touchback")
oob = re.compile("out of bounds")
fc = re.compile("fair catch")
kck_dist1 = re.compile("kicks( onside)? -?[0-9]{1,2} yard(s)?") #if you want to sort onside kicks... do it here.
kck_dist2 = re.compile("[0-9]+") #kicks from 0-anything
kck_spot1 = re.compile("from ([A-Z]{2,3} )?[0-9]{1,3} to (([A-Z]{2,3} )?-?[0-9]{1,3}|end zone)")
kck_spot2 = re.compile("-?[0-9]{1,3}") #ints, strings?
#SECOND SENTENCE
returners = [] #String
tacklers = [] #String
returnYards = [] #int
returnSpot = [] #int
isReturned = [] #bool
isAdvanced = [] #bool
isTouchdown = [] #bool
return_sent = re.compile("(\w+\.\w+((\-| |')\w+)*\.?) (((\(didn't try to advance\) )?(to ([A-Z]{1,3} )?[0-9]{1,3} )?)|((ran|pushed) ob at ([A-Z]{1,3} )?[0-9]{1,3}) )for ((-)?[0-9]{1,3} yard(s)?|(no gain))( \(((\w+\.\w+((\-| |')\w+)*\.?)(;|,) )*(\w+\.\w+((\-| |')\w+)*\.?)\))?")
returner_sent = re.compile("(\w+\.\w+((\-| |')\w+)*\.?) (((\(didn't try to advance\) )?(to ([A-Z]{1,3} )?[0-9]{1,3} )?)|((ran|pushed) ob at ([A-Z]{1,3} )?[0-9]{1,3}) )for ((-)?[0-9]{1,3} yard(s)?|(no gain))")
tackler_sent = re.compile("(((\(didn't try to advance\) )?(to ([A-Z]{1,3} )?[0-9]{1,3} )?)|((ran|pushed) ob at ([A-Z]{1,3} )?[0-9]{1,3}) )for ((-)?[0-9]{1,3} yard(s)?|(no gain))( \(((\w+\.\w+((\-| |')\w+)*\.?)(;|,) )*(\w+\.\w+((\-| |')\w+)*\.?)\))?")
advanced = re.compile("\(didn't try to advance\)")
touchdown = re.compile("TOUCHDOWN")
name = re.compile("(\w+\.\w+((\-| |')[A-Z]+[a-z]*)*\.?)")
nullified_search = re.compile("NULLIFIED")
return_spot = re.compile("to (([A-Z]{1,3} )?[0-9]{1,3} )?|((ran|pushed) ob at ([A-Z]{1,3} )?[0-9]{1,3}) ")
return_yards = re.compile("for ((-)?[0-9]{1,3} yard(s)?|(no gain))")
spot = re.compile("-?[0-9]{1,3}")
yards = re.compile("-?[0-9]{1,3}") #probably didn't need both of these... oh well!
#OTHER DATA
muffs = [] #bool
retainsMuff = [] #bool
isPenalty = [] #bool
penalizedPlayer = [] #String
penaltyYards = [] #int
penaltyType = [] #String
penaltySpot = [] #int
fumbles = [] #bool
retainsFumble = [] #bool
muff_search = re.compile("MUFFS")
muff_recover_search = re.compile("RECOVERED")
penalty_search = re.compile("PENALTY on [A-Z]{1,3}-(\w+\.\w+((\-| |')\w+)*\.?), \w+(( |-)\w+)*( \([0-9]{1,2} (Y|y)ards\))?, [0-9]{1,2} yards, enforced at [A-Z]{1,3} [0-9]{1,2}.")
penalty_search2 = re.compile(", \w+(( |-)\w+)*( \([0-9]{1,2} (Y|y)ards\))?, [0-9]{1,2} yards,")
penalty_search3 = re.compile("PENALTY on [A-Z]{1,3}-")
penaltyType_search = re.compile("\w+(( |-)\w+)*")
penaltySpot_search = re.compile("enforced at [A-Z]{1,3} [0-9]{1,2}.")
fumble_search = re.compile("FUMBLES")
recovers_search = re.compile("RECOVERED")
i=0
for text in texts: #this runs in polynomial time, don't care about optimizing
#print(text, ":", kicking[i], ":", receiving[i])
#FIRST SENTENCE
#Kicker
kicker.append(text.split(" kicks")[0])
#Touchbacks
tb_result = re.search(tb, text)
if(tb_result != None):
isTouchback.append(True)
else:
isTouchback.append(False)
#Out of Bounds
oob_result = re.search(oob, text)
if(oob_result != None):
isOutOfBounds.append(True)
else:
isOutOfBounds.append(False)
#Onside Kick
onside_result1 = re.search(kck_dist1, text)
if(not onside_result1):
print(text)
onside_result2 = re.search("onside", onside_result1.group())
if(onside_result2 == None):
isOnside.append(False)
else:
isOnside.append(True)
#Fair Catch
fc_result = re.search(fc, text)
if(fc_result != None):
isFairCatch.append(True)
else:
isFairCatch.append(False)
#Kick Distance
kck_dist_result = re.search(kck_dist1, text)
if(kck_dist_result == None):
print("ERROR")
else:
kck_dist_result = re.search(kck_dist2, kck_dist_result.group())
kick_yards.append(int(kck_dist_result.group()))
#Kick Start
ks_result1 = re.search(kck_spot1, text)
if(ks_result1 != None):
ks_result1 = re.sub("end zone", "00", ks_result1.group())
ks_result2 = re.findall(kck_spot2, ks_result1) #two item list of yardlines
if(ks_result2[0] == None):
print("ERROR")
else:
kick_start.append(int(ks_result2[0]))
#SECOND SENTENCE
#Returner
return_phrase = re.search(returner_sent, text)
if(return_phrase == None):
returners.append("no returner")
else:
returner = re.search(name, return_phrase.group())
if(returner != None):
returners.append(returner.group())
else:
returners.append("no returner")
#Tackler(s)
tackler_phrase = re.search(tackler_sent, text)
if(tackler_phrase == None):
tacklers.append("no tackler")
else:
tackler_list = re.findall(name, tackler_phrase.group())
if(tackler_list != None):
tacklers_ = []
for tackler_items in tackler_list:
tacklers_.append(tackler_items[0])
tacklers.append(tacklers_)
else:
tacklers.append("no tackler")
#Return
return_result = re.search(return_sent, text)
if(return_result != None):
isReturned.append(True)
else:
isReturned.append(False)
#Advanced
advanced_result = re.search(advanced, text)
if(advanced_result != None):
isAdvanced.append(False)
else:
isAdvanced.append(True)
#Touchdown
touchdown_result = re.search(touchdown, text)
if(touchdown_result != None):
nullified_result = re.search(nullified_search, text)
if(nullified_result):
isTouchdown.append(False)
else:
isTouchdown.append(True)
else:
isTouchdown.append(False)
#Return Spot
#kick_land - return_yards
#Return Yards
ry_result = re.search(return_yards, text)
if(ry_result):
ry = re.sub("no gain", "00", ry_result.group())
ry = re.search(yards, ry)
if(ry):
returnYards.append(int(ry.group()))
else:
print("ERROR:", ry_result.group())
else:
returnYards.append(0)
#OTHER DATA
#Muffs
muff_result = re.search(muff_search, text)
if(muff_result):
muffs.append(True)
muff_recover_result = re.search(muff_recover_search, text)
if(muff_recover_result):
retainsMuff.append(False)
else:
retainsMuff.append(True)
else:
muffs.append(False)
retainsMuff.append(True)
#Penalty, Penalized Player, Penalty Yards, Penalty Type
penalty_result = re.search(penalty_search, text)
if(penalty_result):
isPenalty.append(True)
penalizedPlayer_result = re.search(name, penalty_result.group())
if(penalizedPlayer_result):
penalizedPlayer.append(penalizedPlayer_result.group())
else:
print("ERROR")
penaltyYards_result = re.search(yards, penalty_result.group())
if(penaltyYards_result):
penalty_result3 = re.search(penalty_search3, penalty_result.group())
#HERE IS WHERE YOU NEED TO MAKE +/- YARDS
if(kicking[i] in penalty_result3.group()):
yds = -1*int(penaltyYards_result.group())
penaltyYards.append(yds)
else:
yds = int(penaltyYards_result.group())
penaltyYards.append(yds)
else:
print("ERROR")
penalty2_result = re.search(penalty_search2, text)
if(penalty2_result):
penaltyType_result = re.search(penaltyType_search, penalty2_result.group())
if(penaltyType_result):
penaltyType.append(penaltyType_result.group())
else:
print("ERROR")
else:
print("ERROR")
penaltySpot_result = re.search(penaltySpot_search, text)
if(penaltySpot_result):
penaltySpot_result2 = re.search(spot, penaltySpot_result.group())
if(penaltySpot_result2):
penaltySpot.append(100 - int(penaltySpot_result2.group()))
else:
print("ERROR")
else:
print("ERROR")
else:
isPenalty.append(False)
penalizedPlayer.append("no player")
penaltyYards.append(0)
penaltyType.append("no penalty")
penaltySpot.append(0)
#Fumbles
fumbles_result = re.search(fumble_search, text)
if(fumbles_result):
fumbles.append(True)
rf_result = re.search(recovers_search, text)
if(rf_result):
retainsFumble.append(False)
else:
retainsFumble.append(True)
else:
fumbles.append(False)
retainsFumble.append(True)
i+=1
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# The focus of the analysis is to identify the main factors in a person's decision to date someone after only a few minutes of interaction.
# Therefore, it focuses on the variable "dec" (willingness to see the person again) rather than "match" (both agreed to meet again); see the quick cross-tabulation after the data is loaded below.
#
# ## Work in Progress
#
#
# In[ ]:
import numpy as np # linear algebra
import pandas as pandas # data processing, CSV file I/O (e.g. pandas.read_csv)
#########
import seaborn as sns
import matplotlib
import numpy as numpy
import pandas as pandas
import statsmodels.api
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.stats.multicomp as multi
import scipy
import matplotlib.pyplot as plt
import warnings
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
import sklearn.metrics
from sklearn.ensemble import ExtraTreesClassifier
warnings.simplefilter(action = "ignore", category = FutureWarning)
from subprocess import check_output
print(check_output(["ls", "../../../input/annavictoria_speed-dating-experiment"]).decode("utf8"))
# In[ ]:
#Reading the data
data1 = pandas.read_csv("../../../input/annavictoria_speed-dating-experiment/Speed Dating Data.csv", encoding="ISO-8859-1")
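# Quick check of the rationale above (illustrative, not part of the original notebook;
# assumes the raw file also contains the 'match' column): "dec" is a per-rater decision,
# while "match" requires both partners to say yes, so every match falls inside dec == 1.
print(pandas.crosstab(data1['dec'], data1['match'], normalize='index').round(2))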
# In[ ]:
##Selecting Only the Relevant Variables for the Analysis
temp1=data1[['iid','gender','pid','samerace','age_o','race_o','dec_o','attr_o','sinc_o','intel_o','fun_o','amb_o','shar_o','like_o','prob_o','age','field_cd','race','imprace','imprelig','from','date','go_out','dec','attr','sinc','intel','fun','amb','shar','like','prob']]
# In[ ]:
###################################################################################################
# The next lines put all the relevant information from both partners on the same row.          ##
####################################################################################################
#Create a dataset for merging in the missing partner variables
temp2=temp1[['iid','field_cd','imprace','imprelig','from','date','go_out']]
#Rename the variables to avoid confusion with the two data frames...
temp2.columns = ['pid','field_cd_o','imprace_o','imprelig_o','from_0','date_0','go_out_o']
#Merge the two datasets to have all the variables for both the partners.
BothGenders=pandas.merge(temp1,temp2,on='pid')
BothGenders=BothGenders.drop('iid',1)
BothGenders=BothGenders.drop('pid',1)
BothGenders=BothGenders.dropna()
# In[ ]:
###############################################################
#Create new features to further analyse potential patterns   ##
###############################################################
#Difference of age between the partners instead of the "absolute" age.
BothGenders['Delta_Age']=BothGenders['age'] - BothGenders['age_o']
#Same field of career
BothGenders['SameField']=BothGenders['field_cd'] == BothGenders['field_cd_o']
#Whether both partners come from the same state.
BothGenders['SameState']=BothGenders['from'] == BothGenders['from_0']
BothGenders=BothGenders.drop('from',1)
BothGenders=BothGenders.drop('from_0',1)
# In[ ]:
#Subset the dataframe for the two genders. From now on we will use only these two datasets.
Females=BothGenders.loc[BothGenders['gender'] == 0]
Males=BothGenders.loc[BothGenders['gender'] == 1]
# In[ ]:
#Average of all the features grouped by the 'dec' factor
#Females
Females[Females.columns[:]].groupby(Females['dec']).mean().round(2)
# In[ ]:
#Males
Males[Males.columns[:]].groupby(Males['dec']).mean().round(2)
# ##ANOVA Analysis
# Females
# In[ ]:
model1 = smf.ols(formula='dec ~ C(samerace)+age_o+C(race_o)+dec_o+attr_o+sinc_o+intel_o+fun_o+amb_o+shar_o+like_o+prob_o+imprace+imprelig+date+go_out+attr+sinc+intel+fun+amb+shar+like+prob+age+age_o+Delta_Age+go_out_o+date_0+C(race)', data=Females)
results1 = model1.fit()
table = sm.stats.anova_lm(results1, typ=2)
# In[ ]:
FeaturesImportance=sorted(zip(table.F,table.index),reverse=True)
dfFemales = pandas.DataFrame(FeaturesImportance, columns=['Model.Feature_Importances_Based_on_F', 'predictors.columns'])
print("Top 10 Features with the highest F value")
print(dfFemales.head(10).round(2))
# In[ ]:
ax0=sns.barplot(y="predictors.columns", x="Model.Feature_Importances_Based_on_F", data=dfFemales,palette="Blues")
ax0.set(ylabel='Predictors', xlabel='F value',title="Female Group, F values for each Predictor")
print()
# Below, a few boxplots to visualize some of the top variables under the two conditions of "dec"
# In[ ]:
Females.boxplot(column=['like','attr','attr_o','shar','prob'], by=['dec'])
# In the literature there are many references to race in partner choice, so further explorations
# have been made...
# In[ ]:
print("if the partner are from the same race are more keen to go for a date?")
pandas.crosstab(Females.samerace,Females.dec).apply(lambda r: r/r.sum(), axis=1).round(2)
# In[ ]:
print("what are the cross selections from the different races ")
pandas.crosstab([Females.race,Females.race_o],Females.dec).apply(lambda r: r/r.sum(), axis=1).round(2)
# Black/African American 1, European/Caucasian-American 2, Latino/Hispanic American 3, Asian/Pacific Islander/Asian-American 4, Native American 5, Other 6
# The Same analysis for the Males
# In[ ]:
model1 = smf.ols(formula='dec ~ C(samerace)+age_o+C(race_o)+dec_o+attr_o+sinc_o+intel_o+fun_o+amb_o+shar_o+like_o+prob_o+imprace+imprelig+date+go_out+attr+sinc+intel+fun+amb+shar+like+prob+age+age_o+Delta_Age+go_out_o+date_0+C(race)', data=Males)
results1 = model1.fit()
table = sm.stats.anova_lm(results1, typ=2)
# In[ ]:
FeaturesImportance=sorted(zip(table.F,table.index),reverse=True)
dfMales =
|
pandas.DataFrame(FeaturesImportance, columns=['Model.Feature_Importances_Based_on_F', 'predictors.columns'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
import os
import time
def ScoreProcess():
"""
extract student score ranking percentage feature
"""
if os.path.exists('input/processed/score_train_test.csv'):
return
score_train = pd.read_table('input/train/score_train.txt', sep=',', header=None)
score_train.columns = ["stu_id","school_id","grade_rank"]
score_test =
|
pd.read_table('input/test/score_test.txt', sep=',', header=None)
|
pandas.read_table
|
import sys
import re
import pandas as pd
import os
from workflow.scripts.utils import settings
from loguru import logger
env_configs = settings.env_configs
SNAKEMAKE_LOGS = env_configs["snakemake_logs"]
logger.add(os.path.join(SNAKEMAKE_LOGS, "import_report.log"), colorize=True)
def check_logs():
import_logs = sys.argv[1]
logger.info("Reading {}", import_logs)
with open(import_logs) as f:
relDic = {"Other": 0}
for line in f:
x = re.search("(\(.*\)).*(\[.*\]).*(\(.*\))", line)
if x:
relID = x[1] + "-" + x[2] + "-" + x[3]
if relID in relDic:
relDic[relID] += 1
else:
relDic[relID] = 1
else:
relDic["Other"] += 1
rel_df =
|
pd.DataFrame([relDic])
|
pandas.DataFrame
|
import pandas as pd
from classifier import globals
from sklearn.metrics import mean_squared_error
import scipy.stats
def predict_and_evaluate(model, x_test, labels):
prediction = model.predict(x_test)
df_predictions =
|
pd.DataFrame()
|
pandas.DataFrame
|
########################################################################
# Copyright 2020 Battelle Energy Alliance, LLC ALL RIGHTS RESERVED #
# Mobility Systems & Analytics Group, Idaho National Laboratory #
########################################################################
# Location Generalizer
# Release 1.2 8/10/2021
import pyodbc
import pandas as pd
import pickle
from datetime import datetime, timedelta
import time
import math
import yaml
from pathlib import Path
import csv
import numpy as np
from sklearn.cluster import DBSCAN
from shapely import geometry
from shapely.geometry import MultiPoint
from haversine import haversine, Unit
import pynput
from pandasql import sqldf
import time
from location_generalizer.utils import parallel_func_wrapper_update_vlocation
from location_generalizer.dataclasses import StartSEColumnMappings, EndSEColumnMappings
class cfg():
with open('locationGeneralizer.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
odbcConnectionString=config['odbcConnectionString']
inputTableOrCSV= config['inputTableOrCSV']
vehiclesInChunk = config['vehiclesInChunk']
qryVehicleIDList =config['qryVehicleIDList']
qryVehicleInfo = config['qryVehicleInfo']
qryVehicleIDList = qryVehicleIDList.replace('{inputsrc}', inputTableOrCSV)
qryVehicleInfo = qryVehicleInfo.replace('{inputsrc}', inputTableOrCSV)
errorLogFileName = config['errorLogFileName']
heartbeatFileName = config['heartbeatFileName']
locationInfoFileName = config['locationInfoFileName']
homeInfoFileName = config['homeInfoFileName']
pklCensusDivisionsFileName = config['pklCensusDivisionsFileName']
evseLookupFileName = config['evseLookupFileName']
bboxes = config['boundingBoxes']
gpsOdoThreshold_mi = config['gpsOdoThreshold_mi']
minTrips = config['minTrips']
minLastTrips = config['minLastTrips']
minPctParks = config['minPctParks']
numL2Rounding = config['numL2Rounding']
numDCRounding = config['numDCRounding']
doCheck = config['doCheck']
dayEndHours = config['dayEndHours']
dayEndMinutes = config['dayEndMinutes']
dbscan_eps_ft = config['dbscan_eps_ft']
dbscan_min_spls = config['dbscan_min_spls']
evseDistRange_Miles = config['evseDistRange_Miles']
evseLatRange = config['evseLatRange']
evseLonRange = config['evseLonRange']
addClusterIDtoLocationInfo = config['addClusterIDtoLocationInfo']
hdrErrorLogCSV = config['hdrErrorLogCSV']
if addClusterIDtoLocationInfo:
hdrLocationInfoCSV = config['hdrClusterLocationInfoCSV']
else:
hdrLocationInfoCSV = config['hdrLocationInfoCSV']
hdrHomeInfoCSV = config['hdrHomeInfoCSV']
if addClusterIDtoLocationInfo:
colLocationInfo = config['colClusterLocationInfo']
else:
colLocationInfo = config['colLocationInfo']
colHomeInfo = config['colHomeInfo']
verbosity = config['verbosity']
stopProcessing = False
numCores = config['num_cores']
errFilePath = Path(errorLogFileName)
if not errFilePath.exists():
# ErrorLog output file
hdr = pd.DataFrame(hdrErrorLogCSV)
hdr.to_csv(errorLogFileName, index=False, header=False, mode='w')
# use one line buffering - every line written is flushed to disk
errorFile = open(errorLogFileName, mode='a', buffering=1, newline='')
errorWriter = csv.writer(errorFile)
def main():
# trust chained assignments (no warnings)
pd.set_option('mode.chained_assignment', None)
# LocationInfo output file
locationFilePath = Path(cfg.locationInfoFileName)
if not locationFilePath.exists():
hdr =
|
pd.DataFrame(cfg.hdrLocationInfoCSV)
|
pandas.DataFrame
|
import sys
import time
import random
import logging
import argparse
import pandas as pd
import numpy as np
from itertools import chain, repeat
from collections import Counter
from GetConfig import getConfig
config = getConfig()
'''
Description:
This module is used for statistical analysis.
Y haplotype data is required for analysis.
It can calculate commonly used statistics in population genetics.
HD: haplotype diversity
MPD: mean pairwise differences
GD: genetic distance
AMOVA: analysis of molecular variance
predict: predict haplogroup from Y-STR haplotype
'''
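# Hedged usage sketch (hypothetical command line -- the exact entry point depends on how
# Y-LineageTracker dispatches this sub-command, and the file names are placeholders),
# combining options defined in stat_parser() below:
#   ... stat --matrix haplotypes.txt -p populations.txt --hd --mpd --gd fst -o results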
def stat_parser():
parser = argparse.ArgumentParser('stat', description='(c) Y-LineageTracker: Statistical analysis')
# function used for statistical analysis
parser.add_argument('stat',
help='Perform statistical analysis from Y haplotype data.')
# required, input file, alignment sequence or matrix
input = parser.add_mutually_exclusive_group(required=True)
input.add_argument('--seq',
type=str,
action='store',
help='seq: Haplotype data in sequence alignment format.')
input.add_argument('--matrix',
type=str,
action='store',
help='matrix: Y-STR haplotype data in matrix format.')
# optional, format of sequence alignment
parser.add_argument('--seq-format',
required=False,
type=str,
dest='format',
action='store',
choices=['fasta', 'phylip', 'nexus', 'meg', 'vcf'],
help='seq-format: The format of sequence file. \
This option is only required for the sequence alignment file.')
# population file, required for calculating statistics, not required for prediction
parser.add_argument('-p', '--population',
required=False,
type=str,
action='store',
help='population: A file containing sample ID and population information of each individual. \
The population file is required in the analysis of calculating statistics, but not in the haplogroup prediction')
# optional, calculate genetic distance
parser.add_argument('--gd',
required=False,
type=str,
action='store',
choices=['fst', 'rst', 'Ngst', 'NCgst', 'Hgst', 'ibs'],
help='gd: Calculate genetic distance based on the selected statistics.')
# optional, perform analysis of molecular variance
parser.add_argument('--amova',
required=False,
type=str,
action='store',
nargs='?',
choices=['fst', 'rst'],
help='amova: Get the AMOVA table and calculate Fst or Rst of pairwise populations based on AMOVA and give the p values. \
The default is fst.')
# optional, calculate haplotype diversity
parser.add_argument('--hd',
required=False,
action='store_true',
help='hd: Calculate haplotype diversity of each population.')
# optional, calculate mean pairwise differences
parser.add_argument('--mpd',
required=False,
action='store_true',
help='mpd: calculate mean pairwise differences within and between populations.')
# optional, perform prediction anlaysis from Y-STR haplotype
parser.add_argument('--predict',
required=False,
action='store_true',
help='predict: predict possible NRY haplogroups and give the probability of each haplogroup from Y-STR haplotype data by Bayesian approach. \
This analysis only supports Y-STR haplotype data.')
# optional, the prefix of output
parser.add_argument('-o', '--output',
required=False,
type=str,
action='store',
help='output: The prefix of output files.')
args = parser.parse_args()
return args
# print program information and write to log file
def set_log(args_log, log_file):
logger = logging.getLogger()
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler(log_file, mode='w')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s] - [%(levelname)s]: %(message)s')
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(handler)
logger.addHandler(console)
log_info = ['[Y-LineageTracker] [Stat]',
'[Y-LineageTracker] Run Date: ' + time.asctime(time.localtime(time.time())),
'[Y-LineageTracker] Haplotype File: %s' % args_log.seq,
'[Y-LineageTracker] Haplotype File Format: %s' % args_log.format]
if args_log.mpd:
log_info.append('[Y-LineageTracker] Calculate mean pairwise differences')
if args_log.hd:
log_info.append('[Y-LineageTracker] Calculate haplotype diversity')
if args_log.amova:
log_info.append('[Y-LineageTracker] Calculate %s based on AMOVA' % args_log.amova)
if args_log.predict:
log_info.append('[Y-LineageTracker] Predict haplogroup from Y-STR data')
print('\n')
for i in log_info:
logger.info(i)
# check input command
def check_commands(arguments):
if arguments.mpd or arguments.hd or arguments.amova or arguments.gd:
if not arguments.population:
print('[Y-LineageTracker] Population file required for specified analysis')
sys.exit()
else:
if not arguments.predict:
print('[Y-LineageTracker] No command for analysis')
sys.exit()
# check amova command
def check_amova(amova, arguments):
if amova == None:
if '--amova' in arguments:
amova = 'fst'
else:
amova = amova
return amova
class StatisticsAnalysis(object):
'''
This class includes commonly used statistical methods for Y-chromosome analysis:
1. HD: haplotype diversity
2. MPD: mean pairwise differences
3. GD: genetic distance
4. AMOVA: analysis of molecular variance
For analyses 2-4, p-values are calculated by permutation.
'''
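# Note on the permutation scheme referred to above (descriptive only, no extra logic):
# the observed statistic is compared against the same statistic recomputed after randomly
# shuffling the population labels, and the one-sided p-value is approximated as
# count(permuted >= observed) / (n_permutations + 1); see _get_gd_and_p() below.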
def __init__(self, label, haplotype_data, population_data):
self.logger = logging.getLogger()
self.label = label
if label == 'Population':
self.label_num = 1
else:
self.label_num = 2
self.skip_letters = config.get('Statistics', 'SkipLetters').split(',')
self.haplotype_array = np.array(haplotype_data)
self.haplotype_array_with_inds = haplotype_data.reset_index().to_numpy()
self.population_array = np.array(population_data)
self.header = haplotype_data.columns.tolist()
self.pops = sorted(list(set(population_data[label])))
# calculate distance between two sequence
def _pairwise_distance(self, seq1, seq2, type):
if type == 'num_of_diff_alleles':
distance = sum(l1 != l2 for l1, l2 in zip(seq1, seq2) if l1 not in self.skip_letters and l2 not in self.skip_letters)
elif type == 'sum_of_squared_diff':
distance = sum((int(l1)-int(l2))**2 for l1, l2 in zip(seq1, seq2) if l1 not in self.skip_letters and l2 not in self.skip_letters)
return distance
# get haplotype data of a population
def _get_population_data(self, pop, population_data=None):
# string type: one population, list type: two populations or more
if isinstance(population_data, np.ndarray):
population_data = population_data
else:
population_data = self.population_array
if isinstance(pop, str):
pop_inds = population_data[population_data[:, self.label_num]==pop][:, 0]
elif isinstance(pop, list):
pop_inds = population_data[list(map(lambda x: x in pop, population_data[:, self.label_num]))][:, 0]
pop_haplotype_data = self.haplotype_array[list(map(lambda x: x in pop_inds, self.haplotype_array_with_inds[:, 0]))]
return pop_haplotype_data
# calculate haplotype frequency of a population
def _calculate_haplotype_freq(self, pop_haplotype_data):
pop_all_haps = [tuple(i) for i in pop_haplotype_data] # get all haplotypes
pop_haps = sorted(list(set(pop_all_haps))) # get haplotypes without duplication
hap_freq = [pop_all_haps.count(i) / len(pop_all_haps) for i in pop_haps] # calculate freq of each haplotype for the population
return pop_haps, hap_freq
# calculate allele frequency of a population
def _calculate_allele_freq(self, pops, population_data, pops_haplotype_data):
# get allele frequency, sample size of each population
pops_count = []
sample_sizes = []
max_len = max([len(set(pops_haplotype_data[:, i])) for i in range(len(self.header))])
for pop in pops:
pop_haplotype_data = self._get_population_data(pop, population_data)
single_pop_count = []
sample_sizes.append(len(pop_haplotype_data))
for allele in range(len(self.header)):
all_alleles = sorted(list(set(pops_haplotype_data[:, allele])))
allele_count = [list(pop_haplotype_data[:, allele]).count(i) for i in all_alleles]
if len(allele_count) < max_len:
diff = max_len - len(allele_count)
allele_count += [np.nan] * diff
single_pop_count.append(allele_count)
pops_count.append(single_pop_count)
pops_count = np.array(pops_count)
sample_sizes = np.array(sample_sizes)
return pops_count, sample_sizes
# calculate haplotype diversity of a population
def _haplotype_diversity(self, pop, type, allele=None):
pop_haplotype_data = self._get_population_data(pop)
n = len(pop_haplotype_data)
if type == 'single':
hap_freq = (np.unique(pop_haplotype_data[:, allele], return_counts=True)[1]/len(pop_haplotype_data[:, allele]))
elif type == 'haplotype':
pop_haps, hap_freq = self._calculate_haplotype_freq(pop_haplotype_data)
if n == 1:
return 'NA', 'NA'
# calculate single Y-STR and Y-STR haplotype diversity
sub = 0
for i in hap_freq:
sub += i**2
diversity = (n/(n-1)) * (1-sub)
# calculate var and sd
sub1 = 0
sub2 = 0
for i in hap_freq:
sub1 += i**3
sub2 += i**2
v1 = 2.0 / (n*(n-1))
v2 = 2.0 * (n-2) * (sub1-sub2**2)
var = v1 * (v2+sub2-sub2**2)
sd = var**0.5
return diversity, sd
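# Worked example (illustrative only): for n = 4 samples with haplotype frequencies
# 0.5, 0.25 and 0.25, diversity = n/(n-1) * (1 - sum(p_i**2)) = 4/3 * (1 - 0.375) ~ 0.8333.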
# get haplotype diversity of all populations
def _calculate_haplotype_diversity(self):
hd_df = pd.DataFrame(index=self.pops, columns=['Haplotype']+self.header)
for pop in self.pops:
for n, allele in enumerate(self.header):
diversity, sd = self._haplotype_diversity(pop, 'single', n)
if diversity == 'NA':
    hd_df.at[pop, allele] = 'NA'
else:
    hd_df.at[pop, allele] = str(round(diversity, 5))+'±'+str(round(sd, 5))
diversity, sd = self._haplotype_diversity(pop, 'haplotype')
if diversity == 'NA':
hd_df.at[pop, 'Haplotype'] = 'NA'
else:
hd_df.at[pop, 'Haplotype'] = str(round(diversity, 5))+'±'+str(round(sd, 5))
return hd_df
# calculate mean pairwise differences of between two populations
def _between_mean_pairwise_differences(self, pop1, pop2):
pop1_data = self._get_population_data(pop1)
pop2_data = self._get_population_data(pop2)
n = len(pop1_data) + len(pop2_data)
pop1_haps, hap1_freq = self._calculate_haplotype_freq(pop1_data)
pop2_haps, hap2_freq = self._calculate_haplotype_freq(pop2_data)
pi = 0
for i in range(len(pop1_haps)):
Sub = 0
for j in range(len(pop2_haps)):
hap1 = pop1_haps[i]
hap2 = pop2_haps[j]
d = self._pairwise_distance(hap1, hap2, 'num_of_diff_alleles')
p_i = hap1_freq[i]
p_j = hap2_freq[j]
Sub += p_i*p_j*d
pi += Sub
if n > 6:
# calculate var ans sd
sub1 = 3 * n * (n+1)
sub2 = 2 * (n**2+n+3)
sub3 = 11 * (n**2-7*n+6)
var = float(sub1*pi+sub2*pi**2) / sub3
sd = var**0.5
else:
sd = 'NA'
return pi, sd
# calculate mean pairwise difference within a population
def _within_mean_pairwise_differences(self, pop):
pop_haplotype_data = self._get_population_data(pop)
n = len(pop_haplotype_data)
pop_haps, hap_freq = self._calculate_haplotype_freq(pop_haplotype_data)
# n: sample size
# hap_freq: frequence of haplotypes of two population
if round(sum(hap_freq), 1) != 1.0:
return 'NA', 'NA'
if n <= 2:
return 'NA', 'NA'
pi = 0
k = len(pop_haps)
for i in range(k):
Sub = 0
for j in range(k):
hap1 = pop_haps[i]
hap2 = pop_haps[j]
d = self._pairwise_distance(hap1, hap2, 'num_of_diff_alleles')
p_i = hap_freq[i]
p_j = hap_freq[j]
Sub += p_i*p_j*d
pi += Sub
pi = (float(n)/(n-1)) * pi
if n > 6:
# calculate var ans sd
sub1 = 3 * n * (n+1)
sub2 = 2 * (n**2+n+3)
sub3 = 11 * (n**2-7*n+6)
var = float(sub1*pi+sub2*pi**2) / sub3
sd = var**0.5
else:
sd = 'NA'
return pi, sd
# get mean pairwise difference of all populations
def _calculate_mean_pairwise_differences(self):
mpd_df = pd.DataFrame(index=self.pops, columns=self.pops)
pop_num = 0
for pop1 in self.pops:
for pop2 in self.pops[pop_num:]:
if pop1 == pop2:
pi, sd = self._within_mean_pairwise_differences(pop1)
else:
pi, sd = self._between_mean_pairwise_differences(pop1, pop2)
if pi == 'NA':
mpd_df.at[pop2, pop1] = 'NA'
else:
if sd == 'NA':
mpd_df.at[pop2, pop1] = str(round(pi, 5))+'±'+'NA'
else:
mpd_df.at[pop2, pop1] = str(round(pi, 5))+'±'+str(round(sd, 5))
pop_num += 1
return mpd_df
# claculate pairwise Fst
# <NAME>. and <NAME>. (2002) Estimating F-statistics. Annual Review of Genetics, 36, 721–750.
def _fst(self, pops, population_data, pops_haplotype_data):
# get allele frequency, sample size of each population
pops_count, sample_sizes = self._calculate_allele_freq(pops, population_data, pops_haplotype_data)
# calculate MSP and MSG to get theta statistic
pops_freq = [np.array(i) / j for i, j in zip(pops_count, sample_sizes)]
mean_freq = np.sum([np.array(i) for i in pops_count]) / np.sum(sample_sizes)
MSP = np.sum([i*((j-mean_freq)**2) for i, j in zip(sample_sizes, pops_freq)], axis=0)
MSG1 = float(1/np.sum([i-1 for i in sample_sizes]))
MSG2 = np.sum([np.array(i)*(1-j) for i, j in zip(pops_count, pops_freq)], axis=0)
MSG = MSG1 * MSG2
nc = np.sum(sample_sizes) - np.sum(sample_sizes**2)/np.sum(sample_sizes)
theta1 = np.nansum(MSP)-np.nansum(MSG)
theta2 = np.nansum(MSP)+(nc-1)*np.nansum(MSG)
if theta2 == 0:
theta = 0
else:
theta = (theta1 / theta2)
return theta
# calculate pairwise Rst
# <NAME>. (1995) A measure of population subdivision based on microsatellite allele frequencies. Genetics, 139, 457–462.
def _rst(self, pops, population_data, distance_matrix):
n = len(distance_matrix)
pop_num = len(pops)
expr = 2 / (pop_num*(pop_num-1))
level_labels = population_data[0:, self.label_num]
labels_num = []
for i in Counter(level_labels).values():
labels_num += [i]*i
level_labels_code = [sorted(list(set(level_labels))).index(i) for i in level_labels]
# calculate SW
SW = 0
bool1 = np.equal.outer(level_labels_code, level_labels_code)
for i in range(pop_num):
bool2 = np.tile(np.array(level_labels_code) == i, (n, 1))
bool_SW = bool1 & bool2 & bool2.T
SW += np.sum(distance_matrix[bool_SW]) / np.sum(bool_SW)
SW = SW / pop_num
# calculate SB
SB = 0
bool3 = np.not_equal.outer(level_labels_code, level_labels_code)
for i in range(pop_num-1):
for j in range(i+1, pop_num):
bool4 = np.tile(np.array(level_labels_code) == i, (n, 1)) | np.tile(np.array(level_labels_code) == j, (n, 1))
bool_SB = bool3 & bool4 & bool4.T
SB += np.sum(distance_matrix[bool_SB]) / np.sum(bool_SB)
SB = SB * expr
n_bar = np.mean(list(Counter(level_labels).values()))
denom = np.sum(list(Counter(level_labels).values())) - 1
S_bar = (SW*(n_bar-1)/denom) + (SB*(n_bar*(pop_num-1))/denom)
res = (S_bar-SW) / S_bar
return res
# calculate hs standard hs and ht of Gst
def _gst(self, pops, population_data, pops_haplotype_data):
# get allele frequency, sample size of each population
pops_count, sample_sizes = self._calculate_allele_freq(pops, population_data, pops_haplotype_data)
pops_freq = np.array([np.array(i) / j for i, j in zip(pops_count, sample_sizes)])
hs = 1 - np.nansum(np.nansum((pops_freq**2), axis=0), axis=1)/len(pops)
ht = 1 - np.nansum((np.nansum(pops_freq**2, axis=2)/len(pops))**2, axis=0)
return hs, ht, sample_sizes
# calculate Nei Gst
# <NAME> (1973) Analysis of Gene Diversity in Subdivided Populations. Proc. Nat. Acad. Sci., 70, 3321-3323.
def _Nei_gst(self, pops, population_data, pops_haplotype_data):
hs, ht, sample_sizes = self._gst(pops, population_data, pops_haplotype_data)
Nei_gst = 1 - (np.mean(hs)/np.mean(ht))
return Nei_gst
# calculate Nei and Chesser Gst
# <NAME>, <NAME> (1983) Estimation of fixation indices and gene diversity. Annals of Human Genetics, 47, 253-259.
def _Nei_Chesser_gst(self, pops, population_data, pops_haplotype_data, return_flag=False):
hs, ht, sample_sizes = self._gst(pops, population_data, pops_haplotype_data)
nm = len(pops) / np.sum(1/sample_sizes)
Hs = (nm/(nm-1)) * hs
Ht = ht + Hs/(nm*len(pops))
Nei_Chesser_gst = 1 - np.mean(Hs)/np.mean(Ht)
if return_flag:
return Hs, Nei_Chesser_gst
else:
return Nei_Chesser_gst
# calculate Hedrick Gst
# <NAME> (2005) A standardized genetic differentiation measure. Evolution, 59, 1633-1638.
def _Hedrick_gst(self, pops, population_data, pops_haplotype_data):
Hs, Nei_Chesser_gst = self._Nei_Chesser_gst(pops, population_data, pops_haplotype_data, True)
Hedrick_gst = Nei_Chesser_gst * (len(pops)-1+np.mean(Hs))/((len(pops)-1)*(1-np.mean(Hs)))
return Hedrick_gst
# get pairwise genetic distance of two populations
def _get_gd_and_p(self, pops, gd_func):
pops_haplotype_data = self._get_population_data(pops, self.population_array) # haplotype data of specific populations
population_data = self.population_array[list(map(lambda x: x in pops, self.population_array[:, self.label_num]))] # population fata of specific populations
# calculate genetic distance (Fst, Rst or Dst)
if gd_func == self._rst:
inds = list(range(len(pops_haplotype_data)))
distance_matrix = np.zeros(shape=(len(inds), len(inds)))
for ind1 in inds:
seq1 = pops_haplotype_data[ind1]
for ind2 in inds:
if ind1 == ind2:
distance = 0.0
else:
seq2 = pops_haplotype_data[ind2]
distance = self._pairwise_distance(seq1, seq2, 'sum_of_squared_diff')
distance_matrix[ind1, ind2] = distance
gd = gd_func(pops, population_data, distance_matrix)
else:
gd = gd_func(pops, population_data, pops_haplotype_data)
# permutation to get genetic distance
permuted_index = list(range(0, len(population_data)))
permuted_population_data = population_data.copy()
permuted_gds = []
for i in range(1000):
random.shuffle(permuted_index)
permuted_pops = population_data[permuted_index, self.label_num]
permuted_population_data[:, self.label_num] = permuted_pops
if gd_func == self._rst:
permuted_gd = gd_func(pops, permuted_population_data, distance_matrix)
else:
permuted_gd = gd_func(pops, permuted_population_data, pops_haplotype_data)
permuted_gds.append(permuted_gd)
# calculate p-value
p_value = np.sum(np.array(permuted_gds) >= gd) / (1000+1)
p_value = round(p_value, 5)
if p_value == 0:
lowest_p_value = round(1/1001, 5)
p_value = '<%s' % lowest_p_value
return gd, p_value
# get pairwise and global genetic distance of all populations
def _calculate_gd(self, gd_type):
# create empty genetic distance matrix
gd_df =
|
pd.DataFrame(index=self.pops, columns=self.pops)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
# In[2]:
train_encoded = pd.read_csv("../data/train_store_encoded_onehot.csv")
# In[3]:
train_df = pd.read_csv("../data/train.csv")
store_df = pd.read_csv("../data/store.csv")
# In[4]:
cate_df = store_df.apply(lambda x: (x["Store"], x["StoreType"] + x["Assortment"]), axis = 1).map(lambda x: x[-1]).copy().reset_index()
cate_df.columns = ["Store", "cate"]
cate_df["Store"] = cate_df["Store"] + 1
# In[5]:
def calculate_days_num(data_df, cate_df):
import gc
data_df["Date"] = pd.to_datetime(data_df["Date"])
merge_df = pd.merge(data_df[["Date", "Store", "Sales"]], cate_df, on = "Store", how = "inner")
print("merge_df shape : {}".format(merge_df.shape))
from functools import reduce
ordered_intersection_dates = sorted(pd.to_datetime(sorted(reduce(lambda a, b: a.intersection(b),map(lambda x: set(x.tolist()),merge_df.groupby("cate").apply(dict).map(lambda inner_dict:inner_dict["Date"]).values.tolist())))))
ordered_intersection_dates = pd.Series(ordered_intersection_dates)
#return ordered_intersection_dates
sales_date_intersection = merge_df.copy()
del merge_df
gc.collect()
sales_date_intersection = sales_date_intersection[sales_date_intersection["Date"].isin(ordered_intersection_dates)].copy()
def transform_dict_to_df(row):
Store, dict_ = row["cate"], row[0]
Date = dict_["Date"].tolist()
Sales = dict_["Sales"].tolist()
df = pd.DataFrame(list(zip(*[Date, Sales])))
df.columns = ["Date", Store]
return df
before_reduce_list = sales_date_intersection.groupby("cate").apply(dict).reset_index().apply(
transform_dict_to_df
, axis = 1).values.tolist()
#return before_reduce_list
before_reduce_list = list(map(lambda x: x.groupby("Date").sum().reset_index(), before_reduce_list))
sales_cate_format_df = reduce(lambda a, b: pd.merge(a, b, on = "Date", how = "inner"), before_reduce_list)
return sales_cate_format_df
# In[6]:
sales_cate_format_df = calculate_days_num(train_df, cate_df[cate_df["cate"].isin(cate_df["cate"].value_counts()[cate_df["cate"].value_counts() > 70].index.tolist())])
# In[7]:
sales_cate_format_df["total"] = sales_cate_format_df.iloc[:, 1:].apply(lambda x: x.sum(), axis = 1)
# In[8]:
from functools import reduce
sales_cate_format_df_up = sales_cate_format_df[sales_cate_format_df.iloc[:, 1:].apply(lambda x: reduce(lambda a, b: a * b ,map(int,map(bool, x))), axis = 1) > 0]
# In[9]:
df = sales_cate_format_df_up.copy()
df.index = pd.to_datetime(df["Date"])
dates = df["Date"].copy()
del df["Date"]
df = df.asfreq("D")
df = df.interpolate(method = "linear")
# In[10]:
before_reduce_by_cate_df = pd.merge(cate_df, train_encoded, on = "Store", how = "inner")
# In[11]:
before_reduce_by_cate_df["id"] = before_reduce_by_cate_df[["cate", "Date"]].apply(lambda x: "{}_{}".format(x["cate"], x["Date"]), axis = 1)
# In[12]:
reduce_by_id = before_reduce_by_cate_df[set(before_reduce_by_cate_df.columns.tolist()).difference(set(["Store", "cate"]))].groupby("id").apply(dict)
# In[13]:
def produce_agg_measure(same_id_df, agg_funcs = {"max":np.max, "min":np.min, "count":len, "mean":np.mean}):
if "id" in same_id_df.columns.tolist():
del same_id_df["id"]
same_id_df["Date"] = pd.to_datetime(same_id_df["Date"]).map(lambda x: (x - pd.to_datetime("1970-01-01")).days)
agg_series_dict = dict(map(lambda t2: (t2[0] ,same_id_df.apply(t2[-1], axis = 0)), agg_funcs.items()))
def rename_index(s, agg_name):
s.index = list(map(lambda index: "{}_{}".format(index, agg_name) ,s.index.tolist()))
return s
agg_series_dict = dict(map(lambda t2: (t2[0] ,rename_index(t2[1], t2[0])), agg_series_dict.items()))
return pd.concat(list(agg_series_dict.values()), axis = 0)
# In[14]:
data_part = pd.concat(reduce_by_id.map(lambda dict_: produce_agg_measure(pd.DataFrame.from_dict(dict(map(lambda t2: (t2[0], t2[1].tolist()) ,dict_.items()))))).tolist(), axis = 1)
data_part.columns = reduce_by_id.index.tolist()
# In[17]:
def retrieve_data(input_df, cate):
req_part = input_df[list(filter(lambda col: col.startswith(cate), input_df.columns.tolist()))].copy()
req_part.columns = list(map(lambda col: col[3:], req_part.columns.tolist()))
req_part = req_part.T
req_part.columns = list(map(lambda col: "{}_{}".format(col, cate), req_part.columns.tolist()))
req_part.index = pd.to_datetime(req_part.index)
return req_part
# In[18]:
lookup_dict = dict(map(lambda col: (col, retrieve_data(data_part, col)) ,set(df.columns.tolist()).difference(set(["total"]))))
# In[20]:
def retrieve_total_part(lookup_dict):
from functools import reduce
colnames = list(map(lambda x: x[:-3], list(lookup_dict.values())[0].columns.tolist()))
keys = list(lookup_dict.keys())
cols = list(set(map(lambda x: x[:x.rfind("_")], colnames)))
aggs = list(set(map(lambda x: x[x.rfind("_") + 1:], colnames)))
vals_list = []
for col in cols:
for agg_name in aggs:
req = []
for cate_key in keys:
s = lookup_dict[cate_key]["{}_{}_{}".format(col, agg_name, cate_key)]
req.append(s)
if agg_name == "max":
val_s = pd.concat(req, axis = 1).dropna().apply(np.max, axis = 1)
elif agg_name == "min":
val_s = pd.concat(req, axis = 1).dropna().apply(np.min, axis = 1)
elif agg_name == "count":
val_s = pd.concat(req, axis = 1).dropna().apply(np.sum, axis = 1)
else:
val_s = pd.concat(req, axis = 1).dropna().apply(np.mean, axis = 1)
val_s.name = "{}_{}_{}".format(col, agg_name, "total")
vals_list.append(val_s)
return pd.concat(vals_list, axis = 1)
# In[21]:
total_data_part = retrieve_total_part(lookup_dict)
# In[23]:
lookup_df = reduce(lambda a, b: pd.merge(a, b, left_index=True, right_index = True, how = "inner"), lookup_dict.values())
# In[25]:
total_data_part_asfreq_D = total_data_part.asfreq("D").sort_index().fillna(method = "pad")
# In[26]:
lookup_df_asfreq_D = lookup_df.asfreq("D").sort_index().fillna(method = "pad")
# In[27]:
df_add_lookup =
|
pd.merge(df, lookup_df_asfreq_D, left_index = True, right_index =True, how = "inner")
|
pandas.merge
|
import datetime as dt
import blpapi
import logging
from .BbgRefDataService import BbgRefDataService
import pandas as pd
import numpy as np
from . import BbgLogger
import pytz
from tzlocal import get_localzone
logger = BbgLogger.logger
SECURITY_DATA = blpapi.Name("securityData")
SECURITY = blpapi.Name("security")
FIELD_DATA = blpapi.Name("fieldData")
FIELD_EXCEPTIONS = blpapi.Name("fieldExceptions")
FIELD_ID = blpapi.Name("fieldId")
ERROR_INFO = blpapi.Name("errorInfo")
BAR_DATA = blpapi.Name("barData")
BAR_TICK_DATA = blpapi.Name("barTickData")
OPEN = blpapi.Name("open")
HIGH = blpapi.Name("high")
LOW = blpapi.Name("low")
CLOSE = blpapi.Name("close")
VOLUME = blpapi.Name("volume")
NUM_EVENTS = blpapi.Name("numEvents")
TIME = blpapi.Name("time")
class BbgIntradayBar(BbgRefDataService):
def __init__(self, securities, startTime, endTime, event = "TRADE", barInterval = 60, timeZone = str(get_localzone()), gapFillInitialBar = False, adjustmentSplit = True, adjustmentAbnormal = False, adjustmentNormal = False, adjustmentFollowDPDF = True):
'''
Bloomberg Intraday Bar query object. Allows user to input a list of securities retrieval over a specified time period subject to the usual constraints that apply to Bloomberg Intraday Bar data retrieval.
Parameters
----------
securities : tuple, list, or ndarray
List of Bloomberg tickers to retrieve data for. If one item is passed this can be input as a string, otherwise inputs must be passed as a list or array-like.
startTime : datetime.datetime
The start date and time at which to retrieving data from. Must be passed as a datetime.
endTime : datetime.datetime
The end date and time at which to retrieving data from. Must be passed as a datetime.
event : string
Defines the market event supplied for an intraday request. Could be TRADE, BID or ASK. If no event is passed, will default to TRADE.
barInterval : integer
Sets the length of each time-bar in the response. Entered as a whole number (between 1 and 1,440 minutes). If omitted, the request will default to 60 minutes. One minute is the lowest possible granularity.
timeZone : string
Timezone for the request based on the pytz package timezone names. If no timezone is passed, will default to current system timezone.
gapFillInitialBar : bool
Adjust historical pricing to reflect: Special Cash, Liquidation, Capital Gains, Long-Term Capital Gains, Short-Term Capital Gains, Memorial, Return of Capital, Rights Redemption, Miscellaneous, Return Premium, Preferred Rights Redemption, Proceeds/Rights, Proceeds/Shares, Proceeds/Warrants
adjustmentSplit : bool
Adjust historical pricing and/or volume to reflect: Spin-Offs, Stock Splits/Consolidations, Stock Dividend/Bonus, Rights Offerings/Entitlement. If not set, will be set to True.
adjustmentAbnormal : bool
Adjust historical pricing to reflect: Special Cash, Liquidation, Capital Gains, Long-Term Capital Gains, Short-Term Capital Gains, Memorial, Return of Capital, Rights Redemption, Miscellaneous, Return Premium, Preferred Rights Redemption, Proceeds/Rights, Proceeds/Shares, Proceeds/Warrants. If not set, will be set to False.
adjustmentNormal : bool
Adjust historical pricing to reflect: Regular Cash, Interim, 1st Interim, 2nd Interim, 3rd Interim, 4th Interim, 5th Interim, Income, Estimated, Partnership Distribution, Final, Interest on Capital, Distribution, Prorated. If not set, will be set to False.
adjustmentFollowDPDF : bool
Setting to True will follow the DPDF <GO> Terminal function. True is the default setting for this option. If not set, will be set to True.
See Also
--------
BbgIntradayBar.constructDf : Constructor method, retrieves data associated with a BbgDataPoint query object and generates a dataframe from it.
BbgDataPoint : Retrieve single point static, calculated or other reference data.
BbgIntradayTick : Retrieve historic tick-level data for a given security.
BbgIntradayBar : Retrieve historic bar level data for a given security (open, high, low and close) for a specified time interval given in minutes.
Examples
--------
Retrieve open, high, low, close, volume, number of events and value data for a basket of securities between two datetimes.
>>> import datetime as dt
>>> import pandas as pd
>>> import BloombergDataModule as bbg
>>> futHist = bbg.BbgIntradayBar(securities = ["YMH0 Comdty", "XMH0 Comdty"], startTime = dt.datetime(2020, 1, 31, 9, 0, 0), endTime = dt.datetime(2020, 1, 31, 12, 0, 0), barInterval = 5)
>>> futHist.constructDf().head()
Field open high low close volume numEvents value
Security time
YMH0 Comdty 2020-01-31 09:10:00+11:00 99.37 99.375 99.37 99.375 149 3 14806.3
2020-01-31 09:15:00+11:00 99.375 99.38 99.375 99.38 1749 13 173807
2020-01-31 09:20:00+11:00 99.38 99.38 99.38 99.38 6 6 596.28
2020-01-31 09:25:00+11:00 99.38 99.38 99.375 99.38 2170 35 215655
2020-01-31 09:30:00+11:00 99.38 99.38 99.375 99.38 93 3 9241.89
'''
self.securities = list(securities) if type(securities) is not list else securities
self.startTime = startTime
self.endTime = endTime
self.event = event
self.barInterval = barInterval
self.timeZone = timeZone
self.gapFillInitialBar = gapFillInitialBar
self.adjustmentSplit = adjustmentSplit
self.adjustmentAbnormal = adjustmentAbnormal
self.adjustmentNormal = adjustmentNormal
self.adjustmentFollowDPDF = adjustmentFollowDPDF
def constructDf(self):
'''
The constructDf method retrieves data associated with a BbgIntradayBar query object and generates a dataframe from it.
Parameters
----------
None
Returns
-------
table : DataFrame
Raises
------
ValueError:
Blah blah blah
See Also
--------
BbgDataHistory.constructDf : retrieves static history data and constructs a DataFrame from it. It has more customisability with respect to overrides
BbgIntradayTick.constructDf: retrieves intraday (or multi-day) tick level data and constructs a dataframe from it. It has applications in more data intensive and granular analysis
BbgDataPoint.constructDf: retrieves intraday (or multi-day) bar level (open-high-low-close) data and constructs a dataframe from it. It is for use in more data intensive and granular analysis.constructDf. The bar interval frequency can be specified in minutes to optimise for efficiency and speed.
Notes
-----
Blah blah blah
Examples
--------
Retrieve open, high, low, close, volume, number of events and value data for a basket of securities between two datetimes.
>>> import datetime as dt
>>> import pandas as pd
>>> import BloombergDataModule as bbg
>>> futHist = bbg.BbgIntradayBar(securities = ["YMH0 Comdty", "XMH0 Comdty"], startTime = dt.datetime(2020, 1, 31, 9, 0, 0), endTime = dt.datetime(2020, 1, 31, 12, 0, 0), barInterval = 5)
>>> futHist.constructDf().head()
Field open high low close volume numEvents value
Security time
YMH0 Comdty 2020-01-31 09:10:00+11:00 99.37 99.375 99.37 99.375 149 3 14806.3
2020-01-31 09:15:00+11:00 99.375 99.38 99.375 99.38 1749 13 173807
2020-01-31 09:20:00+11:00 99.38 99.38 99.38 99.38 6 6 596.28
2020-01-31 09:25:00+11:00 99.38 99.38 99.375 99.38 2170 35 215655
2020-01-31 09:30:00+11:00 99.38 99.38 99.375 99.38 93 3 9241.89
'''
BbgRefDataService.__init__(self)
        self.bbgRefData = pd.DataFrame()
def flatten(foo):
# Taken from https://stackoverflow.com/a/5286571
for x in foo:
if hasattr(x, '__iter__') and not isinstance(x, str):
for y in flatten(x):
yield y
else:
yield x
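# Minimal usage sketch (hypothetical input): flatten() lazily yields scalars from arbitrarily nested
# iterables while leaving strings whole, e.g. list(flatten([1, [2, [3, "ab"]]])) == [1, 2, 3, "ab"].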
def freedman_lane(data_df, Yvar, Xvars, Zvars, n_perms=10000, stat='tstat', perm_func=None, perm_func_args=None,
surrogates=None, return_null=False, return_surrogates=False):
"""
Use permutation testing (via random shuffling or a user provided function) to estimate the significance of an EV (Xvar)
in a multiple linear regression (Yvar ~ Xvar + Zvars) while "correcting for" (regressing-out the estimated effects of)
other covariates (Zvars).
The null distribution of test statistics is generated by permuting the residuals of a reduced model (Yvar ~ Zvars)
(following Freedman and Lane, 1983; DOI: 10.2307/1391660).
Note:
    Interactions are not currently supported, and categorical variables are currently only supported when stat='fstat'.
Parameters:
-----------
data_df : Pandas DataFrame
A data frame containing data to be modeled.
Yvar : str
Name of the DataFrame column containing the dependent (endog) variable
    Xvars : str or list of str
        Name of the DataFrame column(s) containing the independent (exog) variable(s) of interest.
    Zvars : str or list of str
        Name of the DataFrame column(s) containing the independent (exog) variable(s) to use as covariates.
n_perms : int, optional
Number of surrogate data sets to generate. Default is 10,000.
stat : string, optional
        Which test statistic to evaluate significance for and return. 'tstat' evaluates the significance of the t-statistic
        for the effect of Xvar. 'pcorr' evaluates significance of the partial Pearson correlation coefficient between the
residuals of Yvar ~ Zvars and Xvar ~ Zvars. 'tstat' and 'pcorr' will always return the same p-value (because the
partial correlation is calculated from the t-statistic); I provide both for convenience. 'fstat' uses a Type II ANOVA
to evaluate the significance of the main effect for Xvar.
perm_func : function, optional
Custom function to generate surrogate data. Must accept a 1D array (the data to be permuted) as the first argument,
and n_perms (an integer) as the second argument. If no custom function is provided, surrogate data will be generated
via random shuffling (which assumes full exchangability).
perm_func_args : dict, optional
Dictionary containing additional arguments (including potentially additional data) to pass to perm_func.
surrogates: ndarray, optional
Surrogate data sets generated and returned by a previous run of lmperm which evaluated a model with the same
Yvar and Zvars as currently specified.
return_null : bool
Return the null distribution of statistic values. Useful if you want to plot the observed value against the null
distribution. Defaults to False.
return_surrogates : bool
Return an array containing surrogate data sets. Useful time saver if you want to test the relationship of a
given Yvar to multiple Xvars while correcting for the same Zvars. Defaults to False.
Returns:
--------
stat_observed : float
The test statistic for Xvar from the full, un-permuted model.
pvals : Pandas Series
The probability of the observed test statistic relative to the null distribution.
'p_greater' is Pr(stat_observed >= null). 'p_less' is Pr(stat_observed <= null).
'p_abs' is Pr(|stat_observed| >= |null|). If stat='fstat', only p_greater is returned.
fit_Full : statsmodels RegressionResults
Results (coefficients, fit measures, p-values, etc.) of the parametric OLS fit to to the full model.
stats_null : ndarray, optional
Null distribution of test statistics generated via permutation testing.
resid_Reduced_Perms : ndarray, optional
Permuted residuals of the reduced model (Yvar ~ Zvars).
"""
import warnings
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats import anova
# Check inputs.
# This is not Pythonic, but is more user friendly than letting exceptions raise down stream.
# Some interesting ideas here on how to make checking more Pythonic:
# https://stackoverflow.com/questions/19684434/best-way-to-check-function-arguments
assert (isinstance(data_df, pd.core.frame.DataFrame)), "data_df must be a Pandas DataFrame!"
assert (type(Yvar) == str), "Yvar must be a string!"
assert (type(Xvars) == str or type(Xvars) == list), "Xvars must be a string or a list!"
assert (type(Zvars) == str or type(Zvars) == list), "Zvars must be a string or a list!"
assert (type(n_perms) == int), "n_perms must be an integer!"
if perm_func is not None:
assert (callable(perm_func)), "perm_func must be a function!"
if perm_func_args is not None:
assert (type(perm_func_args) == dict), "perm_func_args must be a dictionary!"
if surrogates is not None:
assert (type(surrogates) == np.ndarray), "surrogates must be an ndarray!"
# Generate a formula string for Zvars.
if type(Zvars) == list:
Zvars_formula_string = " + ".join(Zvars)
Zsize = len(Zvars)
elif type(Zvars) == str:
Zvars_formula_string = Zvars
Zsize = 1
# Generate a formula string for Xvars
if type(Xvars) == list:
Xvars_formula_string = " + ".join(Xvars)
        if stat != 'fstat':
warnings.warn("Multiple Xvars provided; setting stat='fstat'.")
stat = 'fstat'
elif type(Xvars) == str:
Xvars_formula_string = Xvars
# Fit the full model with observed data.
formula_Full = f"{Yvar} ~ {Xvars_formula_string} + {Zvars_formula_string}"
fit_Full = smf.ols(formula_Full, data=data_df).fit()
if stat == 'fstat':
contrast_Xvars_string = " = ".join(Xvars)
contrast_formula = f"{contrast_Xvars_string} = 0"
f_res = fit_Full.f_test(contrast_formula)
stat_observed = f_res.fvalue[0][0]
elif stat == 'pcorr':
stat_observed = (fit_Full.tvalues[Xvars] / np.sqrt(fit_Full.tvalues[Xvars]**2 + fit_Full.df_resid))
elif stat == 'tstat':
stat_observed = fit_Full.tvalues[Xvars]
# Fit the reduced model with the observed data.
formula_Reduced = f"{Yvar} ~ {Zvars_formula_string}"
model_Reduced = smf.ols(formula_Reduced, data=data_df)
fit_Reduced = model_Reduced.fit()
resid_Reduced = fit_Reduced.resid.values
Yhat_reduced = model_Reduced.predict(fit_Reduced.params)
# If the user has provided previously generated surrogates, use those.
if surrogates is not None:
resid_Reduced_perms = surrogates
# Otherwise, generate surrogate data sets.
# TO-DO: Generate permutations in parallel via multiprocessing/joblib/ray.
else:
# Permute the residuals from the reduced model.
# If a custom permutation function is provided, use that.
if perm_func:
resid_Reduced_perms = perm_func(resid_Reduced, n_perms, **perm_func_args)
# Otherwise, just do random shuffling.
else:
resid_Reduced_perms = []
for i in range(n_perms):
resid_Reduced_perms.append(np.random.permutation(resid_Reduced))
resid_Reduced_perms = np.array(resid_Reduced_perms)
# Generate a null distribution by calculating test statistics from the full model for each of the permutations.
stats_null = []
exog_cols = []
exog_cols.append(Xvars)
exog_cols.append(Zvars)
exog_cols = list(flatten(exog_cols))
ev_array = data_df[exog_cols].values
# The non-formula OLS interface is much faster but does not automatically add an intercept, so we must add it ourselves.
intercept = np.reshape(np.ones_like(data_df[exog_cols[0]]),(ev_array.shape[0],1))
ev_array = np.concatenate((intercept, ev_array), axis=1)
for perm_iter in resid_Reduced_perms:
Ypi = perm_iter + Yhat_reduced
fit_Perm = sm.OLS(Ypi, ev_array).fit()
if stat == 'fstat':
# Create a contrast array that includes all regressors
contrast_array = np.identity(len(fit_Perm.params))
# Drop the intercept
contrast_array = contrast_array[1:,:]
# Drop Zvars
contrast_array = contrast_array[:-Zsize,:]
f_res = fit_Perm.f_test(contrast_array)
stats_null.append(f_res.fvalue[0][0])
else:
if stat == 'tstat':
stats_null.append(fit_Perm.tvalues[1])
elif stat == 'pcorr':
stats_null.append(fit_Perm.tvalues[1] / np.sqrt(fit_Perm.tvalues[1]**2 + fit_Perm.df_resid))
stats_null = np.array(stats_null)
# Calculate the probability of the observed test statistic relative to the null distribution.
if stat == 'fstat':
p_greater = (np.sum(stats_null >= stat_observed) + 1) / (n_perms + 1)
        pvals = pd.Series([p_greater], index=['p_greater'])
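# Hypothetical usage sketch (illustrative names, not from the original script): per the docstring, with
# return_null=False and return_surrogates=False the call returns (stat_observed, pvals, fit_Full), e.g.
#   t_obs, pvals, fit_full = freedman_lane(df, Yvar='y', Xvars='x', Zvars=['age', 'sex'],
#                                          n_perms=5000, stat='tstat')
#   pvals['p_abs']  # two-sided permutation p-value for the effect of 'x'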
import math
import numpy as np
import pandas as pd
import scipy.sparse as sp
import copy
from flask import render_template, jsonify, url_for
from app.models import Dataset, Klasifikasi, Username
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import preprocessor as p # cleaning
import string
from nltk.corpus import stopwords
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
factory = StemmerFactory()
stemmer = factory.create_stemmer()
stop_words = frozenset(stopwords.words('indonesian'))
def remove_punct(text):
text = "".join([char for char in text if char not in string.punctuation])
return text
def prediksi_caption(data):
clf = pickle.load(open("static/model/{}.sav".format(data["algoritma"]), 'rb'))
vectorizer = pickle.load(open("static/vectorizer/{}.pickle".format(data["algoritma"]), 'rb'))
p.set_options(p.OPT.EMOJI,p.OPT.SMILEY)
caption = p.clean(data["caption"])
casefolding = caption.lower()
p.set_options(p.OPT.URL,p.OPT.MENTION,p.OPT.HASHTAG,p.OPT.NUMBER,p.OPT.RESERVED)
cleansing = remove_punct(p.clean(casefolding))
token_caption = cleansing.split(" ")
cleaned_token = [x for x in token_caption if x not in stop_words]
filtering = " ".join(cleaned_token)
stemming = stemmer.stem(filtering)
df = pd.DataFrame({
"caption" : [caption],
"stemming" : [stemming]
})
    all_features = vectorizer.transform(df.stemming)  # look up the vocabulary and build the feature vectors
#print (all_features)
result = clf.predict(all_features)
#print (result)
k = Klasifikasi.get_one(int(result[0]))
klasifikasi = k["nama"]
algoritma = "Support Vector Machine" if data["algoritma"] == "svm" else "Backpropagation"
return render_template('pages/algoritma/hasil.html', klasifikasi=klasifikasi, algoritma=algoritma, caption=caption)
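# Example input (hypothetical values): prediksi_caption expects a dict whose "algoritma" key matches a
# saved model filename ("svm" or "backpropagation") and whose "caption" key holds the raw Instagram caption:
#   prediksi_caption({"algoritma": "svm", "caption": "Diskon besar untuk produk kecantikan hari ini!"})
# The caption is cleaned, case-folded, stripped of punctuation, stopword-filtered, stemmed, vectorised
# and then classified with the loaded model.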
def backpropagation(args):  # reads the value of k
    labels = list()
    klasifikasi = Klasifikasi.get_all()  # fetch all classification categories
    kl_dict = dict()  # build a Python dictionary; a dictionary stores key:value pairs, which is why it is closely tied to keyword arguments
    for kl in klasifikasi:  # loop over the classification records, each held in the variable kl
        labels.append(kl["id"])  # append adds an item at the end of the list
        kl_dict[kl["id"]] = {  # for each record, kl_dict maps the classification id to its name, e.g. id=1, nama="abc"
            "nama" : kl["nama"]
        }
    # in backpropagation the weights are randomised at the very start, when the initial weights are created
    # random_state keeps the results from differing every time the algorithm is run
    backpro = MLPClassifier(random_state=1, max_iter=1000)
    # backpro = MLPClassifier(random_state=1, max_iter=1000, alpha(error rate)=0.0001, hidden_layer_sizes=(100,), learning_rate_init=0.001)
    # max_iter defaults to 200
    # max_iter sets the maximum number of iterations for backpropagation; once the calculation reaches that many iterations,
    # the algorithm stops automatically. 1000 is used so the algorithm can work optimally (100 gave 95%, 200 gave 96%)
# learning rate 0.001
title = 'Backpropagation'
k = args.get('k')
if k == None:
k = 5
    data = Dataset.get_all()  # first, fetch the entire dataset
    if len(data) == 0:  # if no data is found
        return empty(title)  # run the empty() function
    X_train, X_test, y_train, y_test, vectorizer = preprocessing(data)  # preprocessing
    # preprocessing produces the training data, which is then fitted on the next line
backpro.fit(X_train, y_train)
#print(dir(backpro),backpro.score)
    # show the tf-idf values
    #sample_train = pd.DataFrame.sparse.from_spmatrix(X_train)
    #print(sample_train)
    # show the vocabulary extracted from all captions during preprocessing
    #print(vectorizer.vocabulary_)
    # the trained model is saved on the following lines
filename = 'static/model/backpropagation.sav'
pickle.dump(backpro, open(filename, 'wb'))
filename='static/vectorizer/backpropagation.pickle'
pickle.dump(vectorizer, open(filename, "wb"))
backpro = pickle.load(open("static/model/backpropagation.sav", 'rb'))
vectorizer = pickle.load(open("static/vectorizer/backpropagation.pickle", 'rb'))
    X = sp.vstack((X_train, X_test))  ## stack the train and test features (sparse matrices) to obtain X
    y = pd.concat([y_train, y_test])  ## concatenate the train and test labels (strings) to obtain y
#print(X, y)
scores = kFoldClassification(backpro, X, y, k)
u_predictions, ig = usernameClassification(vectorizer, backpro, X, y)
createGraphs(u_predictions)
# print(scores)
total_akurasi = 0
for s in scores:
total_akurasi += s["accuracy_score"]
    avg_acc = round(total_akurasi / len(scores), 3)  # len(scores) returns the number of folds
#avg_acc = round(total_akurasi / len(scores), 3)
best_fold, data_training, data_testing = tab34(data, backpro, X, y)
    # for the 20% testing split
i = 0
for kl in kl_dict.values():
#sreturn jsonify(best_fold)
kl["cm"] = [m for m in best_fold["confusion_matrix"][i]]
kl["precision"] = round(best_fold["precision_score"][i] * 100, 2)
kl["recall"] = round(best_fold["recall_score"][i] * 100, 2)
kl["f1_score"] = round(2 * (0 if (kl["precision"] + kl["recall"]) == 0 else (kl["precision"] * kl["recall"]) / (kl["precision"] + kl["recall"])), 2)
i += 1
## kl_dict = {
# 1 : {
# "nama" : Food,
# "precision" : 1,
# "recall" : 0.9,
# ...
# },
# 2 : {
# "nama" : Beauty,
# "precision" : 0.4,
# ...
# },
# }
    # for the k-fold results
total_y_test = list()
total_y_pred = list()
kfold_cm = list()
#return jsonify(scores)
for s in scores:
i = 0
kl_dict2 = copy.deepcopy(kl_dict)
total_y_test += s["y_test"]
total_y_pred += s["y_pred"]
for kl in kl_dict2.values():
kl["cm"] = [m for m in s["confusion_matrix"][i]]
kl["precision"] = round(s["precision_score"][i] * 100, 2)
kl["recall"] = round(s["recall_score"][i] * 100, 2)
kl["f1_score"] = round(2 * (0 if (kl["precision"] + kl["recall"]) == 0 else (kl["precision"] * kl["recall"]) / (kl["precision"] + kl["recall"])), 2)
i += 1
kfold_cm.append(kl_dict2)
#return jsonify(kl_dict2)
    # build the overall confusion matrix
    kl_dict3 = copy.deepcopy(kl_dict)
    # passing labels handles any newly added classification categories
    ## y_test = [1,2,1,1,1,2,1,...]
    ## y_pred = [1,1,2,2,1,2,1,...]
    # show the confusion matrix values
#print(total_y_test)
#print(total_y_pred)
cm = confusion_matrix(total_y_test, total_y_pred, labels=labels)
    ps = precision_score(total_y_test, total_y_pred, average=None, labels=labels)
    rs = recall_score(total_y_test, total_y_pred, average=None, labels=labels)
    fs = f1_score(total_y_test, total_y_pred, average=None, labels=labels)
acs = accuracy_score(total_y_test, total_y_pred)
# kl_dict3["cm"] = cm
i = 0
for kl in kl_dict3.values():
# print(cm[i], i)
# print(ps.tolist()[n])
kl["cm"] = cm[i]
kl["precision"] = round(ps.tolist()[i] * 100, 2)
kl["recall"] = round(rs.tolist()[i] * 100, 2)
kl["f1_score"] = round(2 * (0 if (kl["precision"] + kl["recall"]) == 0 else (kl["precision"] * kl["recall"]) / (kl["precision"] + kl["recall"])), 2)
i += 1
#return jsonify(kl_dict3)
return render_template('pages/algoritma/detail.html', scores=scores, title=title, ig=ig, avg_acc=avg_acc, data_training=data_training, data_testing=data_testing, kl_dict=kl_dict, kl_dict3=kl_dict3, best_fold=best_fold, kfold_cm=kfold_cm)
def supportVectorMachine(args):
labels = list()
    klasifikasi = Klasifikasi.get_all()  # fetch all classification categories
    kl_dict = dict()  # build a Python dictionary; a dictionary stores key:value pairs, which is why it is closely tied to keyword arguments
for kl in klasifikasi:
labels.append(kl["id"])
kl_dict[kl["id"]] = {
"nama" : kl["nama"]
}
    svm = SVC()  ## initialise the model
# C:float default 1.0, kernel default = rbf, gamma = scale
title = 'Support Vector Machine'
k = args.get('k')
if k == None:
k = 5
    data = Dataset.get_all()  # first, fetch the entire dataset
    if len(data) == 0:  # if no data is found
        return empty(title)  # run the empty() function
    X_train, X_test, y_train, y_test, vectorizer = preprocessing(data)  # preprocessing
    # preprocessing produces the training data, which is then fitted on the next line
svm.fit(X_train, y_train)
    # show the tf-idf values
    ##sample_train = pd.DataFrame.sparse.from_spmatrix(X_train)
    ##print(sample_train)
    # show the vocabulary extracted from all captions during preprocessing
    #print(vectorizer.vocabulary_)
    # the trained model is saved on the following lines
filename = 'static/model/svm.sav'
pickle.dump(svm, open(filename, 'wb'))
filename='static/vectorizer/svm.pickle'
pickle.dump(vectorizer, open(filename, "wb"))
svm = pickle.load(open("static/model/svm.sav", 'rb'))
vectorizer = pickle.load(open("static/vectorizer/svm.pickle", 'rb'))
    X = sp.vstack((X_train, X_test))  ## stack the train and test features (sparse matrices)
    y = pd.concat([y_train, y_test])  ## concatenate the train and test labels (strings)
# print(X, y)
scores = kFoldClassification(svm, X, y, k)
u_predictions, ig = usernameClassification(vectorizer, svm, X, y)
createGraphs(u_predictions)
total_akurasi = 0
for s in scores:
total_akurasi += s["accuracy_score"]
avg_acc = round(total_akurasi / len(scores), 3)
best_fold, data_training, data_testing = tab34(data, svm, X, y)
    # for the 20% testing split
i = 0
for kl in kl_dict.values():
kl["cm"] = [m for m in best_fold["confusion_matrix"][i]]
kl["precision"] = round(best_fold["precision_score"][i] * 100, 2)
kl["recall"] = round(best_fold["recall_score"][i] * 100, 2)
kl["f1_score"] = round(2 * (0 if (kl["precision"] + kl["recall"]) == 0 else (kl["precision"] * kl["recall"]) / (kl["precision"] + kl["recall"])), 2)
i += 1
    # for the k-fold results
total_y_test = list()
total_y_pred = list()
kfold_cm = list()
for s in scores:
i = 0
kl_dict2 = copy.deepcopy(kl_dict)
total_y_test += s["y_test"]
total_y_pred += s["y_pred"]
for kl in kl_dict2.values():
kl["cm"] = s["confusion_matrix"][i]
kl["precision"] = round(s["precision_score"][i] * 100, 2)
kl["recall"] = round(s["recall_score"][i] * 100, 2)
kl["f1_score"] = round(2 * (0 if (kl["precision"] + kl["recall"]) == 0 else (kl["precision"] * kl["recall"]) / (kl["precision"] + kl["recall"])), 2)
i += 1
kfold_cm.append(kl_dict2)
# return jsonify(kfold_cm)
    # build the overall confusion matrix
    kl_dict3 = copy.deepcopy(kl_dict)
    # passing labels handles any newly added classification categories
cm = confusion_matrix(total_y_test, total_y_pred, labels=labels)
    ps = precision_score(total_y_test, total_y_pred, average=None, labels=labels)
    rs = recall_score(total_y_test, total_y_pred, average=None, labels=labels)
    fs = f1_score(total_y_test, total_y_pred, average=None, labels=labels)
acs = accuracy_score(total_y_test, total_y_pred)
# kl_dict3["cm"] = cm
i = 0
for kl in kl_dict3.values():
# print(cm[i], i)
# print(ps.tolist()[n])
kl["cm"] = cm[i]
kl["precision"] = round(ps.tolist()[i] * 100, 2)
kl["recall"] = round(rs.tolist()[i] * 100, 2)
kl["f1_score"] = round(2 * (0 if (kl["precision"] + kl["recall"]) == 0 else (kl["precision"] * kl["recall"]) / (kl["precision"] + kl["recall"])), 2)
i += 1
return render_template('pages/algoritma/detail.html', scores=scores, title=title, ig=ig, avg_acc=avg_acc, data_training=data_training, data_testing=data_testing, kl_dict=kl_dict, kl_dict3=kl_dict3, best_fold=best_fold, kfold_cm=kfold_cm)
def empty(title):
return render_template('pages/algoritma/empty.html', title=title)
def preprocessing(data):
    pdData = pd.DataFrame.from_dict(data)
from Testdata.MertonJump import merton_jump_paths
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import IsolationForest
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action='ignore', category=SettingWithCopyWarning)
sns.set()
sns.set_style('darkgrid')
def buildMertonDF(jump_rate:float=None, l:int=None, step:int=None, v=0.0395, lam=8, sigma=0.25, N=1):
"""
    Creates a large data set with all features for the isolation forest and the associated
    anomaly values as well as the signed jumps.
    :param jump_rate: lambda/step (i.e. contamination) [float]
    :param l: lambda, intensity of jumps [int]
    :param step: number of time steps, 10,000 by default [int]
    :return: dataset with Merton jump data, signed jumps, features, anomaly scores [DataFrame]
    """
    # Merton jump parameters
steps = 10000 if step == None else step
lam = jump_rate * steps if l == None else l
# generate merton data
mertonData, jumps, contamin = merton_jump_paths(v=v, lam=lam, steps=steps, sigma=sigma)
    mertonDf = pd.DataFrame(mertonData, columns=['Merton Jump'])
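# Hypothetical usage sketch: buildMertonDF(jump_rate=0.001) keeps the default 10,000 steps,
# so lam = 0.001 * 10000 = 10 expected jumps; passing l and/or step explicitly overrides this.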
import itertools
import time
import glob as gb
import librosa
import matplotlib.pyplot as plt
import librosa.display
import pickle
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
import os
import soundfile as sf
import sys
import warnings
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import tensorflow.keras as keras
from sklearn.svm import LinearSVC
from tensorflow.keras.layers import Input
from tensorflow.keras.regularizers import l2, l1_l2
import seaborn as sns
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import classification_report
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import statistics
from sklearn import tree
from sklearn.dummy import DummyClassifier
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
import random
from numpy import inf
import audioread
import librosa.segment
import numpy as np
import data_utils as du
import data_utils_input as dus
from data_utils_input import normalize_image, padding_MLS, padding_SSLM, borders
from keras import backend as k
from shutil import copyfile
import fnmatch
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from ast import literal_eval
from sklearn.feature_selection import RFE
from skimage.transform import resize
from tensorflow.python.ops.init_ops_v2 import glorot_uniform
import lightgbm as lgb
from treegrad import TGDClassifier
from sklearn.preprocessing import MultiLabelBinarizer
import logging
# import tensorflow_decision_forests as tfdf # linux only
from tensorflow.keras.layers.experimental import RandomFourierFeatures
from XBNet.training_utils import training, predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
import autokeras as ak
from djinn import djinn
import hyperas
from hyperopt import Trials, STATUS_OK, tpe
from hyperas.distributions import choice, uniform
from os import listdir, walk, getcwd, sep
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import math
from scipy import signal
import tensorflow.keras.layers as kl
import tensorflow.keras.applications as ka
import tensorflow.keras.optimizers as ko
import tensorflow.keras.models as km
import skimage.measure
import scipy
from scipy.spatial import distance
from tensorflow.keras.layers import Flatten, Dropout, Activation, BatchNormalization, Dense
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from tensorflow.keras.regularizers import l1
from keras.utils import np_utils
from pydub import AudioSegment
from tensorflow.keras.models import load_model
from sklearn.metrics import roc_curve, roc_auc_score, auc
import datetime
import glob
import math
import re
import pyaudio
import wave
import torch
from matplotlib.pyplot import specgram
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import multilabel_confusion_matrix
tf.get_logger().setLevel(logging.ERROR)
k.set_image_data_format('channels_last')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if not sys.warnoptions:
warnings.simplefilter("ignore") # ignore warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# region Directories
MASTER_DIR = 'D:/Google Drive/Resources/Dev Stuff/Python/Machine Learning/Master Thesis/'
MASTER_INPUT_DIR = 'F:/Master Thesis Input/'
MASTER_LABELPATH = os.path.join(MASTER_INPUT_DIR, 'Labels/')
WEIGHT_DIR = os.path.join(MASTER_DIR, 'Weights/')
MIDI_Data_Dir = np.array(gb.glob(os.path.join(MASTER_DIR, 'Data/MIDIs/*')))
FULL_DIR = os.path.join(MASTER_INPUT_DIR, 'Full/')
FULL_MIDI_DIR = os.path.join(FULL_DIR, 'MIDI/')
FULL_LABELPATH = os.path.join(MASTER_LABELPATH, 'Full/')
# endregion
"""=================================================================================================================="""
# region DEPRECATED
# Deprecated
Train_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Train/*'))) # os.path.join(MASTER_DIR, 'Data/Train/*'
Test_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Test/*'))) # os.path.join(MASTER_DIR, 'Data/Test/*')))
Validate_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Validate/*'))) # os.path.join(MASTER_DIR,'Data/Val
MLS_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/MLS/')
SSLMCOS_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMCOS/')
SSLMEUC_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMEUC/')
SSLMCRM_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMCRM/')
TRAIN_DIR = os.path.join(MASTER_INPUT_DIR, 'Train/')
TEST_DIR = os.path.join(MASTER_INPUT_DIR, 'Test/')
VAL_DIR = os.path.join(MASTER_INPUT_DIR, 'Validate/')
TRAIN_LABELPATH = os.path.join(MASTER_LABELPATH, 'Train/')
TEST_LABELPATH = os.path.join(MASTER_LABELPATH, 'Test/')
VAL_LABELPATH = os.path.join(MASTER_LABELPATH, 'Validate/')
# Deprecated
def validate_directories():
print("Validating Training Directory...")
dus.validate_folder_contents(TRAIN_LABELPATH, os.path.join(TRAIN_DIR, 'MIDI/'), os.path.join(TRAIN_DIR, 'MLS/'),
os.path.join(TRAIN_DIR, 'SSLM_CRM_COS/'), os.path.join(TRAIN_DIR, 'SSLM_CRM_EUC/'),
os.path.join(TRAIN_DIR, 'SSLM_MFCC_COS/'), os.path.join(TRAIN_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
print("Validating Validation Directory...")
dus.validate_folder_contents(VAL_LABELPATH, os.path.join(VAL_DIR, 'MIDI/'), os.path.join(VAL_DIR, 'MLS/'),
os.path.join(VAL_DIR, 'SSLM_CRM_COS/'), os.path.join(VAL_DIR, 'SSLM_CRM_EUC/'),
os.path.join(VAL_DIR, 'SSLM_MFCC_COS/'), os.path.join(VAL_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
print("Validating Testing Directory...")
dus.validate_folder_contents(TEST_LABELPATH, os.path.join(TEST_DIR, 'MIDI/'), os.path.join(TEST_DIR, 'MLS/'),
os.path.join(TEST_DIR, 'SSLM_CRM_COS/'), os.path.join(TEST_DIR, 'SSLM_CRM_EUC/'),
os.path.join(TEST_DIR, 'SSLM_MFCC_COS/'), os.path.join(TEST_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
# Deprecated
def get_class_weights(labels, one_hot=False):
if one_hot is False:
n_classes = max(labels) + 1
else:
n_classes = len(labels[0])
class_counts = [0 for _ in range(int(n_classes))]
if one_hot is False:
for label in labels:
class_counts[label] += 1
else:
for label in labels:
class_counts[np.where(label == 1)[0][0]] += 1
return {i: (1. / class_counts[i]) * float(len(labels)) / float(n_classes) for i in range(int(n_classes))}
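# Worked example (hypothetical labels): labels = [0, 0, 0, 1] gives class_counts = [3, 1], so the returned
# weights are {0: (1/3) * 4 / 2 ~= 0.67, 1: (1/1) * 4 / 2 = 2.0} -- rarer classes get larger weights.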
# Deprecated
def buildValidationSet():
cnt = 1
numtrainfiles = len(fnmatch.filter(os.listdir(os.path.join(TRAIN_DIR, "MLS/")), '*.npy'))
for file in os.listdir(os.path.join(TRAIN_DIR, "MLS/")):
numvalfiles = len(fnmatch.filter(os.listdir(os.path.join(VAL_DIR, "MLS/")), '*.npy'))
if numvalfiles >= numtrainfiles * 0.2:
print(f"Validation set >= 20% of training set: {numvalfiles}/{numtrainfiles}")
break
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
formfolder = "" # Start search for correct form to search for label
for root, dirs, files in os.walk(os.path.join(MASTER_DIR, 'Labels/')):
flag = False
for tfile in files:
if tfile.split('/')[-1].split('.')[0] == name:
formfolder = os.path.join(root, file).split('/')[-1].split('\\')[0]
flag = True
if flag:
break
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), formfolder) + '/' + os.path.basename(name) + '.txt'
num_lines = sum(1 for _ in open(path))
if num_lines <= 2:
print("File has not been labeled with ground truth yet. Skipping...")
cnt += 1
continue
else:
src1 = os.path.join(TRAIN_DIR, "MLS/") + '/' + filename
src2 = os.path.join(TRAIN_DIR, "SSLM_CRM_COS/") + '/' + filename
src3 = os.path.join(TRAIN_DIR, "SSLM_CRM_EUC/") + '/' + filename
src4 = os.path.join(TRAIN_DIR, "SSLM_MFCC_COS/") + '/' + filename
src5 = os.path.join(TRAIN_DIR, "SSLM_MFCC_EUC/") + '/' + filename
dst1 = os.path.join(VAL_DIR, "MLS/") + '/' + filename
dst2 = os.path.join(VAL_DIR, "SSLM_CRM_COS/") + '/' + filename
dst3 = os.path.join(VAL_DIR, "SSLM_CRM_EUC/") + '/' + filename
dst4 = os.path.join(VAL_DIR, "SSLM_MFCC_COS/") + '/' + filename
dst5 = os.path.join(VAL_DIR, "SSLM_MFCC_EUC/") + '/' + filename
if os.path.exists(dst1) and os.path.exists(dst2) and os.path.exists(dst3) and os.path.exists(dst4) \
and os.path.exists(dst5):
print("File has already been prepared for training material. Skipping...")
cnt += 1
continue
else:
copyfile(src1, dst1)
copyfile(src2, dst2)
copyfile(src3, dst3)
copyfile(src4, dst4)
copyfile(src5, dst5)
cnt += 1
pass
# Deprecated
def findBestShape(mls_train, sslm_train):
dim1_mls = [i.shape[0] for i in mls_train.getImages()]
dim2_mls = [i.shape[1] for i in mls_train.getImages()]
print(dim1_mls)
print(dim2_mls)
dim1_sslm = [i.shape[0] for i in sslm_train.getImages()]
dim2_sslm = [i.shape[1] for i in sslm_train.getImages()]
print(dim1_sslm)
print(dim2_sslm)
    dim1_mean = min(statistics.mean(dim1_mls), statistics.mean(dim1_sslm))
    dim2_mean = min(statistics.mean(dim2_mls), statistics.mean(dim2_sslm))
    dim1_median = min(statistics.median(dim1_mls), statistics.median(dim1_sslm))
    dim2_median = min(statistics.median(dim2_mls), statistics.median(dim2_sslm))
    dim1_mode = min(statistics.mode(dim1_mls), statistics.mode(dim1_sslm))
    dim2_mode = min(statistics.mode(dim2_mls), statistics.mode(dim2_sslm))
print(f"Dimension 0:\nMean: {dim1_mean}\t\tMedian: {dim1_median}\t\tMode: {dim1_mode}")
print(f"Dimension 1:\nMean: {dim2_mean}\t\tMedian: {dim2_median}\t\tMode: {dim2_mode}")
# Deprecated WORKING FUSE MODEL
def old_formnn_fuse(output_channels=32, lrval=0.00001, numclasses=12):
cnn1_mel = formnn_mls(output_channels, lrval=lrval)
cnn1_sslm = formnn_sslm(output_channels, lrval=lrval)
combined = layers.concatenate([cnn1_mel.output, cnn1_sslm.output], axis=2)
cnn2_in = formnn_pipeline(combined, output_channels, lrval=lrval, numclasses=numclasses)
cnn2_in = layers.Dense(numclasses, activation='sigmoid')(cnn2_in)
opt = keras.optimizers.Adam(lr=lrval)
model = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy'])
model.summary() # Try categorical_crossentropy, metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
return model
# Deprecated WORKING PIPELINE MODEL
def old_formnn_pipeline(combined, output_channels=32, lrval=0.0001):
z = layers.ZeroPadding2D(padding=((1, 1), (6, 6)))(combined)
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5), strides=(1, 1),
padding='same', dilation_rate=(1, 3))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.5)(z)
# z = layers.Reshape(target_shape=(-1, 1, output_channels * 152))(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), strides=(1, 1), padding='same')(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.5)(z)
z = layers.Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding='same')(z)
z = layers.GlobalMaxPooling2D()(z)
return z
# Deprecated MLS MODEL
def cnn_mls(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=output_channels,
kernel_size=(5, 7), strides=(1, 1),
padding='same', # ((5 - 1) // 2, (7 - 1) // 2),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(200, 1150, 4) # (1,)
))
model.add(layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')) # (1, 1)))
# opt = keras.optimizers.Adam(lr=lrval)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# Deprecated SSLM MODEL
def cnn_sslm(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=output_channels,
kernel_size=(5, 7), strides=(1, 1),
padding='same', # ((5 - 1) // 2, (7 - 1) // 2),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(200, 1150, 4) # (3,)
))
model.add(layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')) # (1, 1)))
# opt = keras.optimizers.Adam(lr=lrval)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# Deprecated PIPELINE MODEL
def cnn2(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=(output_channels * 2),
kernel_size=(3, 5), strides=(1, 1),
padding='same', # ((3 - 1) // 2, (5 - 1) * 3 // 2),
dilation_rate=(1, 3),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(40, 1150, 8)
))
model.add(layers.SpatialDropout2D(rate=0.5))
model.add(
layers.Conv2D(output_channels * 152, 128, (1, 1), activation=layers.LeakyReLU(alpha=lrval), padding='same'))
    # *72 = for a pooling of 6, *152 for 2 poolings of 3
model.add(layers.SpatialDropout2D(rate=0.5))
model.add(layers.Conv2D(128, 1, (1, 1), padding='same')) # , padding='same'))
# x = np.reshape(x, -1, x.shape[1] * x.shape[2], 1, x.shape[3]) # reshape model?
# model = keras.layers.Reshape((-1, model.shape))(model)
# Feature maps are joined with the column dimension (frequency)
# opt = keras.optimizers.Adam(lr=lrval) # learning rate
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# model.summary()
return model
# Deprecated
def fuse_model(output_channels, lrval=0.0001):
cnn1_mel = cnn_mls(output_channels, lrval=lrval)
cnn1_sslm = cnn_sslm(output_channels, lrval=lrval)
combined = keras.layers.concatenate([cnn1_mel.output, cnn1_sslm.output])
cnn2_in = cnn2(output_channels, lrval=lrval)(combined)
opt = keras.optimizers.Adam(lr=lrval) # learning rate
model = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
model.get_layer(name='sequential_2').summary()
if not os.path.isfile(os.path.join(MASTER_DIR, 'Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True)
# if not os.path.isfile(os.path.join(MASTER_DIR, 'Model_Diagram_Inner.png')):
# plot_model(model.get_layer(name='sequential_2'), to_file=os.path.join(MASTER_DIR, 'Model_Diagram_Inner.png'),
# show_shapes=True, show_layer_names=True, expand_nested=True)
return model
# Probably deprecated
def prepare_train_data():
"""
Retrieve analysis of the following audio data for each training file:
- Log-scaled Mel Spectrogram (MLS)
- Self-Similarity Lag Matrix (Mel-Frequency Cepstral Coefficients/MFCCs - Cosine Distance, SSLMCOS)
- Self-Similarity Lag Matrix (MFCCs - Euclidian Distance, SSLMEUC)
- Self-Similarity Matrix (Chromas, SSLMCRM)
Checks to ensure that each file has been fully analyzed/labeled with ground truth
and not yet prepared for training material.
"""
cnt = 1
for folder in MIDI_Data_Dir:
for file in os.listdir(folder):
foldername = folder.split('\\')[-1]
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), foldername) + '/' + os.path.basename(name) + '.txt'
num_lines = sum(1 for _ in open(path))
if num_lines <= 2:
print("File has not been labeled with ground truth yet. Skipping...")
cnt += 1
continue
# elif os.path.basename(name) != "INSERT_DEBUG_NAME_HERE": # Debug output of specified file
else:
png1 = os.path.join(MASTER_DIR, 'Images/Train/') + "MLS/" + os.path.basename(name) + 'mls.png'
png2 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMCOS/" + os.path.basename(name) + 'cos.png'
png3 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMEUC/" + os.path.basename(name) + 'euc.png'
png4 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMCRM/" + os.path.basename(name) + 'crm.png'
if os.path.exists(png1) and os.path.exists(png2) and os.path.exists(png3) and os.path.exists(png4):
print("File has already been prepared for training material. Skipping...")
cnt += 1
continue
fullfilename = folder + '/' + filename
du.create_mls_sslm(fullfilename, name, foldername)
du.peak_picking(fullfilename, name, foldername)
cnt += 1
# Deprecated
def old_prepare_train_data():
"""
Retrieve analysis of the following audio data for each training file:
- Log-scaled Mel Spectrogram (MLS)
- Self-Similarity Lag Matrix (Mel-Frequency Cepstral Coefficients/MFCCs - Cosine Distance, SSLMCOS)
- Self-Similarity Lag Matrix (MFCCs - Euclidian Distance, SSLMEUC)
- Self-Similarity Matrix (Chromas, SSLMCRM)
"""
cnt = 1
for file in Train_Data_Dir:
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
du.create_mls_sslm(filename, name)
du.create_mls_sslm2(filename, name)
cnt += 1
# Deprecated
def old_prepare_model_training_input():
"""
Read in the input data for the model, return: images [MLS, SSLMCOS, EUC, and CRM] labels (phrases), labels (seconds)
"""
mls_images = np.asarray(du.ReadImagesFromFolder(MLS_Data_Dir), dtype=np.float32)
sslmcos_images = np.asarray(du.ReadImagesFromFolder(SSLMCOS_Data_Dir), dtype=np.float32)
sslmeuc_images = np.asarray(du.ReadImagesFromFolder(SSLMEUC_Data_Dir), dtype=np.float32)
sslmcrm_images = du.ReadImagesFromFolder(SSLMCRM_Data_Dir)
lbls_seconds, lbls_phrases = du.ReadLabelSecondsPhrasesFromFolder()
# print(lbls_seconds)
# print([i for i, x in enumerate(lbls_seconds) if len(x) != 560])
# lbls_seconds = np.array(lbls_seconds).flatten()
# lbls_seconds = [item for sublist in lbls_seconds for item in sublist]
# for i in range(len(lbls_seconds)):
# lbls_seconds[i] = np.asarray(lbls_seconds[i]).flatten()
lbls_seconds = padMatrix(lbls_seconds) # matrix must not be jagged in order to convert to ndarray of float32
# print(lbls_seconds)
lbls_seconds = np.asarray(lbls_seconds, dtype=np.float32)
mdl_images = [mls_images, sslmcos_images, sslmeuc_images, sslmcrm_images]
return mdl_images, lbls_seconds, lbls_phrases
# Probably deprecated
def padMatrix(a):
b = []
width = max(len(r) for r in a)
for i in range(len(a)):
if len(a[i]) != width:
x = np.pad(a[i], (width - len(a[i]), 0), 'constant', constant_values=0)
else:
x = a[i]
b.append(x)
return b
# Probably deprecated
def debugInput(mimg, lbls, lblp):
# model_images = [0 => mls, 1 => sslmcos, 2 => sslmeuc, 3 => sslmcrm]
print("Model images:", mimg)
print("Model images length:", len(mimg))
for i in range(len(mimg)):
print("M_Imgs[" + str(i) + "] length:", len(mimg[i]))
print("Label seconds:", lbls)
print("Label phrases:", lblp)
print("Image shape:", mimg[0][0].shape) # returns (height, width, channels) := (216, 1162, 4)
# Deprecated
def old_trainModel():
model_images, labels_seconds, labels_phrases = old_prepare_model_training_input()
# debugInput(model_images, labels_seconds, labels_phrases)
# FIT MODEL AND USE CHECKPOINT TO SAVE BEST MODEL
trmodel = fuse_model(4) # (32) CNN Layer 1 Output Characteristic Maps
checkpoint = ModelCheckpoint("best_initial_model.hdf5", monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
model_history = trmodel.fit((np.array([model_images[0]], dtype=np.float32),
np.array([model_images[1], model_images[2], model_images[3]], dtype=np.float32)),
# np.asarray([tf.stack(model_images[1:2]), model_images[3]],
# (np.array([model_images[1], model_images[2]], dtype=np.float32),
# np.array(model_images[3])),
np.array(labels_seconds, dtype=np.float32),
batch_size=32, epochs=2000,
validation_data=(labels_seconds,),
callbacks=[checkpoint])
print(model_history)
# PLOT MODEL HISTORY OF ACCURACY AND LOSS OVER EPOCHS
plt.plot(model_history.history['accuracy'])
plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
# pd.DataFrame(model_history.history).plot() # figsize=(8, 5)
# plt.show()
# summarize history for loss
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_loss.png')
plt.show()
# Probably deprecated
def combine_generator(gen1, gen2):
while True:
yield next(gen1), next(gen2)
# endregion
# region OldModelDefinition
# MIDI MODEL -- Try switching activation to ELU instead of RELU. Mimic visual/aural analysis using ensemble method
def formnn_midi(output_channels=32, numclasses=12):
inputC = layers.Input(shape=(None, 1))
w = layers.Conv1D(output_channels * 2, kernel_size=10, activation='relu', input_shape=(None, 1))(inputC)
w = layers.Conv1D(output_channels * 4, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01))(w)
w = layers.MaxPooling1D(pool_size=6)(w)
w = layers.Dropout(0.4)(w)
w = layers.Conv1D(output_channels * 4, kernel_size=10, activation='relu')(w)
w = layers.MaxPooling1D(pool_size=6)(w)
w = layers.Dropout(0.4)(w)
w = layers.GlobalMaxPooling1D()(w)
w = layers.Dense(output_channels * 8, activation='relu')(w)
w = layers.Dropout(0.4)(w)
w = layers.Dense(numclasses)(w)
w = layers.Softmax()(w)
w = keras.models.Model(inputs=inputC, outputs=w)
return w
def formnn_mls2(output_channels=32):
inputA = layers.Input(batch_input_shape=(None, None, None, 1))
x = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(inputA)
x = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(x)
x = keras.models.Model(inputs=inputA, outputs=x)
return x
def formnn_sslm2(output_channels=32):
inputB = layers.Input(batch_input_shape=(None, None, None, 1)) # (None, None, None, 4)
y = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(inputB)
y = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(y)
y = layers.AveragePooling2D(pool_size=(1, 4))(y)
y = keras.models.Model(inputs=inputB, outputs=y)
return y
def formnn_pipeline2(combined, output_channels=32, numclasses=12):
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5),
padding='same', dilation_rate=(1, 3), kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01), activation='relu')(combined)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(z)
z = layers.MaxPooling2D(pool_size=3)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(z)
z = layers.MaxPooling2D(pool_size=3)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
z = layers.GlobalMaxPooling2D()(z)
# z = layers.Dense(output_channels * 8, activation='relu')(z)
# z = layers.Dropout(rate=0.3)(z)
z = layers.Dense(numclasses)(z)
z = layers.Softmax()(z)
return z
"""=======================ORIGINAL MODEL======================="""
# MLS MODEL
def formnn_mls(output_channels=32, lrval=0.0001):
inputA = layers.Input(batch_input_shape=(None, None, None, 1))
x = layers.ZeroPadding2D(padding=((2, 2), (3, 3)))(inputA)
x = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(x)
x = layers.LeakyReLU(alpha=lrval)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(x)
x = keras.models.Model(inputs=inputA, outputs=x)
return x
# SSLM MODEL
def formnn_sslm(output_channels=32, lrval=0.0001):
inputB = layers.Input(batch_input_shape=(None, None, None, 1)) # (None, None, None, 4)
y = layers.ZeroPadding2D(padding=((2, 2), (3, 3)))(inputB)
y = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(y)
y = layers.LeakyReLU(alpha=lrval)(y)
y = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(y)
y = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(y)
y = layers.AveragePooling2D(pool_size=(1, 4))(y)
y = keras.models.Model(inputs=inputB, outputs=y)
return y
# PIPELINE MODEL
def formnn_pipeline(combined, output_channels=32, lrval=0.0001, numclasses=12):
z = layers.ZeroPadding2D(padding=((1, 1), (6, 6)))(combined)
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5), strides=(1, 1),
padding='same', dilation_rate=(1, 3), kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
# z = layers.Reshape(target_shape=(-1, 1, output_channels * 152))(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
# z = layers.SpatialDropout2D(rate=0.5)(z)
z = layers.Conv2D(filters=output_channels * 8, kernel_size=(1, 1), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.GlobalAveragePooling2D()(z)
# z = layers.Flatten()(z)
z = layers.Dense(numclasses)(z)
z = layers.Softmax()(z)
# Softmax -> Most likely class where sum(probabilities) = 1, Sigmoid -> Multiple likely classes, sum != 1
return z
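# Quick numeric illustration of the comment above (hypothetical logits, not model output): for logits
# [2.0, 1.0, 0.1], softmax gives ~[0.66, 0.24, 0.10] (sums to 1, one dominant class), while an
# element-wise sigmoid gives ~[0.88, 0.73, 0.52] (independent per-class probabilities, sum != 1).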
def formnn_fuse(output_channels=32, lrval=0.0001, numclasses=12):
cnn1_mel = formnn_mls(output_channels, lrval=lrval)
cnn1_sslm = formnn_sslm(output_channels, lrval=lrval)
combined = layers.concatenate([cnn1_mel.output, cnn1_sslm.output], axis=2)
cnn2_in = formnn_pipeline(combined, output_channels, lrval=lrval, numclasses=numclasses)
# opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
imgmodel = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
midmodel = formnn_midi(output_channels, numclasses=numclasses)
averageOut = layers.Average()([imgmodel.output, midmodel.output])
model = keras.models.Model(inputs=[imgmodel.input[0], imgmodel.input[1], midmodel.input], outputs=averageOut)
model.compile(loss=['categorical_crossentropy'], optimizer=opt, metrics=['accuracy'])
# model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy'])
model.summary() # Try categorical_crossentropy, metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
return model
def old_trainFormModel():
batch_size = 10
# region MODEL_DIRECTORIES
mls_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'MLS/'), label_path=TRAIN_LABELPATH, # end=90,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_CRM_COS/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_CRM_EUC/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_MFCC_COS/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_MFCC_EUC/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_train = dus.BuildMIDIloader(os.path.join(TRAIN_DIR, 'MIDI/'), label_path=TRAIN_LABELPATH,
batch_size=batch_size)
mls_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'MLS/'), label_path=VAL_LABELPATH,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_CRM_COS/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_CRM_EUC/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_MFCC_COS/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_MFCC_EUC/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_val = dus.BuildMIDIloader(os.path.join(VAL_DIR, 'MIDI/'), label_path=VAL_LABELPATH, batch_size=batch_size)
mls_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'MLS/'), label_path=TEST_LABELPATH,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_CRM_COS/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_CRM_EUC/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_MFCC_COS/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_MFCC_EUC/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_test = dus.BuildMIDIloader(os.path.join(TEST_DIR, 'MIDI/'), label_path=TEST_LABELPATH, batch_size=batch_size)
# endregion
# findBestShape(mls_train, sslm_cmcos_train)
train_datagen = multi_input_generator(mls_train, sslm_cmcos_train, sslm_cmeuc_train, sslm_mfcos_train,
sslm_mfeuc_train, midi_train)
valid_datagen = multi_input_generator(mls_val,
sslm_cmcos_val, sslm_cmeuc_val, sslm_mfcos_val, sslm_mfeuc_val, midi_val)
test_datagen = multi_input_generator(mls_test,
sslm_cmcos_test, sslm_cmeuc_test, sslm_mfcos_test, sslm_mfeuc_test, midi_test)
steps_per_epoch = len(list(mls_train)) // batch_size
steps_per_valid = len(list(mls_val)) // batch_size
label_encoder = LabelEncoder()
label_encoder.classes_ = np.load(os.path.join(MASTER_DIR, 'form_classes.npy'))
if mls_train.getNumClasses() != mls_val.getNumClasses() or mls_train.getNumClasses() != mls_test.getNumClasses():
print(f"Train and validation or testing datasets have differing numbers of classes: "
f"{mls_train.getNumClasses()} vs. {mls_val.getNumClasses()} vs. {mls_test.getNumClasses()}")
# classweights = get_class_weights(mls_train.getLabels().numpy().squeeze(axis=-1), one_hot=True)
"""
# Show class weights as bar graph
barx, bary = zip(*sorted(classweights.items()))
plt.figure(figsize=(12, 8))
plt.bar(label_encoder.inverse_transform(barx), bary, color='green')
for i in range(len(barx)):
plt.text(i, bary[i]//2, round(bary[i], 3), ha='center', color='white')
plt.title('Train Class Weights')
plt.ylabel('Weight')
plt.xlabel('Class')
plt.savefig('Initial_Model_Class_Weights.png')
plt.show()
"""
model = formnn_fuse(output_channels=32, lrval=0.00005, numclasses=mls_train.getNumClasses()) # Try 'val_loss'?
# model.load_weights('best_initial_model.hdf5')
early_stopping = EarlyStopping(patience=5, verbose=5, mode="auto")
checkpoint = ModelCheckpoint(os.path.join(MASTER_DIR, 'best_formNN_model.hdf5'), monitor='val_accuracy', verbose=0,
save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
model_history = model.fit(train_datagen, epochs=100, verbose=1, validation_data=valid_datagen, shuffle=False,
callbacks=[checkpoint, early_stopping], batch_size=batch_size, # class_weight=classweight
steps_per_epoch=steps_per_epoch, validation_steps=steps_per_valid)
print("Training complete!\n")
# region LossAccuracyGraphs
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_Loss.png')
plt.show()
plt.plot(model_history.history['accuracy'])
plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
pd.DataFrame(model_history.history).plot()
plt.show()
# endregion
predictions = model.predict_generator(valid_datagen, steps=1, verbose=1, workers=0)
print(predictions)
print("Prediction complete!")
inverted = label_encoder.inverse_transform([np.argmax(predictions[0, :])])
print("Predicted: ", end="")
print(inverted, end=""),
print("\tActual: ", end="")
print(label_encoder.inverse_transform([np.argmax(mls_val.getFormLabel(mls_val.getCurrentIndex()-1))]))
print("Name: " + mls_val.getSong(mls_val.getCurrentIndex()-1))
print("\nEvaluating...")
score = model.evaluate_generator(test_datagen, steps=len(list(mls_test)), verbose=1)
print("Evaluation complete!\nScore:")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
# region EvaluationGraphs
predictions = model.predict(test_datagen, steps=len(list(mls_test)), verbose=1)
predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
actual = mls_test.getLabels().numpy().argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]],
columns=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('Initial_Model_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('Initial_Model_Classification_Report.png')
plt.show()
# endregion
def formnn_cnn_mod(input_dim_1, filters=64, lrval=0.0001, numclasses=12):
model = tf.keras.Sequential()
model.add(layers.Conv1D(filters, kernel_size=10, activation='relu', input_shape=(input_dim_1, 1)))
model.add(layers.Dropout(0.4)) # ?
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=6))
# model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Flatten())
model.add(layers.Dense(filters*4, activation='relu'))
# model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Dense(numclasses, activation='softmax')) # Try softmax?
opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
# opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
def formnn_cnn_old(input_dim_1, filters=64, lrval=0.0001, numclasses=12):
model = tf.keras.Sequential()
model.add(layers.Conv1D(filters, kernel_size=10, activation='relu', input_shape=(input_dim_1, 1)))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.Dropout(0.4))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.Dropout(0.4))
model.add(layers.Flatten())
model.add(layers.Dense(filters*4, activation='relu'))
model.add(layers.Dropout(0.4))
model.add(layers.Dense(numclasses, activation='softmax')) # Try softmax?
# opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# endregion
# region OldWorkingModelDefinition
def formnn_cnn(input_dim_1, filters=8, lrval=0.0001, numclasses=12, kernelsize=3, l1reg=0.01, l2reg=0.01, dropout=0.6):
np.random.seed(9)
X_input = Input(shape=(input_dim_1, 1))
X = layers.Conv1D(filters, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X_input)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Conv1D(filters * 2, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Conv1D(filters * 4, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Flatten()(X)
# X = layers.Conv1D(filters * 8, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
# bias_regularizer=l2(0.5))(X)
X = layers.Dense(filters * 8, kernel_initializer=glorot_uniform(seed=9), # 256
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=-1)(X)
X = layers.Activation('relu')(X)
# X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
# X = layers.Flatten()(X)
X = layers.Dense(numclasses, activation='sigmoid', kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
# opt = keras.optimizers.Adam(lr=lrval)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model = keras.models.Model(inputs=X_input, outputs=X, name='FormModel')
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
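# Hedged usage sketch for formnn_cnn (argument values are illustrative and mirror
# the calls made further below in this file):
#   model = formnn_cnn(input_dim_1=X_train.shape[1], filters=32, lrval=0.003,
#                      numclasses=len(label_encoder.classes_), kernelsize=10,
#                      l1reg=1e-6, l2reg=1e-6, dropout=0.6)
#   model.fit(X_train, y_train, batch_size=32, epochs=1, validation_data=(X_test, y_test))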
def oldWorkingtrainFormModel():
# region DataPreProcessing
df = pd.read_excel(os.path.join(MASTER_DIR, 'Data/full_augmented_dataset.xlsx'))
# df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx'))
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
# """
df = df.drop(columns=['sslm_chroma_cos_mean', 'sslm_chroma_cos_var', 'sslm_chroma_euc_mean', 'sslm_chroma_euc_var',
'sslm_mfcc_cos_mean', 'sslm_mfcc_cos_var', 'sslm_mfcc_euc_mean', 'sslm_mfcc_euc_var'])
# """
df.drop(columns=['spectral_bandwidth_var', 'spectral_centroid_var', 'spectral_flatness_var', 'spectral_rolloff_var',
'zero_crossing_var', 'fourier_tempo_mean', 'fourier_tempo_var'], inplace=True) # Remove useless
# nonlist = df[['duration', 'spectral_contrast_var']]
nonlist = df[['duration']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'spectral_contrast_var', 'formtype'],
inplace=True)
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var', 'mel_mean', 'mel_var', 'chroma_stft_mean', 'chroma_stft_var']]
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var']]
df = df[['ssm_log_mel_mean']] # best decision tree accuracy
print("Fixing broken array cells as needed...")
def fix_broken_arr(strx):
if '[' in strx:
if ']' in strx:
return strx
else:
return strx + ']'
for col in df.columns:
df[col] = df[col].apply(lambda x: fix_broken_arr(x))
# print("Headers:", pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1).columns)
# Headers: Index(['piece_name', 'composer', 'filename', 'duration', 'ssm_log_mel_mean', 'formtype'], dtype='object')
print("Done processing cells, building training set...")
# d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()).add_prefix(col) for col in df.columns]
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
df = pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1) # print(df)
train, test = train_test_split(df, test_size=0.169, random_state=0, stratify=df['formtype']) # test_s=.169 gave 50%
# df.to_csv(os.path.join(MASTER_DIR, 'full_modified_dataset.csv'))
X_train = train.iloc[:, 3:-1]
# X_train_names = train.iloc[:, 0:3]
y_train = train.iloc[:, -1]
print("Train shape:", X_train.shape)
X_test = test.iloc[:, 3:-1]
# X_test_names = test.iloc[:, 0:3]
y_test = test.iloc[:, -1]
print("Test shape:", X_test.shape)
# Normalize Data
"""
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train) # Good for decision tree
X_test = min_max_scaler.fit_transform(X_test)
"""
# X_train = preprocessing.scale(X_train)
# X_test = preprocessing.scale(X_test)
# """
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std # Good for decision tree
X_test = (X_test - mean) / std
# """
print("Normalized Train shape:", X_train.shape)
print("Normalized Test shape:", X_test.shape)
# Convert to arrays for keras
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
label_encoder = LabelEncoder()
old_y_train = y_train
# old_y_test = y_test
int_y_train = label_encoder.fit_transform(y_train)
print(int_y_train.shape)
# int_y_train = int_y_train.reshape(len(int_y_train), 1)
# int_y_test = label_encoder.fit_transform(y_test)
# int_y_test = int_y_test.reshape(len(int_y_test), 1)
y_train = to_categorical(label_encoder.fit_transform(y_train))
y_test = to_categorical(label_encoder.fit_transform(y_test))
print(y_train.shape, y_test.shape)
print(label_encoder.classes_, "\n")
""" BASE MODEL """
# DummyClassifier makes predictions while ignoring input features
dummy_clf = DummyClassifier(strategy="stratified")
dummy_clf.fit(X_train, y_train)
DummyClassifier(strategy='stratified')
dummy_clf.predict(X_test)
print("Dummy classifier accuracy:", dummy_clf.score(X_test, y_test))
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
clf.predict(X_test)
print("Decision tree accuracy:", clf.score(X_test, y_test))
""" FEATURE TUNING """
selector = SelectKBest(f_classif, k=15) # 1000 if using RFE
Z_train = selector.fit_transform(X_train, old_y_train)
skb_values = selector.get_support()
Z_test = X_test[:, skb_values]
np.save(os.path.join(MASTER_DIR, "selectkbest_indices.npy"), skb_values)
print(Z_train.shape)
print(Z_test.shape)
"""
plt.title('Feature Importance')
plt.ylabel('Score')
plt.xlabel('Feature')
plt.plot(selector.scores_)
plt.savefig('Initial_Feature_Importance.png')
plt.show()
"""
print("Indices of top 10 features:", (-selector.scores_).argsort()[:10])
""" KBEST MODEL """
clf = tree.DecisionTreeClassifier()
clf = clf.fit(Z_train, y_train)
clf.predict(Z_test)
# treedepth = clf.tree_.max_depth
skb_score = clf.score(Z_test, y_test)
print("K-Best Decision tree accuracy:", skb_score) # Highest score: 84.3% accuracy
# """
# Accuracy 0.211, stick with SKB? Gives good loss though
clf = LinearSVC(C=0.01, penalty="l1", dual=False)
clf.fit(X_train, old_y_train)
rfe_selector = RFE(clf, 15, verbose=5)
rfe_selector = rfe_selector.fit(Z_train, old_y_train)
# rfe_selector = rfe_selector.fit(X_train, old_y_train)
rfe_values = rfe_selector.get_support()
# np.save(os.path.join(MASTER_DIR, "rfebest_indices.npy"), rfe_values)
print("Indices of RFE important features:", np.where(rfe_values)[0])
W_train = Z_train[:, rfe_values]
W_test = Z_test[:, rfe_values]
# "" " RFE MODEL " ""
clf = tree.DecisionTreeClassifier()
clf = clf.fit(W_train, y_train)
clf.predict(W_test)
rfe_score = clf.score(W_test, y_test)
print("RFE Decision tree accuracy:", rfe_score) # Highest score: 83.7% accuracy, typically better than SKB
"""
plt.figure(figsize=(30, 20)) # set plot size (denoted in inches)
tree.plot_tree(clf, fontsize=10)
plt.show()
plt.savefig('tree_high_dpi', dpi=100)
"""
# """
# endregion
# Reshape to 3D tensor for keras
if skb_score > rfe_score:
X_train = Z_train[:, :, np.newaxis]
X_test = Z_test[:, :, np.newaxis]
# X1_train = Z_train
# X1_test = Z_test
else:
X_train = W_train[:, :, np.newaxis]
X_test = W_test[:, :, np.newaxis]
X1_train = W_train
X1_test = W_test
treedepth = clf.tree_.max_depth
# print(treedepth)
# X_train/X_test were already expanded to 3D in the branches above; expanding them
# again would produce 4D input that Conv1D cannot accept, so the second expansion
# is omitted here.
"""
# Autokeras Model - 32% accuracy
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=10)
model_history = clf.fit(W_train, y_train, epochs=100)
predicted_y = clf.predict(W_test)
print(predicted_y)
print(clf.evaluate(W_test, y_test))
model = clf.export_model()
model.summary()
# model.save('best_auto_model.h5', save_format='tf')
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_CNN_AutoModel_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_CNN_AutoModel_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
"""
"""
# Deep CNN Decision Tree - 50% accuracy
feature_extractor = Sequential()
feature_extractor.add(layers.Conv1D(16, 3, padding='valid', activation='relu', input_shape=(X_train.shape[1], 1),
strides=1, kernel_regularizer=l1_l2(l1=0.01, l2=0.01)))
feature_extractor.add(layers.MaxPooling1D(2))
feature_extractor.add(layers.Dropout(0.6))
feature_extractor.add(layers.BatchNormalization())
feature_extractor.add(layers.Conv1D(32, 3, padding='valid', activation='relu',
kernel_regularizer=l1_l2(l1=0.01, l2=0.01), strides=1))
# New layers for prediction outside of feature extraction model
x = feature_extractor.output
x = layers.MaxPooling1D(4)(x)
x = layers.Dropout(0.6)(x)
x = layers.BatchNormalization()(x)
x = layers.Flatten()(x)
prediction_layer = layers.Dense(len(label_encoder.classes_), activation='softmax')(x)
# New model combining both layer sets
lrval = 0.1
# opt = keras.optimizers.Adam(lr=lrval)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
cnn_model = keras.models.Model(inputs=feature_extractor.input, outputs=prediction_layer)
cnn_model.compile(optimizer=opt, loss='categorical_crossentropy')
for i in range(10):
cnn_model.fit(X_train, y_train, verbose=1)
# Predict only the output of the feature extraction model
X_ext = feature_extractor.predict(X_train)
dtc = tree.DecisionTreeClassifier() # criterion='entropy'
nsamples, nx, ny = X_ext.shape
X_ext = X_ext.reshape((nsamples, nx * ny))
# Train the decision tree on the extracted features
dtc.fit(X_ext, y_train)
# Evaluate decision tree
X_ext = feature_extractor.predict(X_test)
nsamples, nx, ny = X_ext.shape
X_ext = X_ext.reshape((nsamples, nx * ny))
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep CNN Decision tree accuracy:", dtc_score)
# """
"""
# Deep SVM-NN - 23% accuracy
model = keras.Sequential([
keras.Input(shape=(X_train.shape[1],)),
RandomFourierFeatures(output_dim=4096, scale=10.0, kernel_initializer="gaussian"),
layers.Dense(units=len(label_encoder.classes_)),
])
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.hinge,
metrics=[keras.metrics.CategoricalAccuracy(name="acc")],
)
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test), verbose=1)
"""
"""
# Deep ANN Decision Tree - 53% accuracy
model = Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(len(label_encoder.classes_), activation='softmax'))
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test), verbose=1)
model.fit(X_train, y_train, epochs=10000)
score, acc = model.evaluate(X_test, y_test, verbose=1) # ~26-35% accuracy
feature_vectors_model = keras.models.Model(model.input, model.get_layer('dense_3').output)
X_ext = feature_vectors_model.predict(X_train)
dtc = tree.DecisionTreeClassifier()
dtc.fit(X_ext, y_train)
X_ext = feature_vectors_model.predict(X_test)
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep ANN Decision Tree accuracy:", dtc_score)
"""
"""
# Deep Jointly-Informed Neural Network (DJINN) - 45% accuracy
modelname = "class_djinn_test"
ntrees = 1 # number of trees = number of neural nets in ensemble
maxdepth = 18 # 4 or 20-25; max depth of tree -- optimize this for each data set
dropout_keep = 1.0 # dropout typically set to 1 for non-Bayesian models
model = djinn.DJINN_Classifier(ntrees, maxdepth, dropout_keep)
optimal = model.get_hyperparameters(X1_train, y_train, random_state=1)
batchsize = optimal['batch_size']
learnrate = optimal['learn_rate']
epochs = optimal['epochs']
model.train(X1_train, int_y_train, epochs=epochs, learn_rate=learnrate, batch_size=batchsize,
display_step=1, save_files=True, file_name=modelname,
save_model=True, model_name=modelname, random_state=1)
m = model.predict(X1_test)
acc = accuracy_score(int_y_test, m.flatten())
print('DJINN Accuracy: ', acc)
model.close_model()
"""
"""
# XGBoosted Neural Network - 24% accuracy
model = XBNETClassifier(X1_train, int_y_train, num_layers=2)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
m, acc, lo, val_ac, val_lo = run_XBNET(X1_train, X1_test, int_y_train, int_y_test, model,
criterion, optimizer, batch_size=32, epochs=100)
print(predict(m, X1_test))
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(acc, label='XBNET Training Accuracy')
plt.plot(val_ac, label='XBNET Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(lo, label='XBNET Training Loss')
plt.plot(val_lo, label='XBNET Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
"""
"""
# TreeGrad Deep Neural Decision Forest - 83% accuracy
model = TGDClassifier(num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
autograd_config={'refit_splits': True})
model.fit(X1_train, int_y_train)
acc = accuracy_score(int_y_test, model.predict(X1_test))
print('TreeGrad Deep Neural Decision Forest accuracy: ', acc)
predictions = model.predict(X1_test)
# predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
# actual = y_test.argmax(axis=1)
actual = int_y_test.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_], columns=[i for i in label_encoder.classes_])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('TreeGrad_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('TreeGrad_Classification_Report.png')
plt.show()
with open('treegrad_model_save.pkl', 'wb') as f:
pickle.dump(model, f)
with open('treegrad_model_save.pkl', 'rb') as f:
model2 = pickle.load(f)
acc = accuracy_score(int_y_test, model2.predict(X1_test))
print('TreeGrad Deep Neural Decision Forest accuracy from save: ', acc)
"""
# """
model = formnn_cnn(X_train.shape[1], filters=32, lrval=0.003, numclasses=len(label_encoder.classes_),
kernelsize=10, l1reg=0.000001, l2reg=0.000001, dropout=0.6)
model.summary()
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_CNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_CNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
history_loss = []
history_val_loss = []
history_accuracy = []
history_val_accuracy = []
num_epochs = 0
"""
# Try predict
model.load_weights('best_form_model_50p.hdf5')
result = model.predict(X_test)
percent_correct = 0
pred_table = pd.DataFrame(columns=["Piece", "Predicted", "Actual"])
X_test_names = np.array(X_test_names)
for i in range(len(result)):
resultlbl = label_encoder.inverse_transform([np.argmax(result[i, :])])
actuallbl = label_encoder.inverse_transform([np.argmax(y_test[i, :])])
pred_table.loc[i] = ([X_test_names[i][2], resultlbl, actuallbl])
percent_correct += 1 if resultlbl == actuallbl else 0
print(pred_table.to_string(index=False))
print("Accuracy: " + str(float(percent_correct/len(result))*100) + "%")
return
"""
# model.load_weights('best_form_model_44p.hdf5')
model.load_weights('best_form_new_model40p.hdf5')
# while True:
for i in range(0, 3000):
# early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=5, mode="auto")
checkpoint = ModelCheckpoint("best_form_new_model.hdf5", monitor='val_accuracy', verbose=0,
save_best_only=False, mode='max', save_freq='epoch', save_weights_only=True)
model_history = model.fit(X_train, y_train, batch_size=32, epochs=1, validation_data=(X_test, y_test),
callbacks=[checkpoint]) # , early_stopping epochs=2000 loss hits 0.7
history_loss.append(model_history.history['loss'])
history_val_loss.append(model_history.history['val_loss'])
history_accuracy.append(model_history.history['accuracy'])
history_val_accuracy.append(model_history.history['val_accuracy'])
num_epochs += 1
print("Epochs completed:", num_epochs)
print("\nEvaluating...")
score = model.evaluate(X_test, y_test, verbose=1)
print("Evaluation complete!\n__________Score__________")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
feature_vectors_model = keras.models.Model(model.input, model.get_layer('dense').output)
X_ext = feature_vectors_model.predict(X_train)
dtc = tree.DecisionTreeClassifier()
"""
# More trees performed worse: rfc0 28%, everything else 12-15%
rfc0 = RandomForestClassifier(n_estimators=1)
rfc1 = RandomForestClassifier(n_estimators=10)
rfc2 = RandomForestClassifier(n_estimators=100)
rfc3 = RandomForestClassifier(n_estimators=1000)
rfc4 = RandomForestClassifier(n_estimators=int(np.sqrt(X_train.shape[1])))
rfc5 = RandomForestClassifier(n_estimators=int(X_train.shape[1]/2))
"""
dtc.fit(X_ext, y_train)
X_ext = feature_vectors_model.predict(X_test)
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep CNN Decision Tree 2 accuracy:", dtc_score) # ^ 26%, 29%
# if score[1] >= 0.51:
# region EvaluationGraphs
plt.plot(history_loss) # plt.plot(model_history.history['loss'])
plt.plot(history_val_loss) # plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_Loss.png')
plt.show()
plt.plot(history_accuracy) # plt.plot(model_history.history['accuracy'])
plt.plot(history_val_accuracy) # plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
# pd.DataFrame(model_history.history).plot()
# plt.show()
predictions = model.predict(X_test, verbose=1)
predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
actual = y_test.argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_], columns=[i for i in label_encoder.classes_])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('Initial_Model_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('Initial_Model_Classification_Report.png')
plt.show()
# break
# elif num_epochs >= 50:
# model.load_weights('best_form_model_44p.hdf5')
# num_epochs = 0
# continue
# endregion
# """
pass
def old_preparePredictionData(filepath, savetoexcel=False):
print("Preparing MLS")
mls = dus.util_main_helper(feature="mls", filepath=filepath, predict=True)
print("Preparing SSLM-MFCC-COS")
sslm_mfcc_cos = dus.util_main_helper(feature="mfcc", filepath=filepath, mode="cos", predict=True)
print("Preparing SSLM-MFCC-EUC")
sslm_mfcc_euc = dus.util_main_helper(feature="mfcc", filepath=filepath, mode="euc", predict=True)
print("Preparing SSLM-CRM-COS")
sslm_crm_cos = dus.util_main_helper(feature="chroma", filepath=filepath, mode="cos", predict=True)
print("Preparing SSLM-CRM-EUC")
sslm_crm_euc = dus.util_main_helper(feature="chroma", filepath=filepath, mode="euc", predict=True)
midimages = [mls, sslm_mfcc_cos, sslm_mfcc_euc, sslm_crm_cos, sslm_crm_euc]
cur_data = []
for image in midimages:
if image.ndim == 1:
raise ValueError("Erroneous Image Shape:", image.shape, image.ndim)
else:
image1 = np.mean(image, axis=0)
image2 = np.var(image, axis=0)
image = np.array([image1, image2])
cur_data.append(image)
print("Preparing audio feature data")
dfmid = dus.get_midi_dataframe(building_df=True)
dfmid = dus.get_audio_features(dfmid, 0, filepath, building_df=True)
dfmid = dfmid.fillna(0)
dfmid = np.array(dfmid)
sngdur = 0
with audioread.audio_open(filepath) as f:
sngdur += f.duration
np.set_string_function(
lambda x: repr(x).replace('(', '').replace(')', '').replace('array', '').replace(" ", ' '), repr=False)
np.set_printoptions(threshold=inf)
print("Building feature table")
df = get_column_dataframe()
c_flname = os.path.basename(filepath.split('/')[-1].split('.')[0])
c_sngdur = sngdur
c_slmmls = cur_data[0]
c_scmcos = cur_data[1]
c_scmeuc = cur_data[2]
c_smfcos = cur_data[3]
c_smfeuc = cur_data[4]
c_midinf = dfmid[0]
df.loc[0] = ["TBD", "TBD", c_flname, c_sngdur, c_slmmls[0], c_slmmls[1], c_scmcos[0], c_scmcos[1],
c_scmeuc[0], c_scmeuc[1], c_smfcos[0], c_smfcos[1], c_smfeuc[0], c_smfeuc[1],
c_midinf[2], c_midinf[3], c_midinf[4], c_midinf[5], c_midinf[6], c_midinf[7],
c_midinf[8], c_midinf[9], c_midinf[10], c_midinf[11], c_midinf[12], c_midinf[13],
c_midinf[14], c_midinf[15], c_midinf[0], c_midinf[1], c_midinf[16], c_midinf[17],
c_midinf[18], c_midinf[19], c_midinf[20], c_midinf[21], c_midinf[22], c_midinf[23],
c_midinf[24], c_midinf[25], c_midinf[26], c_midinf[27], c_midinf[28], c_midinf[29], "TBD"]
for col in df.columns:
df[col] = df[col].apply(lambda x: str(x)
.replace(", dtype=float32", "").replace("],", "]")
.replace("dtype=float32", "").replace("...,", ""))
if savetoexcel:
df.to_excel(os.path.join(MASTER_DIR, c_flname + '.xlsx'), index=False)
return df
def old_predictForm():
midpath = input("Enter path to folder or audio file: ")
df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx')) # 15,330
df = pd.DataFrame(df.loc[[0, 153]]).reset_index()
df2 = pd.DataFrame()
if not os.path.exists(midpath):
raise FileNotFoundError("Path not found or does not exist.")
else:
if os.path.isfile(midpath):
# df2 = pd.read_excel(os.path.join(MASTER_DIR, 'brahms_opus117_1.xlsx'))
df2 = old_preparePredictionData(midpath, savetoexcel=False)
elif os.path.isdir(midpath):
if midpath[-1] != "\\" or midpath[-1] != "/":
if "\\" in midpath:
midpath = midpath + "\\"
else:
midpath = midpath + "/"
cnt = 0
audio_extensions = ["3gp", "aa", "aac", "aax", "act", "aiff", "alac", "amr", "ape", "au", "awb", "dct",
"dss", "dvf", "flac", "gsm", "iklax", "ivs", "m4a", "m4b", "m4p", "mmf", "mp3", "mpc",
"msv", "nmf", "ogg", "oga", "mogg", "opus", "ra", "rm", "raw", "rf64", "sln", "tta",
"voc", "vox", "wav", "wma", "wv", "webm", "8svx", "cda", "mid", "midi", "MID" "mp4"]
for (mid_dirpath, mid_dirnames, mid_filenames) in os.walk(midpath):
for f in mid_filenames:
if f.endswith(tuple(audio_extensions)):
print("Reading file #" + str(cnt + 1))
mid_path = mid_dirpath + f
df2t = old_preparePredictionData(mid_path, savetoexcel=False)
df2 = pd.concat([df2, df2t], ignore_index=True).reset_index(drop=True)
cnt += 1
else:
raise FileNotFoundError("Path resulted in error.")
# Reshape test data to match training set
np.set_string_function(
lambda x: repr(x).replace('(', '').replace(')', '').replace('array', '').replace(" ", ' '), repr=False)
np.set_printoptions(threshold=inf)
for i in range(df2.shape[0]):
for col_name, data in df2.items():
if "[" in str(data[i]) and "]" in str(data[i]):
compdata = df.iloc[1][col_name]
if "[" in compdata and "]" in compdata:
if 'dtype=complex64' in compdata or 'dtype=complex64' in str(data[i]):
continue # Ignore since complex values aren't used in model
arr_1 = np.array(literal_eval(compdata))
# print("Evaluating:", str(data[i]))
arr_2 = np.array(literal_eval(str(data[i]).strip()))
arr_2 = np.resize(arr_2, arr_1.shape)
df2.at[i, col_name] = arr_2
# df = df2
df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx')) # 15,330
train_rows = df.shape[0]
df = pd.concat([df, df2], ignore_index=True).reset_index(drop=True)
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
df.drop(columns=['spectral_bandwidth_var', 'spectral_centroid_var', 'spectral_flatness_var', 'spectral_rolloff_var',
'zero_crossing_var', 'fourier_tempo_mean', 'fourier_tempo_var'], inplace=True)
nonlist = df[['duration', 'spectral_contrast_var']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'spectral_contrast_var', 'formtype'],
inplace=True)
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()).add_prefix(col) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
df = pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1) # print(df)
df = df.fillna(0)
X_test = df.iloc[:, 3:-1]
X_test_names = df.iloc[:, 0:3]
y_test = df.iloc[:, -1]
print("Test shape:", X_test.shape)
# Normalize Data
min_max_scaler = preprocessing.MinMaxScaler()
X_test = min_max_scaler.fit_transform(X_test)
print("Normalized Test shape:", X_test.shape)
# Convert to arrays for keras
X_test = np.array(X_test)
X_test_names = np.array(X_test_names)
y_test = np.array(y_test)
label_encoder = LabelEncoder()
label_encoder.classes_ = np.load(os.path.join(WEIGHT_DIR, 'form_classes.npy'))
skb_values = np.load(os.path.join(MASTER_DIR, "selectkbest_indices.npy"))
kbest_indices = np.argwhere(skb_values)
X_test = X_test[:, skb_values]
# Ensembling the model (5 networks) still yields 50% accuracy
model = formnn_cnn(5000, filters=8, lrval=0.00003, numclasses=12)
model.summary()
model.load_weights('best_form_model_50p.hdf5')
result = model.predict(X_test)
print(X_test.shape[0] - train_rows)
for i in range(X_test.shape[0] - train_rows):
print("Performing predictions on", X_test_names[i + train_rows])
resultlbl = label_encoder.inverse_transform([np.argmax(result[i + train_rows, :])])
print("\t\tPredicted form:", resultlbl)
"""
percent_correct = 0
pred_table = pd.DataFrame(columns=["Piece", "Predicted", "Actual"])
for i in range(len(result)):
resultlbl = label_encoder.inverse_transform([np.argmax(result[i, :])])
actuallbl = label_encoder.inverse_transform([np.argmax(y_test[i, :])])
pred_table.loc[i] = ([X_test_names[i][2], resultlbl, actuallbl])
percent_correct += 1 if resultlbl == actuallbl else 0
print(pred_table.to_string(index=False))
print("Accuracy: " + str(float(percent_correct / len(result)) * 100) + "%")
"""
# endregion
# region TestLabelModel
def formnn_lstm(n_timesteps, mode='concat', num_classes=1): # Try 'ave', 'mul', and 'sum' also
model = Sequential()
model.add(layers.Bidirectional(
layers.LSTM(32, return_sequences=True), input_shape=(None, 1), merge_mode=mode))
model.add(layers.TimeDistributed(
layers.Dense(num_classes, activation='sigmoid')))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# print(model.metrics_names)
return model
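# Shape sketch for formnn_lstm, assuming inputs produced by get_sequence below:
#   model = formnn_lstm(n_timesteps=10, mode='concat')
#   X, y = get_sequence(10)        # X.shape == y.shape == (1, 10, 1)
#   yhat = model.predict(X)        # per-timestep sigmoid labels, shape (1, 10, 1)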
def get_sequence(n_timesteps):
# create a sequence of random numbers in [0,1]
X = np.array([random.random() for _ in range(n_timesteps)])
# calculate cut-off value to change class values
limit = n_timesteps/4.0
# determine the class outcome for each item in cumulative sequence
y = np.array([0 if x < limit else 1 for x in np.cumsum(X)])
# reshape input and output data to be suitable for LSTMs
# print(X) [0.436576 0.35750063 0.41489899 0.19143477 0.01814592 0.89638702 0.01744344 0.63694126 0.614542 0.623846]
# print(y) [0 0 0 0 0 0 0 1 1 1]
X = X.reshape(1, n_timesteps, 1) # from (10,) to (1, 10, 1)
y = y.reshape(1, n_timesteps, 1)
return X, y
def test_trainLabelModel_helper(model, n_timesteps, num_epochs=250):
# early_stopping = EarlyStopping(patience=5, verbose=5, mode="auto") # Does not work without validation set
# checkpoint = ModelCheckpoint(os.path.join(MASTER_DIR, 'best_formNN_label_model.hdf5'), monitor='val_accuracy',
# verbose=0, save_best_only=False, mode='max', save_freq='epoch', save_weights_only=True)
history_loss = []
# history_val_loss = []
history_accuracy = []
# history_val_accuracy = []
tr_set = pd.DataFrame(du.ReadLabelSecondsPhrasesFromFolder(FULL_LABELPATH, valid_only=True)[0:2]).transpose()
tr_set = np.array(tr_set)
# print(tr_set)
# for i in range(num_epochs):
for i in range(tr_set.shape[0]):
Xt = tr_set[i][0]
yt = tr_set[i][1]
Xt = Xt.reshape(1, len(Xt), 1)
yt = yt.reshape(1, len(yt), 1)
# print(Xt)
# print(yt)
# X, y = get_sequence(n_timesteps) # generate new random sequence
X, y = get_sequence(tr_set.shape[0]) # generate new random sequence
# print(X, y)
model_history = model.fit(X, y, epochs=1, batch_size=1, verbose=1) # , callbacks=[checkpoint])
history_loss.append(model_history.history['loss'])
# history_val_loss.append(model_history.history['val_loss'])
history_accuracy.append(model_history.history['accuracy'])
# history_val_accuracy.append(model_history.history['val_accuracy'])
print("Epochs completed:", i)
# return [history_loss, history_val_loss, history_accuracy, history_val_accuracy]
return [history_loss, history_accuracy]
def test_trainLabelModel():
n_timesteps = 10
model = formnn_lstm(n_timesteps, mode='concat')
model_history = test_trainLabelModel_helper(model, n_timesteps, num_epochs=250)
plt.plot(model_history[0]) # plt.plot(model_history.history['loss'])
# plt.plot(model_history[1]) # plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_LabelModel_Loss.png')
plt.show()
plt.plot(model_history[1]) # plt.plot(model_history.history['accuracy'])
# plt.plot(model_history[3]) # plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_LabelModel_Accuracy.png')
plt.show()
print("Evaluating...")
X, y = get_sequence(n_timesteps)
score = model.evaluate(X, y)
print("Evaluation complete!\nScore:")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
print("Predicting...")
X, y = get_sequence(n_timesteps)
yhat = model.predict(X, verbose=1)
print("Prediction complete!")
for i in range(n_timesteps):
print('Expected:', y[0, i], 'Predicted:', yhat[0, i])
pass
# endregion
"""=================================================================================================================="""
# region DataTools
def generate_label_files():
"""
Generate label '.txt' file for each MIDI in its respective Form-folder.
Pre-timestamps each file with silence and end times
"""
cnt = 1
for folder in MIDI_Data_Dir:
for file in os.listdir(folder):
foldername = folder.split('\\')[-1]
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), foldername) + '/' + os.path.basename(name) + '.txt'
if not os.path.exists(path):
with audioread.audio_open(folder + '/' + filename) as f:
print("Reading duration of " + os.path.basename(name))
totalsec = f.duration
fwrite = open(path, "w+")
fwrite.write("0.000\tSilence\n" + str(totalsec) + '00\tEnd')
fwrite.close()
cnt += 1
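# Example contents of a generated label file (duration value is illustrative; the
# trailing "00" comes from the string concatenation above). Times and labels are
# tab-separated, presumably to be filled in with form-section timestamps later:
#   0.000       Silence
#   183.4200    End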
def get_total_duration():
"""
Return the sum of all audio file durations together
"""
dur_sum = 0
for folder in MIDI_Data_Dir:
for file in os.listdir(folder):
filename, name = file, file.split('/')[-1].split('.')[0]
with audioread.audio_open(folder + '/' + filename) as f:
dur_sum += f.duration
print("Total duration: " + str(dur_sum) + " seconds")
# Total duration: 72869.0 seconds
# = 1214.4833 minutes = 20.241389 hours = 20 hours, 14 minutes, 29 seconds
return dur_sum
def prepare_model_training_input():
print("Preparing MLS inputs")
dus.util_main(feature="mls")
print("\nPreparing SSLM-MFCC-COS inputs")
dus.util_main(feature="mfcc", mode="cos")
print("\nPreparing SSLM-MFCC-EUC inputs")
dus.util_main(feature="mfcc", mode="euc")
print("\nPreparing SSLM-CRM-COS inputs")
dus.util_main(feature="chroma", mode="cos")
print("\nPreparing SSLM-CRM-EUC inputs")
dus.util_main(feature="chroma", mode="euc")
def multi_input_generator_helper(gen1, gen2, gen3, gen4, concat=True):
while True:
sslm1 = next(gen1)[0]
sslm2 = next(gen2)[0]
sslm3 = next(gen3)[0]
sslm4 = next(gen4)[0]
if not concat:
yield [sslm1, sslm2, sslm3, sslm4], sslm1.shape
continue
if sslm2.shape != sslm1.shape:
sslm2 = resize(sslm2, sslm1.shape)
if sslm3.shape != sslm1.shape:
sslm3 = resize(sslm3, sslm1.shape)
if sslm4.shape != sslm1.shape:
sslm4 = resize(sslm4, sslm1.shape)
yield tf.expand_dims(
np.concatenate((sslm1,
np.concatenate((sslm2,
np.concatenate((sslm3, sslm4),
axis=-1)), axis=-1)), axis=-1), axis=-1), sslm1.shape
def multi_input_generator(gen1, gen2, gen3, gen4, gen5, gen6, feature=2, concat=True, expand_dim_6=True, augment=False):
while True:
mlsgen = next(gen1)
mlsimg = mlsgen[0]
if augment:
yield [mlsimg, [[0, 0], [0, 0], [0, 0], [0, 0]],
next(gen6)[0]], mlsgen[1][feature] # tf.expand_dims(next(gen6)[0], axis=0)], mlsgen[1][feature]
else:
sslmimgs, sslmshape = next(multi_input_generator_helper(gen2, gen3, gen4, gen5, concat))
if not expand_dim_6:
yield [mlsimg, sslmimgs, next(gen6)[0]], mlsgen[1][feature]
continue
if mlsimg.shape != sslmshape:
mlsimg = resize(mlsimg, sslmshape)
yield [mlsimg, sslmimgs, tf.expand_dims(next(gen6)[0], axis=0)], mlsgen[1][feature]
def get_column_dataframe():
df = pd.DataFrame(columns=['piece_name', 'composer', 'filename', 'duration',
'ssm_log_mel_mean', 'ssm_log_mel_var',
'sslm_chroma_cos_mean', 'sslm_chroma_cos_var',
'sslm_chroma_euc_mean', 'sslm_chroma_euc_var',
'sslm_mfcc_cos_mean', 'sslm_mfcc_cos_var',
'sslm_mfcc_euc_mean', 'sslm_mfcc_euc_var', # ---{
'chroma_cens_mean', 'chroma_cens_var',
'chroma_cqt_mean', 'chroma_cqt_var',
'chroma_stft_mean', 'chroma_stft_var',
'mel_mean', 'mel_var',
'mfcc_mean', 'mfcc_var',
'spectral_bandwidth_mean', 'spectral_bandwidth_var',
'spectral_centroid_mean', 'spectral_centroid_var',
'spectral_contrast_mean', 'spectral_contrast_var',
'spectral_flatness_mean', 'spectral_flatness_var',
'spectral_rolloff_mean', 'spectral_rolloff_var',
'poly_features_mean', 'poly_features_var',
'tonnetz_mean', 'tonnetz_var',
'zero_crossing_mean', 'zero_crossing_var',
'tempogram_mean', 'tempogram_var',
'fourier_tempo_mean', 'fourier_tempo_var', # }---
'formtype'])
return df
def create_form_dataset(filedir=FULL_DIR, labeldir=FULL_LABELPATH, outfile='full_dataset.xlsx', augment=False):
# if augment then ignore sslms and replace with [0, 0]
mls_full = dus.BuildDataloader(os.path.join(filedir, 'MLS/'),
label_path=labeldir, batch_size=1, reshape=False)
midi_full = dus.BuildMIDIloader(os.path.join(filedir, 'MIDI/'),
label_path=labeldir, batch_size=1, reshape=False, building_df=True)
if not augment:
sslm_cmcos_full = dus.BuildDataloader(os.path.join(filedir, 'SSLM_CRM_COS/'),
label_path=labeldir, batch_size=1, reshape=False)
sslm_cmeuc_full = dus.BuildDataloader(os.path.join(filedir, 'SSLM_CRM_EUC/'),
label_path=labeldir, batch_size=1, reshape=False)
sslm_mfcos_full = dus.BuildDataloader(os.path.join(filedir, 'SSLM_MFCC_COS/'),
label_path=labeldir, batch_size=1, reshape=False)
sslm_mfeuc_full = dus.BuildDataloader(os.path.join(filedir, 'SSLM_MFCC_EUC/'),
label_path=labeldir, batch_size=1, reshape=False)
print("Done building dataloaders, merging...")
full_datagen = multi_input_generator(mls_full, sslm_cmcos_full, sslm_cmeuc_full, sslm_mfcos_full,
sslm_mfeuc_full, midi_full, concat=False, expand_dim_6=False)
print("Merging complete. Printing...")
else:
print("Done building dataloaders, merging...")
full_datagen = multi_input_generator(mls_full, None, None, None, None,
midi_full, concat=False, expand_dim_6=False, augment=True)
print("Merging complete. Printing...")
np.set_string_function(
lambda x: repr(x).replace('(', '').replace(')', '').replace('array', '').replace(" ", ' '), repr=False)
np.set_printoptions(threshold=inf)
df = get_column_dataframe()
label_encoder = LabelEncoder()
label_encoder.classes_ = np.load(os.path.join(WEIGHT_DIR, 'form_classes.npy'))
for indx, cur_data in enumerate(full_datagen):
if indx == len(mls_full):
break
c_flname = mls_full.getSong(indx).replace(".wav.npy", "").replace(".wav", "").replace(".npy", "")
c_sngdur = mls_full.getDuration(indx)
c_slmmls = cur_data[0][0]
c_scmcos = cur_data[0][1][0]
c_scmeuc = cur_data[0][1][1]
c_smfcos = cur_data[0][1][2]
c_smfeuc = cur_data[0][1][3]
c_midinf = cur_data[0][2]
c_flabel = cur_data[1]
c_flabel = label_encoder.inverse_transform(np.where(c_flabel == 1)[0])[0]
df.loc[indx] = ["", "", c_flname, c_sngdur, c_slmmls[0], c_slmmls[1], c_scmcos[0], c_scmcos[1],
c_scmeuc[0], c_scmeuc[1], c_smfcos[0], c_smfcos[1], c_smfeuc[0], c_smfeuc[1],
c_midinf[2], c_midinf[3], c_midinf[4], c_midinf[5], c_midinf[6], c_midinf[7],
c_midinf[8], c_midinf[9], c_midinf[10], c_midinf[11], c_midinf[12], c_midinf[13],
c_midinf[14], c_midinf[15], c_midinf[0], c_midinf[1], c_midinf[16], c_midinf[17],
c_midinf[18], c_midinf[19], c_midinf[20], c_midinf[21], c_midinf[22], c_midinf[23],
c_midinf[24], c_midinf[25], c_midinf[26], c_midinf[27], c_midinf[28], c_midinf[29], c_flabel]
for col in df.columns:
df[col] = df[col].apply(lambda x: str(x)
.replace(", dtype=float32", "").replace("],", "]")
.replace("dtype=float32", "").replace("...,", ""))
# df.to_csv(os.path.join(MASTER_DIR, 'full_dataset.csv'), index=False)
df.to_excel(os.path.join(MASTER_DIR, outfile), index=False)
def prepare_augmented_audio(inpath=FULL_DIR, savepath='', augmentation=1):
if not os.path.exists(savepath):
os.makedirs(savepath)
print("New directory created:", savepath)
def inject_noise(adata, noise_factor):
noise = np.random.randn(len(adata))
augmented_data = adata + noise_factor * noise
augmented_data = augmented_data.astype(type(adata[0]))
return augmented_data
def shift_time(adata, sampling_rate, shift_max, shift_direction):
shift = np.random.randint(sampling_rate * shift_max)
if shift_direction == 'right':
shift = -shift
elif shift_direction == 'both':
direction = np.random.randint(0, 2)
if direction == 1:
shift = -shift
augmented_data = np.roll(adata, shift)
# Set the leading/trailing samples exposed by the shift to silence
if shift > 0:
augmented_data[:shift] = 0
else:
augmented_data[shift:] = 0
return augmented_data
def shift_pitch(adata, sampling_rate, pitch_factor):
return librosa.effects.pitch_shift(adata, sampling_rate, n_steps=pitch_factor)
def shift_speed(adata, speed_factor):
return librosa.effects.time_stretch(adata, speed_factor)
start_time = time.time()
for (dir_path, dnames, fnames) in os.walk(inpath):
for f in fnames:
augdatapath = savepath + f.split('.')[0] + '_aug' + str(augmentation) + '.wav'
if os.path.exists(augdatapath):
continue
start_time_song = time.time()
fdatapath = dir_path + '/' + f
y, sr = librosa.load(fdatapath, sr=None)
sr = 44100
if augmentation == 1:
y = shift_speed(y, 0.7) # Slower
y = shift_pitch(y, sr, -6) # Shift down 6 half-steps (tritone)
y = shift_time(y, sr, random.random(), 'right')
y = inject_noise(y, 0.005)
elif augmentation == 2:
y = shift_speed(y, 1.4) # Faster
y = shift_pitch(y, sr, 4) # Shift up 4 half-steps (major 3rd)
y = shift_time(y, sr, random.random(), 'right')
y = inject_noise(y, 0.01)
elif augmentation == 3:
y = shift_speed(y, 0.5)
y = shift_pitch(y, sr, 7) # Shift up perfect 5th
y = shift_time(y, sr, random.random(), 'right')
y = inject_noise(y, 0.003)
elif augmentation == 4:
y = shift_speed(y, 2)
y = shift_pitch(y, sr, 8) # Shift up minor 6th
y = shift_time(y, sr, random.random(), 'right')
y = inject_noise(y, 0.02)
elif augmentation == 5:
y = shift_speed(y, 1.1)
y = shift_pitch(y, sr, 1) # Shift up minor 2nd
y = shift_time(y, sr, random.random(), 'right')
y = inject_noise(y, 0.007)
sf.write(augdatapath, y, sr)
print("Successfully saved file:", augdatapath, "\tDuration: {:.2f}s".format(time.time() - start_time_song))
print("All files have been converted. Duration: {:.2f}s".format(time.time() - start_time))
pass
def generate_augmented_datasets():
for i in range(1, 6):
prepare_augmented_audio(savepath=os.path.join(MASTER_INPUT_DIR, 'Aug' + str(i) + '/MIDI/'), augmentation=i)
dus.util_main(feature="mls", inpath=os.path.join(MASTER_INPUT_DIR, 'Aug' + str(i) + '/'),
midpath=os.path.join(MASTER_INPUT_DIR, 'Aug' + str(i) + '/'))
create_form_dataset(filedir=os.path.join(MASTER_INPUT_DIR, 'Aug' + str(i) + '/'), augment=True,
outfile='full_dataset_aug' + str(i) + '.xlsx')
df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx'))
df1 = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset_aug1.xlsx'))
df2 = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset_aug2.xlsx'))
df3 = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset_aug3.xlsx'))
df4 = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset_aug4.xlsx'))
df5 = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset_aug5.xlsx'))
df = pd.concat([df, df1, df2, df3, df4, df5], ignore_index=True).reset_index()
df.to_excel(os.path.join(MASTER_DIR, 'Data/full_augmented_dataset.xlsx'), index=False)
def prepare_lstm_peaks():
MIDI_FILES = os.path.join(MASTER_INPUT_DIR, 'Full/MIDI/')
PEAK_DIR = os.path.join(MASTER_INPUT_DIR, 'Full/PEAKS/')
cnt = len(os.listdir(PEAK_DIR)) + 1
for file in os.listdir(MIDI_FILES):
foldername = MIDI_FILES.split('\\')[-1]
filename, name = file, file.split('/')[-1].split('.')[0]
if str(os.path.basename(name)) + ".npy" in os.listdir(PEAK_DIR):
continue
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
fullfilename = MIDI_FILES + '/' + filename
peaks = du.peak_picking(fullfilename, name, foldername, returnpeaks=True)
print(peaks)
np.save(os.path.join(PEAK_DIR, os.path.basename(name)), peaks)
cnt += 1
# endregion
# region FormModel
def trainFormModel():
# region DataPreProcessing
df = pd.read_excel(os.path.join(MASTER_DIR, 'Data/full_augmented_dataset.xlsx'))
# df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx'))
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
# """
df = df.drop(columns=['sslm_chroma_cos_mean', 'sslm_chroma_cos_var', 'sslm_chroma_euc_mean', 'sslm_chroma_euc_var',
'sslm_mfcc_cos_mean', 'sslm_mfcc_cos_var', 'sslm_mfcc_euc_mean', 'sslm_mfcc_euc_var'])
# """
df.drop(columns=['spectral_bandwidth_var', 'spectral_centroid_var', 'spectral_flatness_var', 'spectral_rolloff_var',
'zero_crossing_var', 'fourier_tempo_mean', 'fourier_tempo_var'], inplace=True) # Remove useless
# nonlist = df[['duration', 'spectral_contrast_var']]
nonlist = df[['duration']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'spectral_contrast_var', 'formtype'],
inplace=True)
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var', 'mel_mean', 'mel_var', 'chroma_stft_mean', 'chroma_stft_var']]
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var']]
df = df[['ssm_log_mel_mean']] # best decision tree accuracy
print("Fixing broken array cells as needed...")
def fix_broken_arr(strx):
if '[' in strx:
if ']' in strx:
return strx
else:
return strx + ']'
for col in df.columns:
df[col] = df[col].apply(lambda x: fix_broken_arr(x))
# print("Headers:", pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1).columns)
# Headers: Index(['piece_name', 'composer', 'filename', 'duration', 'ssm_log_mel_mean', 'formtype'], dtype='object')
print("Done processing cells, building training set...")
# d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()).add_prefix(col) for col in df.columns]
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
df = pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1) # print(df)
train, test = train_test_split(df, test_size=0.169, random_state=0, stratify=df['formtype']) # test_s=.169 gave 50%
# df.to_csv(os.path.join(MASTER_DIR, 'full_modified_dataset.csv'))
X_train = train.iloc[:, 3:-1]
# X_train_names = train.iloc[:, 0:3]
y_train = train.iloc[:, -1]
print("Train shape:", X_train.shape)
X_test = test.iloc[:, 3:-1]
# X_test_names = test.iloc[:, 0:3]
y_test = test.iloc[:, -1]
print("Test shape:", X_test.shape)
# Normalize Data
"""
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train) # Good for decision tree
X_test = min_max_scaler.fit_transform(X_test)
"""
# X_train = preprocessing.scale(X_train)
# X_test = preprocessing.scale(X_test)
# """
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std # Good for decision tree
X_test = (X_test - mean) / std
# """
print("Normalized Train shape:", X_train.shape)
print("Normalized Test shape:", X_test.shape)
# Convert to arrays for keras
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
label_encoder = LabelEncoder()
old_y_train = y_train
# old_y_test = y_test
int_y_train = label_encoder.fit_transform(y_train)
print(int_y_train.shape)
# int_y_train = int_y_train.reshape(len(int_y_train), 1)
int_y_test = label_encoder.fit_transform(y_test)
# int_y_test = int_y_test.reshape(len(int_y_test), 1)
y_train = to_categorical(label_encoder.fit_transform(y_train))
y_test = to_categorical(label_encoder.fit_transform(y_test))
print(y_train.shape, y_test.shape)
print(label_encoder.classes_, "\n")
""" BASE MODEL """
# DummyClassifier makes predictions while ignoring input features
dummy_clf = DummyClassifier(strategy="stratified")
dummy_clf.fit(X_train, y_train)
DummyClassifier(strategy='stratified')
dummy_clf.predict(X_test)
print("Dummy classifier accuracy:", dummy_clf.score(X_test, y_test))
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
clf.predict(X_test)
print("Decision tree accuracy:", clf.score(X_test, y_test))
""" FEATURE TUNING """
selector = SelectKBest(f_classif, k=15) # 1000 if using RFE
Z_train = selector.fit_transform(X_train, old_y_train)
skb_values = selector.get_support()
Z_test = X_test[:, skb_values]
np.save(os.path.join(WEIGHT_DIR, "selectkbest_indices.npy"), skb_values)
print(Z_train.shape)
print(Z_test.shape)
"""
plt.title('Feature Importance')
plt.ylabel('Score')
plt.xlabel('Feature')
plt.plot(selector.scores_)
plt.savefig('Initial_Feature_Importance.png')
plt.show()
"""
print("Indices of top 10 features:", (-selector.scores_).argsort()[:10])
""" KBEST MODEL """
clf = tree.DecisionTreeClassifier()
clf = clf.fit(Z_train, y_train)
clf.predict(Z_test)
# treedepth = clf.tree_.max_depth
skb_score = clf.score(Z_test, y_test)
print("K-Best Decision tree accuracy:", skb_score) # Highest score: 84.3% accuracy
# """
# Accuracy 0.211, stick with SKB? Gives good loss though
clf = LinearSVC(C=0.01, penalty="l1", dual=False)
clf.fit(X_train, old_y_train)
rfe_selector = RFE(clf, 15, verbose=5)
rfe_selector = rfe_selector.fit(Z_train, old_y_train)
# rfe_selector = rfe_selector.fit(X_train, old_y_train)
rfe_values = rfe_selector.get_support()
# np.save(os.path.join(MASTER_DIR, "rfebest_indices.npy"), rfe_values)
print("Indices of RFE important features:", np.where(rfe_values)[0])
W_train = Z_train[:, rfe_values]
W_test = Z_test[:, rfe_values]
# "" " RFE MODEL " ""
clf = tree.DecisionTreeClassifier()
clf = clf.fit(W_train, y_train)
clf.predict(W_test)
rfe_score = clf.score(W_test, y_test)
print("RFE Decision tree accuracy:", rfe_score) # Highest score: 83.7% accuracy, typically better than SKB
"""
plt.figure(figsize=(30, 20)) # set plot size (denoted in inches)
tree.plot_tree(clf, fontsize=10)
plt.show()
plt.savefig('tree_high_dpi', dpi=100)
"""
# """
# endregion
if skb_score > rfe_score:
X_train = Z_train
X_test = Z_test
else:
X_train = W_train
X_test = W_test
# treedepth = clf.tree_.max_depth
# TreeGrad Deep Neural Decision Forest - 83% accuracy
model = TGDClassifier(num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
autograd_config={'refit_splits': True})
model.fit(X_train, int_y_train)
acc = accuracy_score(int_y_test, model.predict(X_test))
print('TreeGrad Deep Neural Decision Forest accuracy: ', acc)
print('Plotting 0th tree...')  # one tree uses a categorical feature to split
lgb.plot_tree(model.base_model_, tree_index=0, figsize=(15, 15), show_info=['split_gain'])
plt.savefig('TreeGrad_Model.png')
plt.show()
print('Plotting feature importances...')
lgb.plot_importance(model.base_model_, max_num_features=15)
plt.savefig('TreeGrad_Feature_Importance.png')
plt.show()
predictions = model.predict(X_test)
# predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
# actual = y_test.argmax(axis=1)
actual = int_y_test.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_], columns=[i for i in label_encoder.classes_])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('TreeGrad_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('TreeGrad_Classification_Report.png')
plt.show()
with open(os.path.join(WEIGHT_DIR, 'treegrad_model_save.pkl'), 'wb') as f:
pickle.dump(model, f)
with open(os.path.join(WEIGHT_DIR, 'treegrad_model_save.pkl'), 'rb') as f:
model2 = pickle.load(f)
acc = accuracy_score(int_y_test, model2.predict(X_test))
print('TreeGrad Deep Neural Decision Forest accuracy from save: ', acc)
pass
def preparePredictionData(filepath, savetoexcel=False, verbose=True):
if verbose:
print("Preparing MLS")
mls = dus.util_main_helper(feature="mls", filepath=filepath, predict=True)
sngdur = 0
with audioread.audio_open(filepath) as f:
sngdur += f.duration
np.set_string_function(
lambda x: repr(x).replace('(', '').replace(')', '').replace('array', '').replace(" ", ' '), repr=False)
np.set_printoptions(threshold=inf)
if verbose:
print("Building feature table")
df = pd.DataFrame(columns=['piece_name', 'composer', 'filename', 'duration', 'ssm_log_mel_mean', 'formtype'])
c_flname = os.path.basename(filepath.split('/')[-1].split('.')[0])
df.loc[0] = ["TBD", "TBD", c_flname, sngdur, mls[0], "TBD"]
for col in df.columns:
df[col] = df[col].apply(lambda x: str(x)
.replace(", dtype=float32", "").replace("],", "]")
.replace("dtype=float32", "").replace("...,", ""))
if savetoexcel:
df.to_excel(os.path.join(MASTER_DIR, c_flname + '.xlsx'), index=False)
return df
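# Minimal usage sketch (the path is hypothetical): preparePredictionData builds a
# one-row feature table matching the training layout, with 'formtype' left as "TBD":
#   df = preparePredictionData('Data/brahms_opus117_1.wav')
#   print(df[['filename', 'duration']])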
def predictForm(midpath=None, verbose=True):
if midpath is None:
midpath = input("Enter path to folder or audio file: ")
df = pd.DataFrame()
if not os.path.exists(midpath):
raise FileNotFoundError("Path not found or does not exist.")
else:
if os.path.isfile(midpath):
# df2 = pd.read_excel(os.path.join(MASTER_DIR, 'brahms_opus117_1.xlsx'))
df = preparePredictionData(midpath, savetoexcel=False, verbose=verbose)
elif os.path.isdir(midpath):
if midpath[-1] != "\\" or midpath[-1] != "/":
if "\\" in midpath:
midpath = midpath + "\\"
else:
midpath = midpath + "/"
cnt = 0
audio_extensions = ["3gp", "aa", "aac", "aax", "act", "aiff", "alac", "amr", "ape", "au", "awb", "dct",
"dss", "dvf", "flac", "gsm", "iklax", "ivs", "m4a", "m4b", "m4p", "mmf", "mp3", "mpc",
"msv", "nmf", "ogg", "oga", "mogg", "opus", "ra", "rm", "raw", "rf64", "sln", "tta",
"voc", "vox", "wav", "wma", "wv", "webm", "8svx", "cda", "mid", "midi", "MID", "mp4"]
for (mid_dirpath, mid_dirnames, mid_filenames) in os.walk(midpath):
for f in mid_filenames:
if f.endswith(tuple(audio_extensions)):
if verbose:
print("Reading file #" + str(cnt + 1))
mid_path = mid_dirpath + f
dft = preparePredictionData(mid_path, savetoexcel=False, verbose=verbose)
df = pd.concat([df, dft], ignore_index=True).reset_index(drop=True)
cnt += 1
else:
raise FileNotFoundError("Path resulted in error.")
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
nonlist = df[['duration']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'formtype'], inplace=True)
df = df[['ssm_log_mel_mean']]
if verbose:
print("Fixing broken array cells as needed...")
def fix_broken_arr(strx):
if '[' in strx:
if ']' in strx:
return strx
else:
return strx + ']'
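    # Illustration of the repair above: fix_broken_arr('[0.12 0.34') -> '[0.12 0.34]';
    # cells that already end with ']' pass through unchanged.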
for col in df.columns:
df[col] = df[col].apply(lambda x: fix_broken_arr(x))
if verbose:
print("Done processing cells, building data set...")
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
df = pd.concat([pd.concat([names,
|
pd.concat([nonlist, df], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import Series, compat
from pandas.core.indexes.period import IncompatibleFrequency
import pandas.util.testing as tm
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic(object):
@pytest.mark.parametrize(
'ts',
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(),
lambda x: tm.makeFloatSeries(),
True)
])
@pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',
'truediv', 'div', 'pow'])
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename('ts')
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
if opname == 'div' and compat.PY3:
pytest.skip('div test only for Py3')
op = getattr(Series, opname)
        if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
class TestSeriesArithmetic(object):
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype='M8[ns]')
b = Series(dtype='m8[ns]')
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
with pytest.raises(TypeError):
b - a
def test_add_series_with_period_index(self):
rng = pd.period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with tm.assert_raises_regex(IncompatibleFrequency, msg):
ts + ts.asfreq('D', how="end")
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series([pd.Timestamp('20111230'), pd.Timestamp('20120101'),
pd.Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([pd.Timestamp('20111231'), pd.Timestamp('20120102'),
pd.Timestamp('20120104')])
dt1 - dt2
dt2 - dt1
# ## datetime64 with timetimedelta ###
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timetimedelta with datetime64 ###
td1 + dt1
dt1 + td1
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison(object):
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assert_raises_regex(ValueError, msg):
getattr(left, op)(right, axis=1)
class TestSeriesComparison(object):
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
with pytest.raises(ValueError):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError):
a == b
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([1], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([1], ['bool']))
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt])
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('baz', 'baz', 'baz')])
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range('1949-06-07 03:00:00',
freq='H', periods=5, name=names[0])
ser =
|
Series(dti)
|
pandas.Series
|
# -*- coding: utf-8 -*-
import requests
import pandas as pd
from io import StringIO
import numpy as np
import time
from functools import reduce
#2SXOFPK5YGV8VIVI
timezones={}
#function = 'TIME_SERIES_INTRADAY'
apii = 'https://www.alphavantage.co/query?function={function}&symbol={symbol}&interval={interval}&outputsize=full&datatype=csv&apikey=2SXOFPK5YGV8VIVI'
apid = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&outputsize=full&datatype=csv&apikey=2SXOFPK5YGV8VIVI'
#https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=ASML&interval=1min&outputsize=compact&datatype=csv&time_period=0&apikey=2SXOFPK5YGV8VIVI
sector = 'https://www.alphavantage.co/query?function=SECTOR&datatype=csv&apikey=2SXOFPK5YGV8VIVI'
s_type = ['close','high','low']#,'open']
ma_types = [0,1,2,3,4,5,6,7,8]
# Moving average type (matype). By default matype=0. INT: 0 = SMA, 1 = EMA, 2 = Weighted Moving Average (WMA), 3 = Double Exponential Moving Average (DEMA), 4 = Triple Exponential Moving Average (TEMA), 5 = Triangular Moving Average (TRIMA), 6 = T3 Moving Average, 7 = Kaufman Adaptive Moving Average (KAMA), 8 = MESA Adaptive Moving Average (MAMA). The dict just below restates this mapping.
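# For readability, the matype codes listed above as a dict. This is an added, illustrative
# mapping; the name MA_TYPE_NAMES is not used anywhere else in this script.
MA_TYPE_NAMES = {
    0: 'SMA', 1: 'EMA', 2: 'WMA', 3: 'DEMA', 4: 'TEMA',
    5: 'TRIMA', 6: 'T3', 7: 'KAMA', 8: 'MAMA',
}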
indicator_dict = {
'sma':'https://www.alphavantage.co/query?function=SMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=2SXOFPK5YGV8VIVI',
'ema':'https://www.alphavantage.co/query?function=EMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'tema':'https://www.alphavantage.co/query?function=TEMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'macd':'https://www.alphavantage.co/query?function=MACD&symbol={symbol}&interval={interval}&series_type=close&fastperiod=12&slowperiod=26&signalperiod=9&datatype=csv&apikey=<KEY>',
'macdext':'https://www.alphavantage.co/query?function=MACDEXT&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&signalperiod={signalperiod}&fastmatype={fastmatype}&slowmatype={slowmatype}&signalmatype={signalmatype}&datatype=csv&apikey=<KEY>',
'stoch':'https://www.alphavantage.co/query?function=STOCH&symbol={symbol}&interval={interval}&fastkperiod={fastkperiod}&slowkperiod={slowkperiod}&slowdperiod={slowdperiod}&slowkmatype={slowkmatype}&slowdmatype={slowdmatype}&datatype=csv&apikey=<KEY>',
'stochf':'https://www.alphavantage.co/query?function=STOCHF&symbol={symbol}&interval={interval}&fastkperiod={fastkperiod}&fastdperiod={fastdperiod}&fastdmatype={fastdmatype}&datatype=csv&apikey=<KEY>',
'rsi':'https://www.alphavantage.co/query?function=RSI&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'stochrsi':'https://www.alphavantage.co/query?function=STOCHRSI&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&fastkperiod={fastkperiod}&fastdperiod={fastdperiod}&fastdmatype={fastdmatype}&datatype=csv&apikey=<KEY>',
'willr':'https://www.alphavantage.co/query?function=WILLR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'adx':'https://www.alphavantage.co/query?function=ADX&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'adxr':'https://www.alphavantage.co/query?function=ADXR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'apo':'https://www.alphavantage.co/query?function=APO&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&matype={matype}&datatype=csv&apikey=<KEY>',
'ppo':'https://www.alphavantage.co/query?function=PPO&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&matype={matype}&datatype=csv&apikey=<KEY>',
'mom':'https://www.alphavantage.co/query?function=MOM&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'bop':'https://www.alphavantage.co/query?function=BOP&symbol={symbol}&interval={interval}&datatype=csv&apikey=<KEY>',
'cci':'https://www.alphavantage.co/query?function=CCI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>I',
'cmo':'https://www.alphavantage.co/query?function=CMO&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=2SXOFPK5YGV8VIVI',
'roc':'https://www.alphavantage.co/query?function=ROC&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'rocr':'https://www.alphavantage.co/query?function=ROCR&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=2SXOFPK5YGV8VIVI',
'aroon':'https://www.alphavantage.co/query?function=AROON&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=2SXOFPK5YGV8VIVI',
'aroonosc':'https://www.alphavantage.co/query?function=AROONOSC&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'mfi':'https://www.alphavantage.co/query?function=MFI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=2SX<KEY>VIVI',
'trix':'https://www.alphavantage.co/query?function=TRIX&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'ultosc':'https://www.alphavantage.co/query?function=ULTOSC&symbol={symbol}&interval={interval}&timeperiod1={timeperiod1}&timeperiod2={timeperiod2}&timeperiod3={timeperiod3}&datatype=csv&apikey=<KEY>',
'dx':'https://www.alphavantage.co/query?function=DX&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'minus_di':'https://www.alphavantage.co/query?function=MINUS_DI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'plus_di':'https://www.alphavantage.co/query?function=PLUS_DI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'minus_dm':'https://www.alphavantage.co/query?function=MINUS_DM&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'plus_dm':'https://www.alphavantage.co/query?function=PLUS_DM&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'bbands':'https://www.alphavantage.co/query?function=BBANDS&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&nbdevup={nbdevup}&nbdevdn={nbdevdn}&matype={matype}&datatype=csv&apikey=<KEY>',
'midpoint':'https://www.alphavantage.co/query?function=MIDPOINT&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'midprice':'https://www.alphavantage.co/query?function=MIDPRICE&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=2SXOFPK5YGV8VIVI',
'sar':'https://www.alphavantage.co/query?function=SAR&symbol={symbol}&interval={interval}&acceleration={acceleration}&maximum={maximum}&datatype=csv&apikey=<KEY>',
'trange':'https://www.alphavantage.co/query?function=TRANGE&symbol={symbol}&interval={interval}&datatype=csv&apikey=<KEY>',
'atr':'https://www.alphavantage.co/query?function=ATR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'natr':'https://www.alphavantage.co/query?function=NATR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=<KEY>',
'ad':'https://www.alphavantage.co/query?function=AD&symbol={symbol}&interval={interval}&datatype=csv&apikey=2SX<KEY>I',
'adosc':'https://www.alphavantage.co/query?function=ADOSC&symbol={symbol}&interval={interval}&fastperiod={fastperiod}&slowperiod={slowperiod}&datatype=csv&apikey=<KEY>',
'obv':'https://www.alphavantage.co/query?function=OBV&symbol={symbol}&interval={interval}&datatype=csv&apikey=<KEY>',
'ht_trendline':'https://www.alphavantage.co/query?function=HT_TRENDLINE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'ht_sine':'https://www.alphavantage.co/query?function=HI_SINE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'ht_trendmode':'https://www.alphavantage.co/query?function=HT_TRENDMODE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'ht_dcperiod':'https://www.alphavantage.co/query?function=HT_DCPERIOD&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=2SXOFPK5YGV8VIVI',
'ht_dcphase':'https://www.alphavantage.co/query?function=HT_DCPHASE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=<KEY>',
'ht_dcphasor':'https://www.alphavantage.co/query?function=HT_DCPHASOR&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=<KEY>'
}
def shifter(shifts,shift_df,direction='up'):
    # Takes a number of shifts and a Series/DataFrame and returns a list of `shifts` copies,
    # each shifted one step further (0, -1, ..., -(shifts-1)) so they can be summed row-wise;
    # see the usage sketch right after this function.
if shifts >1:
if direction =='up':
output = shifter(shifts-1,shift_df,direction)
output.append(shift_df.shift(0-shifts+1))
return output
elif shifts ==1:
output = [shift_df]
return output
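# Usage sketch for shifter() (comments only, nothing here is executed). Assuming a small
# Series s = pd.Series([1, 2, 3, 4, 5]):
#   shifter(3, s) -> [s.shift(0), s.shift(-1), s.shift(-2)]
#   reduce(lambda a, b: a.add(b), shifter(3, s)).values / 3 -> 3-period moving average,
# which is exactly the pattern moving_a() uses for its SMA columns below.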
def moving_a(df,ma,symbol,interval):
# api = indicator_dict[ma]
ma_range = [5,10,15,20,35,50,65,100,125,200,250]
new_series = pd.DataFrame()
for i in range(len(ma_range)):
for s in s_type:
t = ma_range[i]
name = '_'.join(['sma',str(t),s])
new_series[name] = reduce(lambda x,y: x.add(y),shifter(t,df[s])).values/t
name = '_'.join(['ema',str(t),s])
counter = (len(new_series)-t+1)
calc_ema = []
k = 2/(t+1)
for n in range(len(new_series)-t+1):
if n == 0:
calc_ema.extend(list(np.zeros(t-1)+np.nan))
ema_one = float(new_series.iloc[-t:-t+1,i*4:i*4+1].values)
calc_ema.insert(0,ema_one)
elif n !=0:
ema_in = (calc_ema[0]*(1-k)+k*float(df.loc[counter-n-1:counter-n-1,s].values))
calc_ema.insert(0,ema_in)
new_series[name]= calc_ema
return new_series
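# For reference, the EMA loop above implements the standard recurrence with k = 2 / (t + 1):
#   EMA_i = k * close_i + (1 - k) * EMA_(i-1)
# seeded from an SMA value of the oldest full window (rows arrive newest-first from the API).
# e.g. for t = 5, k = 2/6 ~ 0.33, so each new close contributes roughly a third of the new EMA.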
def macdext_get(df,macd,symbol, interval):#,types=False,time_period=False):
# out_df = pd.DataFrame()
macd_range = [[5,10,3],[10,20,7],[12,26,9],[15,35,11]]
api = indicator_dict[macd]
macd_ma = 1
first=True
for i in macd_range:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,series_type=s,fastperiod=i[0],slowperiod=i[1],signalperiod=i[2],fastmatype=ma_types[1],slowmatype=ma_types[1],signalmatype=ma_types[1]))
time.sleep(12)
fixed = StringIO(indicator.content.decode('utf-8'))
            # pandas.read_csv needs a file-like object, so wrap the response text in StringIO before parsing
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df = pd.merge(out_df,indi_df,on='time',how="inner")
return out_df
def stoch_get(df,stoch,symbol,interval):
slowd = 3
slowk = 3
fastk = 5
fastd = 3
stoch_ma = 1
#EMA
api = indicator_dict[stoch]
if stoch == 'stoch':
indicator = requests.get(api.format(symbol=symbol,interval=interval,fastkperiod=fastk,slowkperiod=slowk,slowdperiod=slowd,slowkmatype=stoch_ma,slowdmatype=stoch_ma))
time.sleep(12)
fixed = StringIO(indicator.content.decode('utf-8'))
        # pandas.read_csv needs a file-like object, so wrap the response text in StringIO before parsing
indi_df = pd.read_csv(fixed)
return indi_df
elif stoch == 'stochf':
indicator = requests.get(api.format(symbol=symbol,interval=interval,fastkperiod=fastk,fastdperiod=fastd,fastdmatype=stoch_ma))
time.sleep(12)
fixed = StringIO(indicator.content.decode('utf-8'))
        # pandas.read_csv needs a file-like object, so wrap the response text in StringIO before parsing
indi_df = pd.read_csv(fixed)
return indi_df
def rsi_get(df,rsi,symbol,interval):
rsi_period = [7,11,14,21]
api = indicator_dict[rsi]
first = True
for t in rsi_period:
for s in s_type:
indicator = requests.get(api.format(symbol=symbol,interval=interval,time_period = t,series_type=s))
time.sleep(12)
fixed = StringIO(indicator.content.decode('utf-8'))
            # pandas.read_csv needs a file-like object, so wrap the response text in StringIO before parsing
if first:
out_df = pd.read_csv(fixed)
first = False
elif first != True:
indi_df = pd.read_csv(fixed)
out_df =
|
pd.merge(out_df,indi_df,on='time',how="inner")
|
pandas.merge
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
def join_cols(row):
return ''.join(list(row))
def find_favor(seq):
t = []
for m in re.finditer(seq, DNA):
t += [m.start()]
return t
DNA = np.loadtxt('./data/DNA.txt', str)
DNA = ''.join(DNA)
print('DNA Length = {} '.format(len(DNA)) )
start_idxs = []
for m in re.finditer('GTC', DNA):
start_idxs += [m.start()]
start_idxs = np.array(start_idxs)
df = pd.DataFrame()
df['loc'] = np.arange(len(DNA))
df['start_ind'] = 0
df.loc[start_idxs,'start_ind'] = 1
favor = pd.read_csv('./data/favor_seqs.csv')
gtc_loc = list(favor.iloc[0,:])[0].find('GTC')
red_idxs = []
for detsize in range(3,4):
dets = favor['seq'].str[ gtc_loc-detsize:gtc_loc + 3 + detsize]
dets = list(np.unique(dets))
detslocs = list(map(find_favor, dets))
detslocs = [x for x in detslocs if len(x) > 1]
for tlocs in detslocs:
mean_dist = np.mean(np.diff(tlocs))
median_dist = np.median(np.diff(tlocs))
        if ((1000 < mean_dist < 6000)
                or (1000 < median_dist < 6000)):
red_idxs += [tlocs]
red_idxs = [item for sublist in red_idxs for item in sublist]
plt.figure(figsize=(16,4))
plt.bar(start_idxs, [0.3]*len(start_idxs), width=64, color='black', alpha=0.8)
plt.bar(red_idxs, [1]*len(red_idxs), width=64, color='red')
plt.ylim([0,1])
plt.xlim([0,len(DNA)])
plt.xlabel('DNA nucleotide index')
plt.yticks([])
plt.xticks([])
plt.title('\"Interesting\" Sequences')
plt.legend(['GTC Locations','Interesting Frequency Locations'], facecolor=(1,1,1,1), framealpha=0.98 )
plt.savefig('./out/favor_seqs_k_3.png')
plt.show()
#%% Prim VS Primon when POLY is saturated
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def ms(t):
return t/np.max(t)
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-4]) ))
tcols = df.columns
tcols = list(tcols[:-4]) + ['poly','prim','primo','seq']
df.columns = tcols
df['primo-prim'] = df['primo'] - df['prim']
labels = ['poly','primo','prim','primo-prim']
df = df.sort_values('poly').reset_index(drop=True)
sm = 100
plt.figure(figsize=(12,8))
for i, lab in enumerate(labels):
plt.subplot(4,1,i+1)
if(i != 3):
df = df.sort_values(lab).reset_index(drop=True)
y = df[lab].copy()
if(i != 3):
y = mms( y )**0.5
y = y.rolling(sm).mean().drop(np.arange(sm)).reset_index(drop=True)
y = pd.Series(y)
plt.plot(np.arange(len(y)),y, alpha=0.8)
plt.title(lab + ' sorted by self')
plt.ylabel(' ln(score)' )
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)
#%% Collect favorite sequences
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
labels = ['poly','primo','prim']
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-3]) ))
# keep favorite seuqnces (1000~6000 reps)
df_test = pd.read_csv('./data/validation.csv')
df.index = df['seq']
df = df.loc[df_favor['seq'],:]
df = df.dropna(axis=0).reset_index(drop=True)
df.columns = list(df.columns[:-4]) + ['poly', 'prim', 'primo', 'seq']
# keep non test set sequences
toDrop = df_test['seq']
df.index = df['seq']
df = df.drop(toDrop, axis=0, errors='ignore')
df = df.reset_index(drop=True)
print("Let's unite the data by seq and look at the mean and std of each sequence")
dfm = pd.DataFrame()
dfm['primo'] = mms(df.groupby('seq').median()['primo'])
dfm['primo_std'] = mms(df.groupby('seq').std()['primo'])#/mms( df.groupby('seq').mean()['primo'] )
dfm['prim'] = mms(df.groupby('seq').median()['prim'])
dfm['prim_std'] = mms(df.groupby('seq').std()['prim'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['poly'] = mms(df.groupby('seq').median()['poly'])
dfm['poly_std'] = mms(df.groupby('seq').std()['poly'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['seq'] = dfm.index
dfm = dfm.reset_index(drop=True)
T1 = np.percentile(dfm['primo'], 95)
T2 = np.percentile(dfm['primo_std'], 90)
T3 = np.percentile(dfm['prim'], 95)
T4 = np.percentile(dfm['prim_std'], 90)
T5 = np.percentile(dfm['poly'], 95)
T6 = np.percentile(dfm['poly_std'], 90)
print('length of dfm before outlier cleaning = {}'.format(len(dfm)) )
dfm = dfm.drop(np.where(dfm['primo'] > T1 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['primo_std'] > T2 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim'] > T3 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim_std'] > T4 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly'] > T5 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly_std'] > T6 )[0]).reset_index(drop=True)
print('length of dfm after outlier cleaning = {}'.format(len(dfm)) )
nucs = np.array(list(map(list, dfm['seq']))).copy()
nucs = pd.DataFrame(nucs.copy())
nucs = nucs.add_suffix('_nuc')
nucs = nucs.reset_index(drop=True)
dfm = pd.concat([dfm, nucs], axis=1)
dfm = dfm.reset_index(drop=True)
toKeep = [x for x in dfm.columns if 'std' not in x]
dfm = dfm.loc[:,toKeep]
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab])
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab]**0.5)
dfm.to_csv('data/chip_B_favor.csv', index=False)
#%% Heatmap of ABS Correlation
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def count_letters(df_nucs, rep_dict):
X = df_nucs.copy()
X = X.replace(rep_dict)
X = np.array(X)
X = np.sum(X,1)
return X
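# e.g. count_letters(df_nucs, {'A':1,'C':0,'G':1,'T':0}) returns, for each sequence (row),
# how many of its positions are A or G, i.e. that row's purine count.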
df = pd.read_csv('data/chip_B_favor.csv')
cols = df.columns
cols = [x for x in cols if 'nuc' in x]
df_nucs = df.loc[:,cols].copy()
df_labels = df.loc[:,['primo','prim','poly']]
df_res = pd.DataFrame()
# count appereances of each individual letter
for letter in ['A','C','G','T']:
rep_dict = {'A':0,'C':0,'G':0,'T':0}
rep_dict[letter] = 1
df_res['{}_count'.format(letter) ] = count_letters(df_nucs, rep_dict)
gtc_ind_start = ''.join( list(df_nucs.iloc[0,:]) ).find('GTC') - 5
gtc_ind_end = gtc_ind_start + 5 + 3 + 5
# extract purine and pyrimidine densities
# A,G Purines
# C,T Pyrimidines
""" =================== Left Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Left_Pur_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Left_Pry_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
""" =================== Center / Determinant Count ===================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Center_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Center_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
""" =================== Right Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Right_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Right_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
df_res = pd.concat([df_res, df_labels], axis=1)
plt.figure(figsize=(12,8))
df_corr = (df_res.corr().abs())
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
plt.figure(figsize=(12,8))
df_corr = df_corr.loc[['primo','prim','poly'],['primo','prim','poly']]
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
#%% K mers spectrum
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import entropy
NMERS = [1,2,3]
df = pd.read_csv('./data/chip_B_favor.csv')
labels = ['primo','prim','poly']
np.random.RandomState(42)
df.index = df['seq']
m2 = 'CCACCCCAAAAAACCCCGTCAAAACCCCAAAAACCA'
df.loc[m2,'primo']
im = plt.imread(r'C:\Users\Ben\Desktop/Picture1.png')
x = list(range(1,14))
y = [1,
0,
0.4,
0.6,
0.47,
0.13,
0.2,
0.3,
0.5,
0.46,
0.5,
0.67,
0.8]
x= np.array(x)
y= np.array(y)
plt.imshow(im)
plt.scatter(x,y, c='red')
#for col in labels:
#df = df.drop(np.where(df[col] > np.percentile(df[col],95))[0],axis=0).reset_index(drop=True)
#df = df.drop(np.where(df[col] < np.percentile(df[col],5))[0],axis=0).reset_index(drop=True)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
plt.figure(figsize=(18,16))
for i, N in enumerate(NMERS):
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_mer = np.sum(df_mer)
df_mer = df_mer/np.sum(df_mer)
df_mer = df_mer[(df_mer >= 0.01 )]
plt.subplot(len(NMERS),1,i+1)
plt.scatter(np.arange(len(df_mer)), df_mer, color=(['blue','red','green'])[i] )
plt.xticks(np.arange(len(df_mer)), df_mer.index, rotation=90)
#plt.legend([' Variance: {}'.format( np.var(df_mer)) ])
plt.title('{}-Mer'.format(N) )
plt.ylim([0, 0.3])
plt.ylabel('mer frequency')
#%% K-MEANS and Hirarchial clustering
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
NLIST = [5]
labels = ['poly','prim','primo']
labels = ['primo']
ShowTextOnDendogram = True
showKM = True
showHC = False
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
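# OHE() one-hot encodes the 36 positional nucleotide columns into a fixed 144-column layout
# (36 positions x 4 letters), padding letter/position combinations that never occur with zeros
# so every call returns the same columns in the same order. Illustration: a row whose 1_nuc
# value is 'G' produces 1_nuc_A = 0, 1_nuc_C = 0, 1_nuc_G = 1, 1_nuc_T = 0.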
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
df_backup = df.copy()
# =============================================================================
# Hirarchical Clustering
# =============================================================================
from scipy.cluster import hierarchy
if(showHC):
#WORKS FINE
X = df_backup.drop(labels,axis=1).copy()
X = X.iloc[:,:].reset_index(drop=True)
Z = hierarchy.linkage(X, method='ward')
Z = pd.DataFrame(Z)
botline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])),-2] * 1.05
topline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])) + 1, -2] * 0.95
fig = plt.figure(figsize=(4, 6))
dn = hierarchy.dendrogram(Z, p=7, truncate_mode='level', color_threshold=40, distance_sort=True)
plt.hlines([botline, topline], xmin=0, xmax=len(Z), ls='--', alpha = 0.9 )
plt.ylabel('Ward Distance')
disticks = np.unique(np.sqrt(Z.iloc[:,-2]).astype(int))
#plt.yticks( disticks**2 , disticks)
plt.xticks([])
plt.xlabel('')
Z = hierarchy.linkage(X, method='ward')
X[labels] = df_backup[labels].copy()
thr = 40
dists = [ 20, 40, 80, 120]
fntsze = 22
thr = 40
for i, thr in enumerate(dists):
Xg = X.copy()
Xg['bin'] = hierarchy.fcluster(Z, thr, criterion='distance', depth=5, R=None, monocrit=None)
Xres = Xg.groupby('bin').sum()
Xres[labels] = Xg.groupby('bin').median()[labels]
xcount = Xg.copy()
xcount['count'] = 1
xcount = xcount.groupby('bin').sum()['count']
xcnew = [xcount.iloc[0]/2]
for j in xcount.index[1:]:
xcnew += [np.sum(xcount[:j-1]) + xcount[j]/2]
xcount = pd.Series( xcnew )
xcount.index = xcount.index + 1
#plt.subplot(4,1, i+1 )
#plt.scatter(Xres.index, Xres[labels])
toKeep = [x for x in X.drop(labels, axis=1).columns if '36' not in x]
Xres = (Xres.loc[:,toKeep])
Xres.columns = [x[-1] for x in Xres.columns]
Xres = Xres.T
Xres = Xres.groupby(Xres.index).sum()
for col in Xres.columns:
Xres[col] = Xres[col] / np.sum(Xres[col])
Xres = Xres.T
row_idx = 1
for row_idx in Xres.index:
row = Xres.loc[row_idx,:]
print(
xcount.iloc[row_idx-1]
)
accumsize = 0
for dx, lett in enumerate(row.index):
x_rng = plt.gca().get_xlim()[1]
# =============================================================================
# # ADDING TEXT TO DENDROGRAM
# =============================================================================
if(ShowTextOnDendogram == True):
plt.text(x= xcount.iloc[row_idx-1]*x_rng/len(Xg) + accumsize,
y=thr, horizontalalignment='left',
s=lett, fontsize=np.max([fntsze*row[lett], 6]) ,
weight='normal', fontname='arial')
accumsize += np.max([fntsze*row[lett], 8]) + 36
#% TODO MAKE THIS PRETTY
from sklearn.metrics import silhouette_score
res_ss = []
xvec = [5]
for i in xvec:
X = df.copy().drop(['bin'], axis=1, errors='ignore')
X = X.drop(labels, axis=1)
tmp_ss = []
for j in range(1):
km = KMeans(i, random_state=j )
y = km.fit_predict(X)
ss = silhouette_score( X, y )
tmp_ss += [ss]
print('sil score => mean: {} | std: {}'.format(np.mean(tmp_ss), np.std(tmp_ss)) )
res_ss += [np.mean(tmp_ss)]
plt.figure()
plt.scatter(xvec,res_ss)
plt.xlabel('K-Value')
plt.ylabel('Sil Score')
plt.show()
if(showKM):
col = 'primo'
plt.figure(figsize=(6,4))
for i, Nbins in enumerate(NLIST):
df = df_backup.copy()
km = KMeans(Nbins, random_state=42 )
df['bin'] = km.fit_predict(df.drop(labels,axis=1))
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4,
4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
tdf = df.loc[:,['bin',col]]
#rep_d = {0:'A',1:'B',2:'C',3:'D',4:'E'}
rep_d = {0:2,1:3,2:0,3:1,4:4}
df['bin'] = df['bin'].replace(rep_d)
centers = list(np.array(centers)[list(rep_d.values())])
print('Mean Words:')
print(centers)
#rep_d = {'A':2,'B':3,'C':0,'D':1,'E':4}
#df['bin'] = df['bin'].replace(rep_d)
plt.subplot(len(NLIST),1,i+1)
sns.violinplot(x="bin", y=col, data=df, palette="Blues", cut=0)
plt.ylim([-0.2, 1.2])
plt.ylabel('Primase \nBinding Scores', fontsize=12)
plt.title('Scores Distribution by Cluster', fontsize=12)
"""
for tx, tcent in zip(np.arange(np.max(tdf['bin'])+1) , centers):
chunks, chunk_size = len(tcent), len(tcent)//6
stlist = [ tcent[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
tcent = '\n'.join(stlist)
t = plt.text(x=tx-0.5, y=0, s=tcent, fontsize=10, color='red', fontweight='normal', backgroundcolor='white')
t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white'))
plt.xlim([-1, Nbins-1 + 0.5])
"""
#plt.xticks( np.arange(np.max(tdf['bin'])+1)
#,centers , rotation=-90, fontsize=12)
plt.yticks( [0,0.25,0.5,0.75,1], fontsize=12 )
plt.tight_layout()
plt.savefig('./out/kmeans/forpaper_B_centroids_' + str(Nbins) + 'bins')
plt.show()
#plt.close()
#%% PCA
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
NMERS = [3]
df = pd.read_csv('./data/chip_B_favor.csv')
#labels = ['primo','prim','poly']
labels = ['primo']
np.random.RandomState(42)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
"""
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
"""
for N in NMERS:
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
pca = PCA(n_components=np.min([16,len(df_mer.columns)]), svd_solver='auto', random_state=42)
df_mer = pd.DataFrame(pca.fit_transform(df_mer.dropna(axis=1)))
df_mer = df_mer.add_prefix('pc')
#MMS -1 1
for col in df_mer.columns:
df_mer[col] = mms(df_mer[col])
for col in labels:
df_mer[col] = df[col]
np.cumsum(pca.explained_variance_ratio_)
1/0
# 3D scatter
for lab in labels:
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111, projection='3d')
x = df_mer['pc0']
y = df_mer['pc1']
z = df_mer['pc2']
clrs = mms( (df_mer[lab]) )
ax.scatter3D(2*x + 0.05*np.random.randn(len(x)) ,
2*y + 0.05*np.random.randn(len(y)) ,
2*z + 0.05*np.random.randn(len(z)) ,
alpha=0.6, c=clrs, cmap='bwr')
plt.xlabel('pc0')
plt.ylabel('pc1')
ax.set_zlabel('pc2')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 3D Projection """
#plt.close()
fig = plt.figure(figsize=(14,10))
x = df_mer['pc0']
y = df_mer['pc1']
plt.scatter( x-0.5, #+ 0.05*np.random.randn(len(x)) ,
y-0.5, #+ 0.05*np.random.randn(len(y)) ,
alpha=0.6, c=clrs, cmap='bwr' )
plt.xlabel('pc0')
plt.ylabel('pc1')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.savefig('./out/pca/{}_{}mer'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 2D Projection """
#plt.close()
#%% Dynamic clustering and prediction
"""
This technique brings together all of our research:
using PCA we learn the existence of 5 clusters,
using KMeans we classify each sequence to its cluster,
using regressors such as Lasso we train a model for each cluster
and predict labels with high resolution.
We can compare results with or without dynamic clustering; a minimal sketch of the idea appears just below.
"""
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pickle
from time import clock, sleep
[plt.close() for x in plt.get_fignums()]
N = 3
with_clustering = True
stime = clock()
#labels = ['poly','prim','primo']
labels = ['primo']
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
# apply KMEANS
km = KMeans(5, random_state=42, n_init=20 )
bins_pred = km.fit_predict(df.drop(labels,axis=1))
pickle.dump(km, open('./out/regressors/models/km.sav' , 'wb') )
t = km.cluster_centers_
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4, 4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
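# Each KMeans centroid is a vector of 144 one-hot means; reshaping it to (36 positions, 4 letters)
# and taking argmax over the letter axis picks the most likely nucleotide at every position, so
# `centers` holds one 36-letter consensus sequence ("mean word") per cluster.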
df = pd.read_csv('./data/chip_B_favor.csv')
df['bin'] = bins_pred
"""
# Hard To Predict (HTP) Generator
htpgen = pd.DataFrame(np.random.randint(0,4,[5000, 36])).replace({0:'A',1:'C',2:'G',3:'T'})
htpgen = htpgen.add_suffix('_nuc')
htpgen = OHE(htpgen)
htpgen['bin'] = km.predict(htpgen)
# Easy To Predict (ETP) Generator
etpgen = pd.DataFrame(np.random.randint(0,4,[5000, 36])).replace({0:'A',1:'C',2:'G',3:'T'})
etpgen = etpgen.add_suffix('_nuc')
etpgen = OHE(etpgen)
etpgen['bin'] = km.predict(etpgen)
t = np.array(htpgen.iloc[:,-1])
1/0
"""
from itertools import product
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_validate
#from sklearn.linear_model import LassoLarsIC
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
import xgboost as xgb
test_df =
|
pd.read_csv('./data/validation.csv')
|
pandas.read_csv
|
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
import pandas as pd
from models import RnnVersion3
import gc
from keras.models import Model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping,Callback
from tqdm import tqdm_notebook
user_app_actived = pd.read_csv('../../data/original_data/user_app_actived.csv',names=['uId', 'appId'])
usage_list = pd.read_csv('../../data/processed_data/usage_app_info.csv')  # resampled usage app records
usage_appId = pd.read_csv('../../data/processed_data/usage_appId.csv')  # app dictionary built from the usage table
appId = pd.read_csv('../../data/processed_data/appId.csv')  # app dictionary built from the activation table
user_app_actived['app_list'] = user_app_actived.appId.str.split('#')
import ast
from tqdm import tqdm
usage_train = []
for idx in tqdm(usage_list.appId):
usage_train.append(ast.literal_eval(idx))
usage_list['app_list'] = usage_train
user_app_actived.drop('appId',axis=1,inplace=True)
usage_list.drop('appId',axis=1,inplace=True)
user_app_actived = pd.merge(user_app_actived, usage_list, how='left', on='uId')
result = []
for index,row in tqdm(user_app_actived.iterrows()):
try:
result.append(np.sort(list(set(row['app_list_x']) | set(row['app_list_y']))))
except:
result.append(row['app_list_x'])
user_app_actived['app_list'] = result
user_app_actived.drop(['app_list_x','app_list_y'],axis=1,inplace =True)
del usage_list
gc.collect()
x_train = pd.read_csv('../../data/original_data/age_train.csv',names=['uId','age_group'],dtype={'uId':np.int32, 'age_group':np.int8})
x_test = pd.read_csv('../../data/original_data/age_test.csv',names=['uId'],dtype={'uId':np.int32})
x_train = pd.merge(x_train, user_app_actived, how='left', on='uId')
x_test = pd.merge(x_test, user_app_actived, how='left', on='uId')
y_train = x_train.age_group - 1
x_train = x_train.drop('age_group',axis=1)
del user_app_actived
gc.collect()
usage_appId = pd.read_csv('../../data/processed_data/usage_appId_top_num100000.csv')
usage_appId = usage_appId[-20000:]
usage_appId['id'] = np.arange(0,20000)
all_appid = list(set(appId.appId.tolist() + usage_appId.appId.tolist()))
app_dict = dict(zip(all_appid,np.arange(len(all_appid))))
app_list = [[app_dict[x] for x in apps if x in app_dict] for apps in x_train.app_list]
app_test = [[app_dict[x] for x in apps if x in app_dict] for apps in x_test.app_list]
from keras.preprocessing import sequence
app_list = sequence.pad_sequences(app_list, maxlen=170)
app_test = sequence.pad_sequences(app_test, maxlen=170)
x_train.drop('app_list',axis=1,inplace=True)
x_test.drop('app_list',axis=1,inplace=True)
gc.collect()
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
    get the smallest numpy dtype that the value range [min_val, max_val] can be safely downcast to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
            elif max_val <= self.int16_max and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
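        # e.g. (with the corrected int16 branch above) _get_type(0, 200, 'int') -> np.int16,
        # since 200 exceeds the int8 maximum of 127; _get_type(0.0, 1.0, 'float') -> np.float16.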
'''
function: _memory_process(self,df)
    downcast column data types where possible to save memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
                print(' Could not downcast column {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
memory_preprocess = _Data_Preprocess()
train = pd.read_csv('../../data/features/base_train.csv')
test = pd.read_csv('../../data/features/base_test.csv')
train=memory_preprocess._memory_process(train)
test=memory_preprocess._memory_process(test)
print(test.info())
gc.collect()
actived_features_all = pd.read_csv('../../data/features/actived_features_all.csv')
actived_features_all=memory_preprocess._memory_process(actived_features_all)
train = pd.merge(train, actived_features_all, how='left', on='uId').fillna(0)
test = pd.merge(test, actived_features_all, how='left', on='uId').fillna(0)
del actived_features_all
gc.collect()
act_use_rnn_hide_train=pd.read_csv('../../data/features/act_use_rnn_hide_train.csv')
act_use_rnn_hide_train=memory_preprocess._memory_process(act_use_rnn_hide_train)
act_use_rnn_hide_train.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
train = pd.merge(train, act_use_rnn_hide_train, how='left', on='uId').fillna(0)
del act_use_rnn_hide_train
act_use_rnn_hide_test=pd.read_csv('../../data/features/act_use_rnn_hide_test.csv')
act_use_rnn_hide_test=memory_preprocess._memory_process(act_use_rnn_hide_test)
act_use_rnn_hide_test.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
test = pd.merge(test, act_use_rnn_hide_test, how='left', on='uId').fillna(0)
print(test.info())
del act_use_rnn_hide_test
gc.collect()
train_uId = x_train.uId.tolist()
test_uId = x_test.uId.tolist()
test.index = test.uId.tolist()
train.index = train.uId.tolist()
test = test.loc[test_uId,:]
train = train.loc[train_uId,:]
train.drop(['uId','age_group'],axis=1,inplace=True)
test.drop('uId',axis=1,inplace=True)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
from sklearn.preprocessing import StandardScaler,MinMaxScaler
train = train.replace([np.inf, -np.inf], np.nan).fillna(0)
test = test.replace([np.inf, -np.inf], np.nan).fillna(0)
scaler = MinMaxScaler()
scaler.fit(pd.concat([train,test],axis=0))
train = scaler.transform(train)
test = scaler.transform(test)
train = memory_preprocess._memory_process(
|
pd.DataFrame(train)
|
pandas.DataFrame
|
import pymortar
import pandas as pd
import pendulum
import toml
from flask import Flask
from flask import jsonify, send_from_directory
from flask import request
from flask import current_app
from flask import make_response
from flask import render_template
from collections import defaultdict
from functools import update_wrapper
import pytz
import json
import glob
import os
from datetime import datetime, timedelta
from dashutil import get_start, generate_months, prevmonday, get_today
from datetime import timezone
import xsg
config = toml.load('config.toml')
TZ = pytz.timezone('US/Pacific')
app = Flask(__name__, static_url_path='/static')
client = pymortar.Client({
'mortar_address':config['Mortar']['url'],
'username': config['Mortar']['username'],
    'password': config['Mortar']['password'],
})
sites = [config['Dashboard']['sitename']]
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def state_to_string(state):
if state == 0:
return 'off'
elif state == 1:
return 'heat stage 1'
elif state == 2:
return 'cool stage 1'
elif state == 4:
return 'heat stage 2'
elif state == 5:
return 'cool stage 2'
else:
return 'unknown'
def dofetch(views, dataframes, start=None, end=None):
timeparams = None
if start is not None and end is not None:
timeparams=pymortar.TimeParams(
start=start.isoformat(),
end=end.isoformat(),
)
req = pymortar.FetchRequest(
sites=sites,
views=views,
dataFrames=dataframes,
time=timeparams
)
return client.fetch(req)
meter_view = pymortar.View(
name="meters",
definition="""SELECT ?meter WHERE {
?meter rdf:type brick:Building_Electric_Meter
};""",
)
meter_df = pymortar.DataFrame(
name="meters",
aggregation=pymortar.MEAN,
timeseries=[
pymortar.Timeseries(
view="meters",
dataVars=['?meter'],
)
]
)
tstats_view = pymortar.View(
name="tstats",
definition="""SELECT ?rtu ?zone ?tstat ?csp ?hsp ?temp ?state WHERE {
?rtu rdf:type brick:RTU .
?tstat bf:controls ?rtu .
?rtu bf:feeds ?zone .
?tstat bf:hasPoint ?temp .
?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor .
?tstat bf:hasPoint ?csp .
?csp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Heating_Setpoint .
?tstat bf:hasPoint ?hsp .
?hsp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Cooling_Setpoint .
?tstat bf:hasPoint ?state .
?state rdf:type brick:Thermostat_Status .
};""",
)
tstats_df = pymortar.DataFrame(
name="tstats",
aggregation=pymortar.MAX,
timeseries=[
pymortar.Timeseries(
view="tstats",
dataVars=['?csp','?hsp','?temp','?state'],
),
]
)
room_temp_view = pymortar.View(
name="room_temp",
definition="""SELECT ?zone ?room ?sensor WHERE {
?zone rdf:type brick:HVAC_Zone .
?zone bf:hasPart ?room .
?sensor rdf:type/rdfs:subClassOf* brick:Temperature_Sensor .
?room bf:hasPoint ?sensor .
};""",
)
weather_view = pymortar.View(
name="weather_temp",
definition="""SELECT ?sensor WHERE {
?sensor rdf:type/rdfs:subClassOf* brick:Weather_Temperature_Sensor .
};""",
)
weather_df = pymortar.DataFrame(
name="weather_temp",
aggregation=pymortar.MEAN,
window='15m',
timeseries=[
pymortar.Timeseries(
view="weather_temp",
dataVars=['?sensor'],
)
],
)
@app.route('/api/power/<last>/in/<bucketsize>')
@crossdomain(origin='*')
def power_summary(last, bucketsize):
# first, determine the start date from the 'last' argument
start_date = get_start(last)
if last == 'year' and bucketsize == 'month':
ranges = generate_months(get_today().month - 1)
readings = []
times = []
for t0,t1 in ranges:
meter_df.window = '{0}d'.format((t0-t1).days)
res=dofetch([meter_view], [meter_df], t1, t0)
times.append(t1.tz_convert(TZ).timestamp()*1000)
readings.append(res['meters'].fillna('myNullVal').values[0][0])
return jsonify({'readings': dict(zip(times,readings))})
# otherwise,
meter_df.window=bucketsize
print('start_date',start_date)
res=dofetch([meter_view], [meter_df], start_date, datetime.now(TZ))
res['meters'].columns=['readings']
return res['meters'].tz_convert(TZ).fillna('myNullVal').to_json()
@app.route('/api/energy/<last>/in/<bucketsize>')
@crossdomain(origin='*')
def energy_summary(last, bucketsize):
start_date = get_start(last)
if last == 'year' and bucketsize == 'month':
ranges = generate_months(get_today().month - 1)
readings = []
times = []
for t0,t1 in ranges:
meter_df.window = '15m'
res=dofetch([meter_view], [meter_df], t1, t0)
df = res['meters'].copy()
df.columns = ['readings']
df /= 4. # divide by 4 to get 15min (kW) -> kWh
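                # e.g. a constant draw of 8 kW averaged over a 15-minute bucket is 8 * 0.25 h = 2 kWh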
times.append(pd.to_datetime(t1.isoformat()))
readings.append(df['readings'].sum())
df = pd.DataFrame(readings,index=times,columns=['readings'])
return df.fillna('myNullVal').to_json()
meter_df.window = '15m'
print('start_date',start_date)
res = dofetch([meter_view], [meter_df], start_date, datetime.now(TZ))
df = res['meters'].tz_convert(TZ).copy()
df.columns = ['readings']
df['readings'] /= 4.
return df.fillna('myNullVal').resample(bucketsize).apply(sum).to_json()
@app.route('/api/price')
@crossdomain(origin='*')
def price():
res = xsg.get_price(sites[0], get_today(), get_today()+timedelta(days=1))
return res['price'].to_json()
@app.route('/api/power')
@crossdomain(origin='*')
def current_power():
raise(Exception("/api/power NOT IMPLEMENTED"))
pass
@app.route('/api/hvac')
@crossdomain(origin='*')
def hvacstate():
t1 = datetime.now(TZ).replace(microsecond=0)
t0 = t1 - timedelta(hours=12)
tstats_df.window='1h'
res = dofetch([tstats_view, room_temp_view], [tstats_df], t0, t1)
zones = defaultdict(lambda : defaultdict(dict))
for (tstat, zone, hsp, csp, temp, state) in res.query('select tstat, zone, hsp_uuid, csp_uuid, temp_uuid, state_uuid from tstats'):
zone = zone.split('#')[-1]
tempdf = res['tstats'][[hsp,csp,temp,state]].tail(1).fillna('myNullVal')
hsp,csp,temp,state = tempdf.values[-1]
zones[zone]['heating_setpoint'] = hsp
zones[zone]['cooling_setpoint'] = csp
zones[zone]['tstat_temperature'] = temp
zones[zone]['heating'] = bool(state == 1 or state == 4)
zones[zone]['cooling'] = bool(state == 2 or state == 5)
zones[zone]['timestamp'] = tempdf.index[-1].timestamp() * 1000
return jsonify(zones)
@app.route('/api/hvac/day/in/<bucketsize>')
@crossdomain(origin='*')
def serve_historical_hvac(bucketsize):
t1 = datetime.now(TZ).replace(microsecond=0)
t0 = get_today()
tstats_df.window=bucketsize
res = dofetch([tstats_view, weather_view], [tstats_df, weather_df], t0, t1)
zones = defaultdict(lambda : defaultdict(dict))
df = res['tstats'].fillna(method='ffill').fillna(method='bfill')
for (tstat, zone, hsp, csp, temp, state) in res.query('select tstat, zone, hsp_uuid, csp_uuid, temp_uuid, state_uuid from tstats'):
zone = zone.split('#')[-1]
zones[zone]['inside'] = json.loads(df[temp].dropna().to_json())
zones[zone]['heating'] = json.loads(df[hsp].dropna().to_json())
zones[zone]['outside'] = json.loads(res['weather_temp'].max(axis=1).dropna().to_json())
zones[zone]['cooling'] = json.loads(df[csp].dropna().to_json())
zones[zone]['state'] = json.loads(df[state].dropna().apply(state_to_string).to_json())
for k, values in zones[zone].items():
if len(values) == 0:
fakedates = pd.date_range(t0, t1, freq=bucketsize.replace('m','T'))
if k != 'state':
fakevals = [0]*len(fakedates)
else:
fakevals = ['off']*len(fakedates)
zones[zone][k] = json.loads(
|
pd.DataFrame(fakevals,index=fakedates)
|
pandas.DataFrame
|
USAGE = """
python Metrics.py
Needs access to these box folders and M Drive
Box/Modeling and Surveys/Urban Modeling/Bay Area UrbanSim 1.5/PBA50/Draft Blueprint runs/
Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/
Processes model outputs and creates a single csv with scenario metrics in this folder:
Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/
This csv file will have 6 columns:
1) modelrun ID
2) metric ID
3) metric name
4) year (note: for metrics that depict change from 2015 to 2050, this value will be 2050)
5) blueprint type
6) metric value
"""
import datetime, os, sys
import numpy, pandas as pd
from collections import OrderedDict, defaultdict
def calculate_urbansim_highlevelmetrics(runid, dbp, parcel_sum_df, county_sum_df, metrics_dict):
metric_id = "Overall"
#################### Housing
# all households
metrics_dict[runid,metric_id,'TotHH_region',y2,dbp] = parcel_sum_df['tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_region',y1,dbp] = parcel_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_growth_region',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_region',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_region',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp] = parcel_sum_df['tothh_2050'].sum() - parcel_sum_df['tothh_2015'].sum()
# HH growth by county
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'TotHH_county_growth_%s' % row['county'],y_diff,dbp] = row['tothh_growth']
metrics_dict[runid,metric_id,'TotHH_county_shareofgrowth_%s' % row['county'],y_diff,dbp] = row['tothh_growth'] / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
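    # Pattern used throughout this function: growth ratios are value_2050 / value_2015 - 1, and an
    # area's share of growth is its absolute 2015->2050 change divided by the regional 2015->2050 change.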
# HH Growth in all GGs
metrics_dict[runid,metric_id,'TotHH_GG',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_GG',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_GG_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_GG',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_GG',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_GG_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_GG',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_GG',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in PDAs
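    # note: .str.contains('', na=False) is True for every non-null pda_id, i.e. it selects parcels inside a PDA;
    # comparing the same expression to 0 (as in the "GG that are not PDAs" block below) selects parcels outside PDAs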
metrics_dict[runid,metric_id,'TotHH_PDA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_PDA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_PDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_PDA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_PDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_PDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_PDA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_PDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in GGs that are not PDAs
metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_GG_notPDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_GG_notPDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in HRAs
metrics_dict[runid,metric_id,'TotHH_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_HRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_HRA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_HRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_HRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_HRA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_HRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in TRAs
metrics_dict[runid,metric_id,'TotHH_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_TRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_TRA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_TRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_TRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_TRA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_TRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in areas that are both HRAs and TRAs
metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_HRAandTRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_HRAandTRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
#################### Jobs
# all jobs
metrics_dict[runid,metric_id,'TotJobs_region',y2,dbp] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_region',y1,dbp] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_growth_region',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_region',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_region',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp] = parcel_sum_df['totemp_2050'].sum() - parcel_sum_df['totemp_2015'].sum()
    # Job growth by county
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'TotJobs_growth_%s' % row['county'],y_diff,dbp] = row['totemp_growth']
metrics_dict[runid,metric_id,'TotJobs_county_shareofgrowth_%s' % row['county'],y_diff,dbp] = row['totemp_growth'] / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in all GGs
metrics_dict[runid,metric_id,'TotJobs_GG',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_GG',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_GG',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_GG_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_GG',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_GG',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in PDAs
metrics_dict[runid,metric_id,'TotJobs_PDA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_PDA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_PDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_PDA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_PDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_PDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_PDA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_PDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in GGs that are not PDAs
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in HRAs
metrics_dict[runid,metric_id,'TotJobs_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_HRA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_HRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_HRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_HRA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_HRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in TRAs
metrics_dict[runid,metric_id,'TotJobs_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_TRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_TRA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_TRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_TRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_TRA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_TRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in areas that are both HRAs and TRAs
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
############################
    # LIHH = Q1 + Q2 households; share is computed over total households, growth over the 2015 LIHH total
    metrics_dict[runid,metric_id,'LIHH_share_2050',y2,dbp] = (parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / parcel_sum_df['tothh_2050'].sum()
    metrics_dict[runid,metric_id,'LIHH_share_2015',y1,dbp] = (parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()) / parcel_sum_df['tothh_2015'].sum()
    metrics_dict[runid,metric_id,'LIHH_growth_region',y_diff,dbp] = (parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / (parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum())
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'LIHH_growth_%s' % row["county"],y_diff,dbp] = row['LIHH_growth']
# all jobs
metrics_dict[runid,metric_id,'tot_jobs_2050',y2,dbp] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'tot_jobs_2015',y1,dbp] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'jobs_growth_region',y_diff,dbp] = (parcel_sum_df['totemp_2050'].sum() / parcel_sum_df['totemp_2015'].sum())
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'jobs_growth_%s' % row["county"],y_diff,dbp] = row['totemp_growth']
def calculate_tm_highlevelmetrics(runid, dbp, parcel_sum_df, county_sum_df, metrics_dict):
metric_id = "Overall_TM"
# TBD
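# Note on the two normalization helpers below: each returns the ratio of the region-wide Q1+Q2
# (or Q1-only) household share in 2050 to the same share in 2015. Later metrics multiply 2015
# geography-level shares by these factors (the "_normalized" values) so that a geography's change
# is measured against overall regional growth in the low-income household share.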
def calculate_normalize_factor_Q1Q2(parcel_sum_df):
return ((parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / parcel_sum_df['tothh_2050'].sum()) \
/ ((parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()) / parcel_sum_df['tothh_2015'].sum())
def calculate_normalize_factor_Q1(parcel_sum_df):
return (parcel_sum_df['hhq1_2050'].sum() / parcel_sum_df['tothh_2050'].sum()) \
/ (parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum())
def calculate_Affordable1_transportation_costs(runid, year, dbp, tm_scen_metrics_df, tm_auto_owned_df, tm_auto_times_df, tm_travel_cost_df, metrics_dict):
metric_id = "A1"
days_per_year = 300
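    # annualization factor: converts the travel model's per-day cost outputs (fares, auto operating/parking costs, tolls) to annual totals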
# Total number of households
tm_tot_hh = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_households_inc") == True), 'value'].sum()
tm_tot_hh_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_households_inc1"),'value'].item()
tm_tot_hh_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_households_inc2"),'value'].item()
# Total household income (model outputs are in 2000$, annual)
tm_total_hh_inc = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_hh_inc") == True), 'value'].sum()
tm_total_hh_inc_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_hh_inc_inc1"),'value'].item()
tm_total_hh_inc_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_hh_inc_inc2"),'value'].item()
# Total transit fares (model outputs are in 2000$, per day)
tm_tot_transit_fares = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_transit_fares") == True), 'value'].sum() * days_per_year
tm_tot_transit_fares_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_fares_inc1"),'value'].item() * days_per_year
tm_tot_transit_fares_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_fares_inc2"),'value'].item() * days_per_year
# Total auto op cost (model outputs are in 2000$, per day)
tm_tot_auto_op_cost = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_auto_cost_inc") == True), 'value'].sum() * days_per_year
tm_tot_auto_op_cost_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_cost_inc1"),'value'].item() * days_per_year
tm_tot_auto_op_cost_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_cost_inc2"),'value'].item() * days_per_year
# Total auto parking cost (model outputs are in 2000$, per day, in cents)
#tm_travel_cost_df['park_cost'] = (tm_travel_cost_df['pcost_indiv']+tm_travel_cost_df['pcost_joint']) * tm_travel_cost_df['freq']
tm_tot_auto_park_cost = (tm_travel_cost_df.pcost_indiv.sum() + tm_travel_cost_df.pcost_joint.sum()) * days_per_year / 100
tm_tot_auto_park_cost_inc1 = (tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 1),'pcost_indiv'].sum() + tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 1),'pcost_joint'].sum()) * days_per_year / 100
tm_tot_auto_park_cost_inc2 = (tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 2),'pcost_indiv'].sum() + tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 2),'pcost_joint'].sum()) * days_per_year / 100
# Calculating number of autos owned from autos_owned.csv
tm_auto_owned_df['tot_autos'] = tm_auto_owned_df['autos'] * tm_auto_owned_df['households']
tm_tot_autos_owned = tm_auto_owned_df['tot_autos'].sum()
tm_tot_autos_owned_inc1 = tm_auto_owned_df.loc[(tm_auto_owned_df['incQ'] == 1), 'tot_autos'].sum()
tm_tot_autos_owned_inc2 = tm_auto_owned_df.loc[(tm_auto_owned_df['incQ'] == 2), 'tot_autos'].sum()
# Total auto ownership cost in 2000$
tm_tot_auto_owner_cost = tm_tot_autos_owned * auto_ownership_cost * inflation_18_20 / inflation_00_20
tm_tot_auto_owner_cost_inc1 = tm_tot_autos_owned_inc1 * auto_ownership_cost_inc1 * inflation_18_20 / inflation_00_20
tm_tot_auto_owner_cost_inc2 = tm_tot_autos_owned_inc2 * auto_ownership_cost_inc2 * inflation_18_20 / inflation_00_20
# Total Transportation Cost (in 2000$)
tp_cost = tm_tot_auto_op_cost + tm_tot_transit_fares + tm_tot_auto_owner_cost + tm_tot_auto_park_cost
tp_cost_inc1 = tm_tot_auto_op_cost_inc1 + tm_tot_transit_fares_inc1 + tm_tot_auto_owner_cost_inc1 + tm_tot_auto_park_cost_inc1
tp_cost_inc2 = tm_tot_auto_op_cost_inc2 + tm_tot_transit_fares_inc2 + tm_tot_auto_owner_cost_inc2 + tm_tot_auto_park_cost_inc2
# Mean transportation cost per household in 2020$
tp_cost_mean = tp_cost / tm_tot_hh * inflation_00_20
tp_cost_mean_inc1 = tp_cost_inc1 / tm_tot_hh_inc1 * inflation_00_20
tp_cost_mean_inc2 = tp_cost_inc2 / tm_tot_hh_inc2 * inflation_00_20
metrics_dict[runid,metric_id,'mean_transportation_cost_2020$',year,dbp] = tp_cost_mean
metrics_dict[runid,metric_id,'mean_transportation_cost_2020$_inc1',year,dbp] = tp_cost_mean_inc1
metrics_dict[runid,metric_id,'mean_transportation_cost_2020$_inc2',year,dbp] = tp_cost_mean_inc2
# Transportation cost % of income
tp_cost_pct_inc = tp_cost / tm_total_hh_inc
tp_cost_pct_inc_inc1 = tp_cost_inc1 / tm_total_hh_inc_inc1
tp_cost_pct_inc_inc2 = tp_cost_inc2 / tm_total_hh_inc_inc2
tp_cost_pct_inc_inc1and2 = (tp_cost_inc1+tp_cost_inc2) / (tm_total_hh_inc_inc1+tm_total_hh_inc_inc2)
# Transportation cost % of income metrics
metrics_dict[runid,metric_id,'transportation_cost_pct_income',year,dbp] = tp_cost_pct_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1',year,dbp] = tp_cost_pct_inc_inc1
metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc2',year,dbp] = tp_cost_pct_inc_inc2
metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1and2',year,dbp] = tp_cost_pct_inc_inc1and2
# Transportation cost % of income metrics; split by cost bucket
metrics_dict[runid,metric_id,'transportation_cost_pct_income_autoop',year,dbp] = tm_tot_auto_op_cost / tm_total_hh_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_autopark',year,dbp] = tm_tot_auto_park_cost / tm_total_hh_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_transitfare',year,dbp] = tm_tot_transit_fares / tm_total_hh_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_autoown',year,dbp] = tm_tot_auto_owner_cost / tm_total_hh_inc
# Add housing costs from Shimon's outputs
housing_costs_2050_df = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/metrics_files/2050 Share of Income Spent on Housing.csv')
housing_costs_2015_df = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/metrics_files/2015 Share of Income Spent on Housing.csv')
housing_costs_2015_df['totcosts'] = housing_costs_2015_df['share_income'] * housing_costs_2015_df['households']
if year == "2050":
metrics_dict[runid,metric_id,'housing_cost_pct_income',year,dbp] = housing_costs_2050_df['w_all'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1',year,dbp] = housing_costs_2050_df['w_q1'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc2',year,dbp] = housing_costs_2050_df['w_q2'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1and2',year,dbp] = housing_costs_2050_df['w_q1_q2'].sum()
elif year == "2015":
metrics_dict[runid,metric_id,'housing_cost_pct_income',year,dbp] = housing_costs_2015_df.loc[(housing_costs_2015_df['tenure'].str.contains("Total")), 'totcosts'].sum() / \
housing_costs_2015_df.loc[(housing_costs_2015_df['tenure'].str.contains("Total")), 'households'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1',year,dbp] = housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q1t")), 'share_income'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc2',year,dbp] = housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q2t")), 'share_income'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1and2',year,dbp] = (housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q1t")), 'totcosts'].sum() + housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q2t")), 'totcosts'].sum()) / \
(housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q1t")), 'households'].sum() + housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q2t")), 'households'].sum())
# Total H+T Costs pct of income
metrics_dict[runid,metric_id,'HplusT_cost_pct_income',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income',year,dbp]
metrics_dict[runid,metric_id,'HplusT_cost_pct_income_inc1',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1',year,dbp]
metrics_dict[runid,metric_id,'HplusT_cost_pct_income_inc2',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc2',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc2',year,dbp]
metrics_dict[runid,metric_id,'HplusT_cost_pct_income_inc1and2',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1and2',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1and2',year,dbp]
# Tolls & Fares
    # Aggregate the auto times table by income quartile (sum over the 'Income' index level)
tm_auto_times_df = tm_auto_times_df.sum(level='Income')
# Calculating Total Tolls per day = bridge tolls + value tolls (2000$)
total_tolls = OrderedDict()
for inc_level in range(1,5):
total_tolls['inc%d' % inc_level] = tm_auto_times_df.loc['inc%d' % inc_level, ['Bridge Tolls', 'Value Tolls']].sum()/100 # cents -> dollars
total_tolls_allHH = sum(total_tolls.values())
total_tolls_LIHH = total_tolls['inc1'] + total_tolls['inc2']
# Average Daily Tolls per household
metrics_dict[runid,metric_id,'tolls_per_HH',year,dbp] = total_tolls_allHH / tm_tot_hh * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_LIHH',year,dbp] = total_tolls_LIHH / (tm_tot_hh_inc1+tm_tot_hh_inc2) * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_inc1HH',year,dbp] = total_tolls['inc1'] / tm_tot_hh_inc1 * inflation_00_20
# Average Daily Fares per Household (note: transit fares totals calculated above are annual and need to be divided by days_per_year)
metrics_dict[runid,metric_id,'fares_per_HH',year,dbp] = tm_tot_transit_fares / tm_tot_hh * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_LIHH',year,dbp] = (tm_tot_transit_fares_inc1 + tm_tot_transit_fares_inc2) / (tm_tot_hh_inc1+tm_tot_hh_inc2) * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_inc1HH',year,dbp] = tm_tot_transit_fares_inc1 / tm_tot_hh_inc1 * inflation_00_20 / days_per_year
# per trip
# Total auto trips per day (model outputs are in trips, per day)
tm_tot_auto_trips = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_auto_trips") == True), 'value'].sum()
tm_tot_auto_trips_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_trips_inc1"),'value'].item()
tm_tot_auto_trips_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_trips_inc2"),'value'].item()
# Total transit trips per day (model outputs are in trips, per day)
tm_tot_transit_trips = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_transit_trips") == True), 'value'].sum()
tm_tot_transit_trips_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_trips_inc1"),'value'].item()
tm_tot_transit_trips_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_trips_inc2"),'value'].item()
# Average Tolls per trip (total_tolls_xx is calculated above as per day tolls in 2000 dollars)
metrics_dict[runid,metric_id,'tolls_per_trip',year,dbp] = total_tolls_allHH / tm_tot_auto_trips * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_trip_inc1and2',year,dbp] = total_tolls_LIHH / (tm_tot_auto_trips_inc1+tm_tot_auto_trips_inc2) * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_trip_inc1',year,dbp] = total_tolls['inc1'] / tm_tot_auto_trips_inc1 * inflation_00_20
# Total auto operating cost per trip (tm_tot_auto_op_cost and tm_tot_auto_park_cost are calculated above as annual costs in 2000 dollars)
metrics_dict[runid,metric_id,'autocost_per_trip',year,dbp] = (tm_tot_auto_op_cost + tm_tot_auto_park_cost) / tm_tot_auto_trips * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'autocost_per_trip_inc1and2',year,dbp] = (tm_tot_auto_op_cost_inc1 + tm_tot_auto_op_cost_inc2 + tm_tot_auto_park_cost_inc1 + tm_tot_auto_park_cost_inc2) / (tm_tot_auto_trips_inc1+tm_tot_auto_trips_inc2) * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'autocost_per_trip_inc1',year,dbp] = (tm_tot_auto_op_cost_inc1 + tm_tot_auto_park_cost_inc1) / tm_tot_auto_trips_inc1 * inflation_00_20 / days_per_year
# Average Fares per trip (note: transit fares totals calculated above are annual and need to be divided by days_per_year)
metrics_dict[runid,metric_id,'fares_per_trip',year,dbp] = tm_tot_transit_fares / tm_tot_transit_trips * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_trip_inc1and2',year,dbp] = (tm_tot_transit_fares_inc1 + tm_tot_transit_fares_inc2) / (tm_tot_transit_trips_inc1+tm_tot_transit_trips_inc2) * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_trip_inc1',year,dbp] = tm_tot_transit_fares_inc1 / tm_tot_transit_trips_inc1 * inflation_00_20 / days_per_year
def calculate_Affordable2_deed_restricted_housing(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "A2"
# totals for 2050 and 2015
metrics_dict[runid,metric_id,'deed_restricted_total',y2,dbp] = parcel_sum_df['deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_total',y1,dbp] = parcel_sum_df['deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_total',y2,dbp] = parcel_sum_df['residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_total',y1,dbp] = parcel_sum_df['residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_CoC',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_CoC',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_CoC',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_CoC',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'residential_units_2015'].sum()
# diff between 2050 and 2015
metrics_dict[runid,metric_id,'deed_restricted_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_total',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_total',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_total',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_total',y1,dbp]
metrics_dict[runid,metric_id,'deed_restricted_HRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_HRA',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_HRA',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_HRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_HRA',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_HRA',y1,dbp]
metrics_dict[runid,metric_id,'deed_restricted_TRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_TRA',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_TRA',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_TRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_TRA',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_TRA',y1,dbp]
metrics_dict[runid,metric_id,'deed_restricted_nonHRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_diff',y_diff,dbp] - metrics_dict[runid,metric_id,'deed_restricted_HRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'residential_units_nonHRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_diff',y_diff,dbp] - metrics_dict[runid,metric_id,'residential_units_HRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_CoC_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_CoC',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_CoC',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_CoC_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_CoC',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_CoC',y1,dbp]
# metric: deed restricted % of total units: overall, HRA and non-HRA
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_diff',y_diff,dbp] / metrics_dict[runid,metric_id,'residential_units_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_HRA',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_HRA_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_HRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_TRA',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_TRA_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_TRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_nonHRA',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_nonHRA_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_nonHRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_CoC',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_CoC_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_CoC_diff',y_diff,dbp]
print('********************A2 Affordable********************')
print('DR pct of new units %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units',y_diff,dbp] )
print('DR pct of new units in HRAs %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_HRA',y_diff,dbp] )
print('DR pct of new units in TRAs %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_TRA',y_diff,dbp] )
print('DR pct of new units outside of HRAs %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_nonHRA',y_diff,dbp])
# Forcing preservation metrics
metrics_dict[runid,metric_id,'preservation_affordable_housing',y_diff,dbp] = 1
def calculate_Connected1_accessibility(runid, year, dbp, tm_scen_metrics_df, metrics_dict):
metric_id = "C1"
# % of Jobs accessible by 30 min car OR 45 min transit
metrics_dict[runid,metric_id,'pct_jobs_acc_by_allmodes',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_accessible_job_share"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_allmodes_coc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_accessible_job_share_coc"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_allmodes_noncoc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_accessible_job_share_noncoc"), 'value'].item()
# % of Jobs accessible by 30 min car only
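    # reachable by car = "drive only" accessible share + "accessible by both transit and drive" share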
metrics_dict[runid,metric_id,'pct_jobs_acc_by_drv_only',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_drv_only_acc_accessible_job_share"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_drv_only_coc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_drv_only_acc_accessible_job_share_coc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_coc"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_drv_only_noncoc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_drv_only_acc_accessible_job_share_noncoc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_noncoc"), 'value'].item()
# % of Jobs accessible by 45 min transit only
metrics_dict[runid,metric_id,'pct_jobs_acc_by_trn_only',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_only_acc_accessible_job_share"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_trn_only_coc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_only_acc_accessible_job_share_coc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_coc"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_trn_only_noncoc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_only_acc_accessible_job_share_noncoc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_noncoc"), 'value'].item()
def calculate_Connected1_proximity(runid, year, dbp, tm_scen_metrics_df, metrics_dict):
metric_id = "C1"
def calculate_Connected2_crowding(runid, year, dbp, transit_operator_df, metrics_dict):
metric_id = "C2"
if "2015" in runid: tm_run_location = tm_run_location_ipa
else: tm_run_location = tm_run_location_bp
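    # 2015 base-year runs are read from the IPA run directory and blueprint runs from the blueprint directory
    # (tm_run_location_ipa / tm_run_location_bp are paths defined elsewhere in this script)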
tm_crowding_df = pd.read_csv(tm_run_location+runid+'/OUTPUT/metrics/transit_crowding_complete.csv')
tm_crowding_df = tm_crowding_df[['TIME','SYSTEM','ABNAMESEQ','period','load_standcap','AB_VOL']]
tm_crowding_df = tm_crowding_df.loc[tm_crowding_df['period'] == "AM"]
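    # flag each link's in-vehicle time as over capacity when load exceeds standing capacity (load_standcap > 1)
    # and as crowded when it exceeds 85% of standing capacity (load_standcap > 0.85)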
tm_crowding_df['time_overcapacity'] = tm_crowding_df.apply (lambda row: row['TIME'] if (row['load_standcap']>1) else 0, axis=1)
tm_crowding_df['time_crowded'] = tm_crowding_df.apply (lambda row: row['TIME'] if (row['load_standcap']>0.85) else 0, axis=1)
tm_crowding_df['person_hrs_total'] = tm_crowding_df['TIME'] * tm_crowding_df['AB_VOL']
tm_crowding_df['person_hrs_overcap'] = tm_crowding_df['time_overcapacity'] * tm_crowding_df['AB_VOL']
tm_crowding_df['person_hrs_crowded'] = tm_crowding_df['time_crowded'] * tm_crowding_df['AB_VOL']
tm_crowding_df = pd.merge(left=tm_crowding_df, right=transit_operator_df, left_on="SYSTEM", right_on="SYSTEM", how="left")
system_crowding_df = tm_crowding_df[['person_hrs_total','person_hrs_overcap','person_hrs_crowded']].groupby(tm_crowding_df['operator']).sum().reset_index()
system_crowding_df['pct_overcapacity'] = system_crowding_df['person_hrs_overcap'] / system_crowding_df['person_hrs_total']
system_crowding_df['pct_crowded'] = system_crowding_df['person_hrs_crowded'] / system_crowding_df['person_hrs_total']
for index,row in system_crowding_df.iterrows():
if row['operator'] in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local','GGT Express','WETA']:
metrics_dict[runid,metric_id,'crowded_pct_personhrs_AM_%s' % row['operator'],year,dbp] = row['pct_crowded']
def calculate_Connected2_hwy_traveltimes(runid, year, dbp, hwy_corridor_links_df, metrics_dict):
metric_id = "C2"
if "2015" in runid: tm_run_location = tm_run_location_ipa
else: tm_run_location = tm_run_location_bp
tm_loaded_network_df = pd.read_csv(tm_run_location+runid+'/OUTPUT/avgload5period.csv')
# Keeping essential columns of loaded highway network: node A and B, distance, free flow time, congested time
tm_loaded_network_df = tm_loaded_network_df.rename(columns=lambda x: x.strip())
tm_loaded_network_df = tm_loaded_network_df[['a','b','distance','fft','ctimAM']]
tm_loaded_network_df['link'] = tm_loaded_network_df['a'].astype(str) + "_" + tm_loaded_network_df['b'].astype(str)
    # merge the list of links that make up each highway corridor with the loaded network, to attach each link's distance and AM times
hwy_corridor_links_df = pd.merge(left=hwy_corridor_links_df, right=tm_loaded_network_df, left_on="link", right_on="link", how="left")
corridor_travel_times_df = hwy_corridor_links_df[['distance','fft','ctimAM']].groupby(hwy_corridor_links_df['route']).sum().reset_index()
for index,row in corridor_travel_times_df.iterrows():
metrics_dict[runid,metric_id,'travel_time_AM_%s' % row['route'],year,dbp] = row['ctimAM']
def calculate_Connected2_trn_traveltimes(runid, year, dbp, transit_operator_df, metrics_dict):
metric_id = "C2"
if "2015" in runid: tm_run_location = tm_run_location_ipa
else: tm_run_location = tm_run_location_bp
tm_trn_line_df = pd.read_csv(tm_run_location+runid+'/OUTPUT/trn/trnline.csv')
# It doesn't really matter which path ID we pick, as long as it is AM
tm_trn_line_df = tm_trn_line_df.loc[tm_trn_line_df['path id'] == "am_wlk_loc_wlk"]
tm_trn_line_df = pd.merge(left=tm_trn_line_df, right=transit_operator_df, left_on="mode", right_on="mode", how="left")
# grouping by transit operator, and summing all line times and distances, to get metric of "time per unit distance", in minutes/mile
trn_operator_travel_times_df = tm_trn_line_df[['line time','line dist']].groupby(tm_trn_line_df['operator']).sum().reset_index()
trn_operator_travel_times_df['time_per_dist_AM'] = trn_operator_travel_times_df['line time'] / trn_operator_travel_times_df['line dist']
# grouping by mode, and summing all line times and distances, to get metric of "time per unit distance", in minutes/mile
trn_mode_travel_times_df = tm_trn_line_df[['line time','line dist']].groupby(tm_trn_line_df['mode_name']).sum().reset_index()
trn_mode_travel_times_df['time_per_dist_AM'] = trn_mode_travel_times_df['line time'] / trn_mode_travel_times_df['line dist']
for index,row in trn_operator_travel_times_df.iterrows():
if row['operator'] in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local']:
metrics_dict[runid,metric_id,'time_per_dist_AM_%s' % row['operator'],year,dbp] = row['time_per_dist_AM']
for index,row in trn_mode_travel_times_df.iterrows():
metrics_dict[runid,metric_id,'time_per_dist_AM_%s' % row['mode_name'],year,dbp] = row['time_per_dist_AM']
def calculate_Diverse1_LIHHinHRAs(runid, dbp, parcel_sum_df, tract_sum_df, GG_sum_df, normalize_factor_Q1Q2, normalize_factor_Q1, metrics_dict):
metric_id = "D1"
# Share of region's LIHH households that are in HRAs
metrics_dict[runid,metric_id,'LIHH_total',y2,dbp] = parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_total',y1,dbp] = parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_inHRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2050'].sum() + parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_inHRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2015'].sum() + parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq2_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2,dbp] = metrics_dict[runid,metric_id,'LIHH_inHRA',y2,dbp] / metrics_dict[runid,metric_id,'LIHH_total',y2,dbp]
metrics_dict[runid,metric_id,'LIHH_shareinHRA',y1,dbp] = metrics_dict[runid,metric_id,'LIHH_inHRA',y1,dbp] / metrics_dict[runid,metric_id,'LIHH_total',y1,dbp]
# normalizing for overall growth in LIHH
metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'LIHH_shareinHRA',y1,dbp] * normalize_factor_Q1Q2
# Total number of Households
# Total HHs in HRAs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inHRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inHRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2050'].sum()
# Total HHs in TRAs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inTRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inTRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2050'].sum()
# Total HHs in HRAs only, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inHRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inHRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'tothh_2050'].sum()
# Total HHs in TRAs only, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inTRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inTRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'tothh_2050'].sum()
# Total HHs in HRA/TRAs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inHRATRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inHRATRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)), 'tothh_2050'].sum()
# Total HHs in DR Tracts, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inDRTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inDRTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'tothh_2050'].sum()
# Total HHs in CoC Tracts, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2050'].sum()
# Total HHs in remainder of region (RoR); i.e. not HRA or TRA or CoC or DR
metrics_dict[runid,metric_id,'TotHH_inRoR',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inRoR',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'tothh_2050'].sum()
# Total HHs in GGs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inGGs',y1,dbp] = GG_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inGGs',y2,dbp] = GG_sum_df['tothh_2050'].sum()
# Total HHs in Transit Rich GGs, in 2015 and 2050
GG_TRich_sum_df = GG_sum_df[GG_sum_df['Designation']=="Transit-Rich"]
metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y1,dbp] = GG_TRich_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y2,dbp] = GG_TRich_sum_df['tothh_2050'].sum()
########### Tracking movement of Q1 households: Q1 share of Households
# Share of Households that are Q1, within each geography type in this order:
# Overall Region; HRAs; TRAs, DR Tracts; CoCs; Rest of Region; and also GGs and TRichGGs
metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y1,dbp] = parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofRegion_normalized',y1,dbp] = parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum() * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y2,dbp] = parcel_sum_df['hhq1_2050'].sum() / parcel_sum_df['tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRA',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRA',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRA',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofTRA',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRA',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRAonly',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRAonly',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRAonly',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRAonly',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False)), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRATRA',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False)), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRATRA',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inDRTracts',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inDRTracts',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofRoR',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inRoR',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofRoR_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofRoR',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofRoR',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inRoR',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofGGs',y1,dbp] = GG_sum_df['hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inGGs',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofGGs_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofGGs',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofGGs',y2,dbp] = GG_sum_df['hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inGGs',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs',y1,dbp] = GG_TRich_sum_df['hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs',y2,dbp] = GG_TRich_sum_df['hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y2,dbp]
'''
print('********************D1 Diverse********************')
print('Growth of LIHH share of population (normalize factor))',normalize_factor_Q1Q2 )
print('LIHH Share in HRA 2050 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2,dbp] )
print('LIHH Share in HRA 2015 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y1,dbp] )
print('LIHH Share of HRA 2050 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareofHRA',y2,dbp])
print('LIHH Share of HRA 2015 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareofHRA_normalized',y1,dbp] )
'''
def calculate_Diverse2_LIHH_Displacement(runid, dbp, parcel_sum_df, tract_sum_df, TRA_sum_df, GG_sum_df, normalize_factor_Q1Q2, normalize_factor_Q1, metrics_dict):
metric_id = "D2"
    # For reference: total number of LIHH (Q1 households) in displacement-risk (DR) areas
metrics_dict[runid,metric_id,'LIHH_inDR',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('DR', na=False), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_inDR',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('DR', na=False), 'hhq1_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_inDR_normalized',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('DR', na=False), 'hhq1_2015'].sum() * normalize_factor_Q1
print('********************D2 Diverse********************')
print('Total Number of LIHH in DR tracts in 2050',metrics_dict[runid,metric_id,'LIHH_inDR',y2,dbp] )
print('Number of LIHH in DR tracts in 2015',metrics_dict[runid,metric_id,'LIHH_inDR',y1,dbp] )
print('Number of LIHH in DR tracts in normalized',metrics_dict[runid,metric_id,'LIHH_inDR_normalized',y1,dbp] )
###### Displacement at Tract Level (for Displacement Risk Tracts and CoC Tracts and HRA Tracts)
# Total number of DR, CoC, HRA Tracts
metrics_dict[runid,metric_id,'Num_DRtracts_total',y1,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Num_CoCtracts_total',y1,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Num_HRAtracts_total',y1,dbp] = tract_sum_df.loc[(tract_sum_df['hra'] == 1), 'tract_id'].nunique()
# Calculating share of Q1 households at tract level / we are not going to normalize this since we want to check impacts at neighborhood level
#tract_sum_df['hhq1_pct_2015_normalized'] = tract_sum_df['hhq1_2015'] / tract_sum_df['tothh_2015'] * normalize_factor_Q1
tract_sum_df['hhq1_pct_2050'] = tract_sum_df['hhq1_2050'] / tract_sum_df['tothh_2050']
tract_sum_df['hhq1_pct_2015'] = tract_sum_df['hhq1_2015'] / tract_sum_df['tothh_2015']
# Creating functions to check if rows of a dataframe lost hhq1 share or absolute; applied to tract_summary_df and TRA_summary_df
def check_losthhq1_share(row,j):
if (row['hhq1_pct_2015'] == 0): return 0
elif ((row['hhq1_pct_2050']/row['hhq1_pct_2015'])<j): return 1
else: return 0
def check_losthhq1_abs(row,j):
if (row['hhq1_2015'] == 0): return 0
elif ((row['hhq1_2050']/row['hhq1_2015'])<j): return 1
else: return 0
# Calculating number of Tracts that Lost LIHH, with "lost" defined as any loss, or 10% loss
for i in [0, 10]:
if i == 0:
j = 1
else:
j = 0.9
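        # j is the retention threshold: a tract is flagged as having "lost" LIHH when its 2050/2015 ratio
        # falls below j (j=1 flags any loss; j=0.9 flags a loss of more than 10%)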
# Calculating change in share of LIHH at tract level to check gentrification
tract_sum_df['lost_hhq1_%dpct' % i] = tract_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
#(lambda row: 1 if ((row['hhq1_pct_2050']/row['hhq1_pct_2015_normalized'])<j) else 0, axis=1)
#(lambda row: 1 if (row['hhq1_pct_2050'] < (row['hhq1_pct_2015']*j)) else 0, axis=1)
# Calculating absolute change in LIHH at tract level to check true displacement
tract_sum_df['lost_hhq1_abs_%dpct' % i] = tract_sum_df.apply (lambda row: check_losthhq1_abs(row,j), axis=1)
#(lambda row: 1 if (row['hhq1_2050'] < (row['hhq1_2015']*j)) else 0, axis=1)
############################### Gentrification
######## Gentrification in Displacement Risk Tracts
# Number or percent of DR tracts that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1) & (tract_sum_df['lost_hhq1_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_DRtracts_total',y1,dbp] )
print('Number of DR Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of DR Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
######## Gentrification in Communities of Concern
# Number or percent of CoC tracts that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['coc_flag_pba2050'] == 1) & (tract_sum_df['lost_hhq1_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_CoCtracts_total',y1,dbp] )
print('Number of CoC Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of CoC Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
######## Gentrification in HRAs
# Number or percent of HRA tracts that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['hra'] == 1) & (tract_sum_df['lost_hhq1_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_HRAtracts_total',y1,dbp] )
print('Number of HRA Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of HRA Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
############################### Displacement
######## Displacement in Displacement Risk Tracts
# Number or percent of DR tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1) & (tract_sum_df['lost_hhq1_abs_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_DRtracts_total',y1,dbp] )
        print('Number of DR Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
        print('Pct of DR Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
######## Displacement in Communities of Concern
# Number or percent of CoC tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['coc_flag_pba2050'] == 1) & (tract_sum_df['lost_hhq1_abs_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_CoCtracts_total',y1,dbp] )
print('Number of CoC Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of CoC Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
######## Displacement in HRAs
# Number or percent of HRA tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['hra'] == 1) & (tract_sum_df['lost_hhq1_abs_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_HRAtracts_total',y1,dbp] )
print('Number of HRA Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of HRA Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
##### Calculating displacement risk using the PBA2040 methodology
# The analysis estimated which zones (i.e., TAZs) gained or lost lower-income households; those zones
# that lost lower-income households over the time period would be flagged as being at risk of displacement.
# The share of lower-income households at risk of displacement would be calculated by
# dividing the number of lower-income households living in TAZs flagged as PDAs, TPAs, or
    # high-opportunity areas with an increased risk of displacement by the total number of lower-income
# households living in TAZs flagged as PDAs, TPAs, or high-opportunity areas in 2040
# Calculating this first for all DR Risk/CoC/HRA tracts; and next for TRA areas
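    # Worked illustration with hypothetical numbers (not model output): if 1,000 Q1 households live in
    # DR/CoC/HRA-flagged tracts in 2050, and 200 of them sit in tracts flagged as having lost Q1 households
    # in absolute terms, the PBA40-style displacement-risk share is 200 / 1,000 = 0.2.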
######## PBA40 Displacement risk in DR Risk/CoC/HRA tracts
# Q1 only
#metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts',y1,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
# (tract_sum_df['hra'] == 1)), 'hhq1_2015'].nunique()
metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts',y2,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)), 'hhq1_2050'].sum()
# Total number of LIHH in HRA/CoC/DR tracts that lost hhq1
metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts_disp',y_diff,dbp] = tract_sum_df.loc[(((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)) & (tract_sum_df['lost_hhq1_abs_0pct'] == 1)), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_DRCoCHRAtracts',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts',y2,dbp]
#For both Q1, Q2 - because this is how it was done in PBA40
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts',y2,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)), 'hhq1_2050'].sum() + \
tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts_disp',y_diff,dbp] = tract_sum_df.loc[(((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)) & (tract_sum_df['lost_hhq1_abs_0pct'] == 1)), 'hhq1_2050'].sum() + \
tract_sum_df.loc[(((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)) & (tract_sum_df['lost_hhq1_abs_0pct'] == 1)), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_Q1Q2_DRCoCHRAtracts',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts',y2,dbp]
########### Repeating all above analysis for TRAs
# Calculating share of Q1 households at TRA level using TRA summary dataframe
TRA_sum_df['hhq1_pct_2015'] = TRA_sum_df['hhq1_2015'] / TRA_sum_df['tothh_2015']
#TRA_sum_df['hhq1_pct_2015_normalized'] = TRA_sum_df['hhq1_pct_2015'] * normalize_factor_Q1
TRA_sum_df['hhq1_pct_2050'] = TRA_sum_df['hhq1_2050'] / TRA_sum_df['tothh_2050']
# Total number of TRAs
metrics_dict[runid,metric_id,'Num_TRAs_total',y1,dbp] = TRA_sum_df['juris_tra'].nunique()
# Calculating number of TRAs that Lost LIHH as a share of total HH, with "lost" defined as any loss, or 10% loss
for i in [0, 10]:
if i == 0:
j = 1
else:
j = 0.9
# Calculating change in share of LIHH at TRA level to check gentrification
TRA_sum_df['lost_hhq1_%dpct' % i] = TRA_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
# Calculating absolute change in LIHH at TRA level to check true displacement
TRA_sum_df['lost_hhq1_abs_%dpct' % i] = TRA_sum_df.apply (lambda row: check_losthhq1_abs(row,j), axis=1)
######## Gentrification in TRAs
# Number or percent of TRAs that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_%dpct' % i] == 1), 'juris_tra'].nunique()
metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_TRAs_total',y1,dbp])
print('Number of TRAs that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of TRAs that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] )
######## Displacement in TRAs
# Number or percent of DR tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_%dpct' % i] == 1), 'juris_tra'].nunique()
metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_TRAs_total',y1,dbp])
print('Number of TRAs that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of TRAs that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
######## PBA40 Displacement Risk metric in TRAs
metrics_dict[runid,metric_id,'Num_LIHH_inTRAs',y2,dbp] = TRA_sum_df['hhq1_2050'].sum()
metrics_dict[runid,metric_id,'Num_LIHH_inTRAs_disp',y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_0pct'] == 1), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_TRAs',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_LIHH_inTRAs_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_LIHH_inTRAs',y2,dbp]
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inTRAs',y2,dbp] = TRA_sum_df['hhq1_2050'].sum() + TRA_sum_df['hhq2_2050'].sum()
metrics_dict[runid,metric_id,'Num_Q1Q2_inTRAs_disp',y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_0pct'] == 1), 'hhq1_2050'].sum() + TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_0pct'] == 1), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_Q1Q2_TRAs',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_Q1Q2_inTRAs_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inTRAs',y2,dbp]
######## Displacement from Growth Geographies
# Calculating GG rows that lost inc1 Households
GG_sum_df['hhq1_pct_2015'] = GG_sum_df['hhq1_2015'] / GG_sum_df['tothh_2015']
#GG_sum_df['hhq1_pct_2015_normalized'] = GG_sum_df['hhq1_pct_2015'] * normalize_factor_Q1
GG_sum_df['hhq1_pct_2050'] = GG_sum_df['hhq1_2050'] / GG_sum_df['tothh_2050']
# Total number of GGs
metrics_dict[runid,metric_id,'Num_GGs_total',y1,dbp] = GG_sum_df['PDA_ID'].nunique()
# Total number of Transit Rich GGs
    GG_TRich_sum_df = GG_sum_df[GG_sum_df['Designation']=="Transit-Rich"].copy()
metrics_dict[runid,metric_id,'Num_GGs_TRich_total',y1,dbp] = GG_TRich_sum_df['PDA_ID'].nunique()
# Calculating number of GGs that Lost LIHH as a share of total HH, with "lost" defined as any loss, or 10% loss
for i in [0, 10]:
if i == 0:
j = 1
else:
j = 0.9
GG_sum_df['lost_hhq1_%dpct' % i] = GG_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
GG_TRich_sum_df['lost_hhq1_%dpct' % i] = GG_TRich_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
# Number or percent of GGs that lost Q1 households as a proportion of total HH
metrics_dict[runid,metric_id,'Num_GG_lostLIHH_%dpct' % i,y_diff,dbp] = GG_sum_df.loc[(GG_sum_df['lost_hhq1_%dpct' % i] == 1), 'PDA_ID'].nunique()
metrics_dict[runid,metric_id,'Pct_GG_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_GG_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_GGs_total',y1,dbp])
print('Number of GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_GG_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_GG_lostLIHH_%dpct' % i,y_diff,dbp] )
# Number or percent of Transit Rich GGs that lost Q1 households as a proportion of total HH
metrics_dict[runid,metric_id,'Num_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] = GG_TRich_sum_df.loc[(GG_TRich_sum_df['lost_hhq1_%dpct' % i] == 1), 'PDA_ID'].nunique()
metrics_dict[runid,metric_id,'Pct_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_GGs_TRich_total',y1,dbp])
print('Number of Transit Rich GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of Transit Rich GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] )
tract_sum_filename = 'C:/Users/{}/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/tract_summary_output.csv'.format(os.getenv('USERNAME'))
tract_sum_df.to_csv(tract_sum_filename, header=True, sep=',')
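# Illustrative sketch only (hypothetical toy numbers, never called by the pipeline): the gentrification
# vs. displacement distinction above reduces to two row-level checks, one on the Q1 *share* of households
# and one on the absolute Q1 *count*. The real checks are the nested check_losthhq1_share() /
# check_losthhq1_abs() defined inside the function above; this standalone copy just shows the mechanics.
def _example_lost_lihh_checks():
    toy = pd.DataFrame({
        'tothh_2015': [1000, 1000],
        'tothh_2050': [1500, 1500],
        'hhq1_2015':  [ 300,  300],
        'hhq1_2050':  [ 350,  250],
    })
    toy['hhq1_pct_2015'] = toy['hhq1_2015'] / toy['tothh_2015']   # 0.30, 0.30
    toy['hhq1_pct_2050'] = toy['hhq1_2050'] / toy['tothh_2050']   # ~0.23, ~0.17
    # share check (gentrification proxy): both rows lose Q1 share because total households grew faster
    toy['lost_share'] = (toy['hhq1_pct_2050'] / toy['hhq1_pct_2015']) < 1
    # absolute check (displacement proxy): only the second row actually lost Q1 households
    toy['lost_abs'] = (toy['hhq1_2050'] / toy['hhq1_2015']) < 1
    return toy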
def calculate_Healthy1_HHs_SLRprotected(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "H1"
# Renaming Parcels as "Protected", "Unprotected", and "Unaffected"
'''
#Basic
def label_SLR(row):
if (row['SLR'] == 12): return 'Unprotected'
elif (row['SLR'] == 24): return 'Unprotected'
elif (row['SLR'] == 36): return 'Unprotected'
elif (row['SLR'] == 100): return 'Protected'
else: return 'Unaffected'
parcel_sum_df['SLR_protection'] = parcel_sum_df.apply (lambda row: label_SLR(row), axis=1)
'''
def label_SLR(row):
if ((row['SLR'] == 12) or (row['SLR'] == 24) or (row['SLR'] == 36)): return 'Unprotected'
elif row['SLR'] == 100: return 'Protected'
else: return 'Unaffected'
parcel_sum_df['SLR_protection'] = parcel_sum_df.apply (lambda row: label_SLR(row), axis=1)
# Calculating protected households
# All households
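    # Note: str.contains("rotected") matches both "Protected" and "Unprotected" (i.e. all affected parcels),
    # while str.contains("Protected") matches only the protected ones.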
tothh_2050_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'tothh_2050'].sum()
tothh_2050_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'tothh_2050'].sum()
tothh_2015_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'tothh_2015'].sum()
tothh_2015_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'tothh_2015'].sum()
# Q1 Households
hhq1_2050_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'hhq1_2050'].sum()
hhq1_2050_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'hhq1_2050'].sum()
hhq1_2015_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'hhq1_2015'].sum()
hhq1_2015_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'hhq1_2015'].sum()
# CoC Households
    CoChh_2050_affected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("rotected") == True) & \
                                             (parcel_sum_df['coc_flag_pba2050']==1)), 'tothh_2050'].sum()
    CoChh_2050_protected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("Protected") == True) & \
                                              (parcel_sum_df['coc_flag_pba2050']==1)), 'tothh_2050'].sum()
    CoChh_2015_affected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("rotected") == True) & \
                                             (parcel_sum_df['coc_flag_pba2050']==1)), 'tothh_2015'].sum()
    CoChh_2015_protected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("Protected") == True) & \
                                              (parcel_sum_df['coc_flag_pba2050']==1)), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_tothh',y2,dbp] = tothh_2050_protected / tothh_2050_affected
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_hhq1',y2,dbp] = hhq1_2050_protected / hhq1_2050_affected
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_CoChh',y2,dbp] = CoChh_2050_protected / CoChh_2050_affected
print('********************H1 Healthy********************')
print('Pct of HHs affected by 3ft SLR that are protected in 2050 in %s' % dbp,metrics_dict[runid,metric_id,'SLR_protected_pct_affected_tothh',y2,dbp])
print('Pct of Q1 HHs affected by 3ft SLR that are protected in 2050 in %s' % dbp,metrics_dict[runid,metric_id,'SLR_protected_pct_affected_hhq1',y2,dbp])
print('Pct of CoC HHs affected by 3ft SLR that are protected in 2050 in %s' % dbp,metrics_dict[runid,metric_id,'SLR_protected_pct_affected_CoChh',y2,dbp])
def calculate_Healthy1_HHs_EQprotected(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "H1"
'''
# Reading building codes file, which has info at building level, on which parcels are inundated and protected
buildings_code = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/Healthy/buildings_with_eq_code.csv')
buildings_eq = pd.merge(left=buildings_code[['building_id', 'parcel_id', 'residential_units', 'year_built', 'earthquake_code']], right=parcel_sum_df[['parcel_id','zone_id','tract_id','coc_flag_pba2050','pba50chcat','hhq1_2015','hhq1_2050','tothh_2015','tothh_2050']], left_on="parcel_id", right_on="parcel_id", how="left")
buildings_eq = pd.merge(left=buildings_eq, right=coc_flag[['tract_id_coc','county_fips']], left_on="tract_id", right_on="tract_id_coc", how="left")
buildings_cat = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/Healthy/building_eq_categories.csv')
buildings_eq = pd.merge(left=buildings_eq, right=buildings_cat, left_on="earthquake_code", right_on="building_eq_code", how="inner")
buildings_eq.drop(['building_eq_code', 'tract_id_coc'], axis=1, inplace=True)
buildings_eq['cost_retrofit_total'] = buildings_eq['residential_units'] * buildings_eq['cost_retrofit']
# Calculated protected households in PLus
# Number of Units retrofitted
metrics_dict['H2_eq_num_units_retrofit'] = buildings_eq['residential_units'].sum()
metrics_dict['H2_eq_num_CoC_units_retrofit'] = buildings_eq.loc[(buildings_eq['coc_flag_pba2050']== 1), 'residential_units'].sum()
metrics_dict['H2_eq_total_cost_retrofit'] = buildings_eq['cost_retrofit_total'].sum()
metrics_dict['H2_eq_CoC_cost_retrofit'] = buildings_eq.loc[(buildings_eq['coc_flag_pba2050']== 1), 'cost_retrofit_total'].sum()
    print('Total number of units retrofitted',metrics_dict['H2_eq_num_units_retrofit'])
    print('CoC number of units retrofitted',metrics_dict['H2_eq_num_CoC_units_retrofit'])
print('Total cost of retrofit',metrics_dict['H2_eq_total_cost_retrofit'])
print('CoC cost of retrofit',metrics_dict['H2_eq_CoC_cost_retrofit'])
'''
def calculate_Healthy1_HHs_WFprotected(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "H1"
'''
#
'''
def calculate_Healthy1_safety(runid, year, dbp, tm_taz_input_df, safety_df, metrics_dict):
metric_id = "H1"
population = tm_taz_input_df.TOTPOP.sum()
per_x_people = 1000000
print('population %d' % population)
fatalities = safety_df.loc[(safety_df['index']=="N_total_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
fatalities_m = safety_df.loc[(safety_df['index']=="N_motorist_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
fatalities_b = safety_df.loc[(safety_df['index']=="N_bike_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
fatalities_p = safety_df.loc[(safety_df['index']=="N_ped_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
injuries = safety_df.loc[(safety_df['index']=="N_injuries") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_annual_per_MNppl',year,dbp] = fatalities / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_auto_annual_per_MNppl',year,dbp] = fatalities_m / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_bike_annual_per_MNppl',year,dbp] = fatalities_b / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_ped_annual_per_MNppl',year,dbp] = fatalities_p / population * per_x_people
metrics_dict[runid,metric_id,'injuries_annual_per_MNppl',year,dbp] = injuries / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_total_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_auto_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_motorist_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_bike_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_bike_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_ped_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_ped_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'injuries_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_injuries_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
def calculate_Healthy2_emissions(runid, year, dbp, tm_taz_input_df, tm_auto_times_df, emfac_df, metrics_dict):
metric_id = "H2"
population = tm_taz_input_df.TOTPOP.sum()
tm_auto_times_df = tm_auto_times_df.sum(level='Mode')
dailyVMT = tm_auto_times_df['Vehicle Miles'].sum() - tm_auto_times_df.loc['truck', ['Vehicle Miles']].sum()
metrics_dict[runid,metric_id,'daily_vmt_per_capita',year,dbp] = dailyVMT / population
metrics_dict[runid,metric_id,'daily_vmt_per_capita',"2005","2005"] = emfac_df.loc[(emfac_df['dbp']==2005), 'VMT per capita'].sum()
metrics_dict[runid,metric_id,'daily_vmt_per_capita',"2035","2035"] = emfac_df.loc[(emfac_df['dbp']==2035), 'VMT per capita'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2005","2005"] = emfac_df.loc[(emfac_df['dbp']==2005), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2015","2015"] = emfac_df.loc[(emfac_df['dbp']==2015), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2035","2035"] = emfac_df.loc[(emfac_df['dbp']==2035), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2050","Plus"] = 0
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2005","2005"] = emfac_df.loc[(emfac_df['dbp']==2005), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2015","2015"] = emfac_df.loc[(emfac_df['dbp']==2015), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2035","2035"] = emfac_df.loc[(emfac_df['dbp']==2035), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2050","Plus"] = 0
def calculate_Vibrant1_JobsHousing(runid, dbp, county_sum_df, metrics_dict):
metric_id = "V1"
metrics_dict[runid,metric_id,'jobs_housing_ratio_region',y1,dbp] = county_sum_df['totemp_2015'].sum() / county_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'jobs_housing_ratio_region',y2,dbp] = county_sum_df['totemp_2050'].sum() / county_sum_df['tothh_2050'].sum()
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'jobs_housing_ratio_%s' % row['county'],y1,dbp] = row['totemp_2015'] / row['tothh_2015']
metrics_dict[runid,metric_id,'jobs_housing_ratio_%s' % row['county'],y2,dbp] = row['totemp_2050'] / row['tothh_2050']
def calculate_Vibrant1_median_commute(runid, year, dbp, tm_commute_df, metrics_dict):
metric_id = "V1"
tm_commute_df['total_commute_miles'] = tm_commute_df['freq'] * tm_commute_df['distance']
commute_dist_df = tm_commute_df[['incQ','freq','total_commute_miles']].groupby(['incQ']).sum()
metrics_dict[runid,metric_id,'mean_commute_distance',year,dbp] = commute_dist_df['total_commute_miles'].sum() / commute_dist_df['freq'].sum()
metrics_dict[runid,metric_id,'mean_commute_distance_inc1',year,dbp] = commute_dist_df['total_commute_miles'][1] / commute_dist_df['freq'][1]
metrics_dict[runid,metric_id,'mean_commute_distance_inc2',year,dbp] = commute_dist_df['total_commute_miles'][2] / commute_dist_df['freq'][2]
metrics_dict[runid,metric_id,'mean_commute_distance_inc3',year,dbp] = commute_dist_df['total_commute_miles'][3] / commute_dist_df['freq'][3]
metrics_dict[runid,metric_id,'mean_commute_distance_inc4',year,dbp] = commute_dist_df['total_commute_miles'][4] / commute_dist_df['freq'][4]
def calculate_Vibrant2_Jobs(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = 'V2'
print('********************V2 Vibrant********************')
# Total Jobs Growth
metrics_dict[runid,metric_id,'Total_jobs',y2,dbp] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'Total_jobs',y1,dbp] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'Total_jobs_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'Total_jobs',y2,dbp]/metrics_dict[runid,metric_id,'Total_jobs',y1,dbp] - 1
print('Number of Jobs in 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs',y2,dbp])
print('Number of Jobs in 2015 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs',y1,dbp])
print('Job Growth from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs_growth',y_diff,dbp])
# MWTEMPN jobs
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2,dbp] = parcel_sum_df['MWTEMPN_2050'].sum()
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1,dbp] = parcel_sum_df['MWTEMPN_2015'].sum()
metrics_dict[runid,metric_id,'Total_jobs_growth_MWTEMPN',y_diff,dbp] = metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2,dbp]/metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1,dbp] - 1
print('Number of Total MWTEMPN Jobs 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2,dbp])
print('Number of Total MWTEMPN Jobs 2015 %s' % dbp,metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1,dbp])
print('Job Growth Total MWTEMPN from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs_growth_MWTEMPN',y_diff,dbp])
# Jobs Growth in PPAs
metrics_dict[runid,metric_id,'PPA_jobs',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'PPA_jobs',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'jobs_growth_PPA',y_diff,dbp] = metrics_dict[runid,metric_id,'PPA_jobs',y2,dbp]/metrics_dict[runid,metric_id,'PPA_jobs',y1,dbp] - 1
print('Number of Jobs in PPAs 2050 %s' % dbp,metrics_dict[runid,metric_id,'PPA_jobs',y2,dbp])
print('Number of Jobs in PPAs 2015 %s' % dbp,metrics_dict[runid,metric_id,'PPA_jobs',y1,dbp])
print('Job Growth in PPAs from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'jobs_growth_PPA',y_diff,dbp])
'''
AGREMPN = Agriculture & Natural Resources
MWTEMPN = Manufacturing & Wholesale, Transportation & Utilities
RETEMPN = Retail
FPSEMPN = Financial & Leasing, Professional & Managerial Services
HEREMPN = Health & Educational Services
OTHEMPN = Construction, Government, Information
totemp = total employment
'''
# Jobs Growth MWTEMPN in PPAs (Manufacturing & Wholesale, Transportation & Utilities)
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'MWTEMPN_2050'].sum()
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'MWTEMPN_2015'].sum()
metrics_dict[runid,metric_id,'jobs_growth_MWTEMPN_PPA',y_diff,dbp] = metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2,dbp]/metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1,dbp] - 1
print('Number of MWTEMPN Jobs in PPAs 2050 %s' % dbp,metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2,dbp])
print('Number of MWTEMPN Jobs in PPAs 2015 %s' % dbp,metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1,dbp])
print('Job Growth MWTEMPN in PPAs from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'jobs_growth_MWTEMPN_PPA',y_diff,dbp])
def calculate_travelmodel_metrics_change(list_tm_runid_blueprintonly, metrics_dict):
for tm_runid in list_tm_runid_blueprintonly:
year = tm_runid[:4]
if "Basic" in tm_runid:
dbp = "Basic"
elif "Plus" in tm_runid:
dbp = "Plus"
#elif "PlusCrossing_01" in tm_runid:
# dbp = "Plus_01"
#elif "PlusFixItFirst" in tm_runid:
# dbp = "PlusFixItFirst"
else:
dbp = "Unknown"
metric_id = "A1"
# Tolls
metrics_dict[tm_runid,metric_id,'tolls_per_HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'tolls_per_HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'tolls_per_HH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_LIHH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_LIHH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'tolls_per_LIHH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_LIHH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_LIHH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'tolls_per_LIHH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'tolls_per_inc1HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'tolls_per_inc1HH',y2,"NoProject"] - 1
# Transit Fares
metrics_dict[tm_runid,metric_id,'fares_per_HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'fares_per_HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'fares_per_HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'fares_per_HH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'fares_per_LIHH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_LIHH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'fares_per_LIHH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'fares_per_LIHH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_LIHH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'fares_per_LIHH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'fares_per_inc1HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_inc1HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'fares_per_inc1HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'fares_per_inc1HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_inc1HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'fares_per_inc1HH',y2,"NoProject"] - 1
metric_id = "C2"
# Highway corridor travel times
for route in ['Antioch_SF','Vallejo_SF','SanJose_SF','Oakland_SanJose','Oakland_SF']:
metrics_dict[tm_runid,metric_id,'travel_time_AM_change_2015_%s' % route,year,dbp] = metrics_dict[tm_runid,metric_id,'travel_time_AM_%s' % route,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'travel_time_AM_%s' % route,y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'travel_time_AM_change_2050noproject_%s' % route,year,dbp] = metrics_dict[tm_runid,metric_id,'travel_time_AM_%s' % route,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'travel_time_AM_%s' % route,y2,'NoProject'] - 1
# Transit Crowding by operator
for operator in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local','GGT Express','WETA']:
try:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2015_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,y1,'2015'] - 1
except:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2015_%s' % operator,year,dbp] = 0
try:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2050noproject_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,y2,'NoProject'] - 1
except:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2050noproject_%s' % operator,year,dbp] = 0
# Transit travel times by operator
for operator in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local']:
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2015_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % operator,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'time_per_dist_AM_%s' % operator,y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2050noproject_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % operator,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'time_per_dist_AM_%s' % operator,y2,'NoProject'] - 1
# Transit travel times by mode
for mode_name in ['Local','Express','Ferry','Light Rail','Heavy Rail','Commuter Rail']:
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2015_%s' % mode_name,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % mode_name,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'time_per_dist_AM_%s' % mode_name,y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2050noproject_%s' % mode_name,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % mode_name,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'time_per_dist_AM_%s' % mode_name,y2,'NoProject'] - 1
def parcel_building_output_sum(urbansim_runid):
#################### creating parcel level df from buildings output
building_output_2050 = pd.read_csv((urbansim_runid+'_building_data_2050.csv'))
building_output_2015 = pd.read_csv((urbansim_runid+'_building_data_2015.csv'))
parcel_building_output_2050 = building_output_2050[['parcel_id','residential_units','deed_restricted_units']].groupby(['parcel_id']).sum()
parcel_building_output_2015 = building_output_2015[['parcel_id','residential_units','deed_restricted_units']].groupby(['parcel_id']).sum()
parcel_building_output_2050 = parcel_building_output_2050.add_suffix('_2050')
parcel_building_output_2015 = parcel_building_output_2015.add_suffix('_2015')
return pd.merge(left=parcel_building_output_2050, right=parcel_building_output_2015, left_on="parcel_id", right_on="parcel_id", how="left")
def calc_pba40urbansim():
urbansim_runid = 'C:/Users/{}/Box/Modeling and Surveys/Share Data/plan-bay-area-2040/RTP17 UrbanSim Output/r7224c/run7224'.format(os.getenv('USERNAME'))
runid = "plan-bay-area-2040/RTP17 UrbanSim Output/r7224c/run7224"
dbp = "PBA40"
metric_id = "Overall"
year2 = "2040"
year1 = "2010"
yeardiff = "2040"
parcel_geo_df = pd.read_csv(parcel_geography_file)
################## Creating parcel summary
hhq_list = ['hhq1','hhq2','hhq3','hhq4']
emp_list = ['AGREMPN','MWTEMPN','RETEMPN','FPSEMPN','HEREMPN','OTHEMPN']
parcel_output_2040_df = pd.read_csv((urbansim_runid+'_parcel_data_2040.csv'))
parcel_output_2040_df['tothh'] = parcel_output_2040_df[hhq_list].sum(axis=1, skipna=True)
parcel_output_2040_df['totemp'] = parcel_output_2040_df[emp_list].sum(axis=1, skipna=True)
parcel_output_2010_df = pd.read_csv((urbansim_runid+'_parcel_data_2010.csv'))
parcel_output_2010_df['tothh'] = parcel_output_2010_df[hhq_list].sum(axis=1, skipna=True)
parcel_output_2010_df['totemp'] = parcel_output_2010_df[emp_list].sum(axis=1, skipna=True)
# keeping essential columns / renaming columns
parcel_output_2040_df.drop(['x','y','zoned_du','zoned_du_underbuild', 'zoned_du_underbuild_nodev', 'first_building_type_id'], axis=1, inplace=True)
parcel_output_2010_df.drop(['x','y','zoned_du','zoned_du_underbuild', 'zoned_du_underbuild_nodev', 'first_building_type_id'], axis=1, inplace=True)
parcel_output_2040_df = parcel_output_2040_df.add_suffix('_2040')
parcel_output_2010_df = parcel_output_2010_df.add_suffix('_2010')
# creating parcel summaries with 2040 and 2010 outputs, and parcel geographic categories
parcel_sum_df = pd.merge(left=parcel_output_2040_df, right=parcel_output_2010_df, left_on="parcel_id_2040", right_on="parcel_id_2010", how="left")
    parcel_sum_df = pd.merge(left=parcel_sum_df, right=parcel_geo_df[['pba50chcat','PARCEL_ID']], left_on="parcel_id_2040", right_on="PARCEL_ID", how="left")
import pandas as pd
from skmultilearn.model_selection.iterative_stratification import IterativeStratification
from sklearn.model_selection import train_test_split
from pynet.utils import get_pickle_obj
import matplotlib.pyplot as plt
import numpy as np
import os, copy
## This script defines the Train/Val/Test split scheme for the release of the OpenBHB dataset.
## It also defines the BHB extension with private datasets. For now, only the Train/Val/Test scheme is
## implemented, with Val/(Test-Intra + Test-Inter) preserved as before.
def discretize_continous_label(labels, bins='sturges', verbose=False):
# Get an estimation of the best bin edges. 'Sturges' is conservative for pretty large datasets (N>1000).
bin_edges = np.histogram_bin_edges(labels, bins=bins)
if verbose:
print('Global histogram:\n', np.histogram(labels, bins=bin_edges, density=False), flush=True)
# Discretizes the values according to these bins
discretization = np.digitize(labels, bin_edges[1:], right=True)
if verbose:
print('Bin Counts after discretization:\n', np.bincount(discretization), flush=True)
return discretization
def get_stratification_split(labels, n_test=0.1, preserve:str=None, seed:int=None):
np.random.seed(seed)
dummy = np.arange(len(labels)).reshape(len(labels), -1)
n_splits = int(1/n_test)
stratifier = IterativeStratification(n_splits=n_splits, order=1)
# iterate until we find train/test split preserving the variable "preserve"
for _ in range(n_splits):
train, test = next(stratifier.split(dummy, labels.values))
if preserve is None or set(labels.iloc[train][preserve]) == set(labels.iloc[test][preserve]) \
== set(labels[preserve]):
return (train, test)
print("Impossible to stratify by preserving %s"%preserve)
return None
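# Minimal usage sketch (hypothetical data, not part of the release splits): discretize a continuous
# phenotype such as age, stack it with the variables to balance, and feed the label DataFrame to
# get_stratification_split(). The "age"/"sex"/"site" columns below are assumptions for illustration only.
def _example_stratified_split():
    rng = np.random.RandomState(0)
    labels = pd.DataFrame({
        "age": rng.uniform(6, 80, size=200),
        "sex": rng.randint(0, 2, size=200),
        "site": rng.randint(0, 5, size=200),
    })
    labels["age"] = discretize_continous_label(labels["age"].values, verbose=False)
    # 10% of subjects go to the test fold; every site must appear on both sides of the split
    split = get_stratification_split(labels, n_test=0.1, preserve="site", seed=42)
    if split is None:
        return None
    train_idx, test_idx = split
    return labels.iloc[train_idx], labels.iloc[test_idx]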
### Defines the splits used for OpenBHB-10K
path = "/neurospin/hc/openBHB/participants.tsv"
test_path = ["/neurospin/psy_sbox/icbm/ICBM_t1mri_mwp1_participants.csv",
"/neurospin/psy_sbox/hcp_development/participants.csv"]
open_bhb10k = ['abide1', 'abide2', 'ixi', 'npc', 'rbp', 'gsp', 'localizer', 'mpi-leipzig', 'corr', 'nar']
df = pd.read_csv(path, sep="\t")
df_test = pd.read_csv(test_path[0], sep="\t")
"""
Utility functions for Jupyter notebook to:
- format data
- transform pandas data structures
- compute common stats
These functions are used for both interactive data exploration and to implement
more complex pipelines. The output is reported through logging.
"""
import datetime
import logging
import math
from typing import (
Any,
Callable,
Collection,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import statsmodels
import statsmodels.api
import tqdm.autonotebook as tauton
import core.plotting as cplott
import helpers.dbg as dbg
import helpers.list as hlist
import helpers.printing as hprint
_LOG = logging.getLogger(__name__)
# #############################################################################
# Helpers.
# #############################################################################
# TODO(gp): Move this to helpers/pandas_helpers.py
def cast_to_df(obj: Union[pd.Series, pd.DataFrame]) -> pd.DataFrame:
"""
Convert a pandas object into a pd.DataFrame.
"""
if isinstance(obj, pd.Series):
df = pd.DataFrame(obj)
else:
df = obj
dbg.dassert_isinstance(df, pd.DataFrame)
return df
def cast_to_series(obj: Union[pd.Series, pd.DataFrame]) -> pd.Series:
"""
Convert a pandas object into a pd.Series.
"""
if isinstance(obj, pd.DataFrame):
dbg.dassert_eq(obj.shape[1], 1)
        # The assertion above guarantees a single column, so take column 0.
        srs = obj.iloc[:, 0]
else:
srs = obj
dbg.dassert_isinstance(srs, pd.Series)
return srs
# TODO(gp): Need to be tested.
def adapt_to_series(f: Callable) -> Callable:
"""
Extend a function working on dataframes so that it can work on series.
"""
def wrapper(
obj: Union[pd.Series, pd.DataFrame], *args: Any, **kwargs: Any
) -> Any:
# Convert a pd.Series to a pd.DataFrame.
was_series = False
if isinstance(obj, pd.Series):
            obj = pd.DataFrame(obj)
from typing import Iterable, Dict, Union, List
from json import dumps
from requests import get
from http import HTTPStatus
import pandas as pd
import zipfile
import requests
import io
import os
import re
import scipy
import scipy.stats
import numpy.random as random
import numpy as np
def get_paginated_dataset(filters: Iterable[str], structure: Dict[str, Union[dict, str]] = None,
start_page = 1, end_page=None) -> pd.DataFrame:
"""This is lifted from the NHSE website: https://coronavirus.data.gov.uk/developers-guide
The "filters" param is used to determine what geographical level you will pull,
whilst the "structure" param describes the fields you will pull. The function will loop
over all the pages requested (or all pages if none specified).
ISSUES: The API seems to time out for large datasets (i.e. UTLA), so you might need to pull
in multiple small batches of 5 or 10 pages at a time.
-------
Params
-------
filters : list(str,...)
The geographic area you want. Example: ["areaType=nation;areaName=england"]
You can choose to not include areaName: ['areaType=nation"].
Options for areaType: overview, nation, region, nhsRegion, utla, ltla
structure : dict(str / dict(str))
        The columns you want. You specify it as a dictionary of column
names (the key of the dict defines what the column comes out as for you, so below,
the areaName column comes out as "name"):
{"date": "date",
"areatype": "areaType",
"name": "areaName",
"code": "areaCode",
"newAdmissions": "newAdmissions"}
The options you can take are:
# date - the date of the data point
# areaType - the area type
# areaName - area name
# areaCode - area code (ONS format, i.e. E0000000001).
# newCasesByPublishDate - New cases by publish date
# cumCasesByPublishDate - Cumulative cases by publish date
# cumCasesBySpecimenDateRate - Rate of cumulative cases by publish date per 100k resident population
# newCasesBySpecimenDate - New cases by specimen date
# cumCasesBySpecimenDateRate - Rate of cumulative cases by specimen date per 100k resident population
# cumCasesBySpecimenDate - Cumulative cases by specimen date
# maleCases - Male cases (by age)
# femaleCases - Female cases (by age)
# newPillarOneTestsByPublishDate - New pillar one tests by publish date
# cumPillarOneTestsByPublishDate - Cumulative pillar one tests by publish date
# newPillarTwoTestsByPublishDate - New pillar two tests by publish date
# cumPillarTwoTestsByPublishDate - Cumulative pillar two tests by publish date
# newPillarThreeTestsByPublishDate - New pillar three tests by publish date
# cumPillarThreeTestsByPublishDate - Cumulative pillar three tests by publish date
# newPillarFourTestsByPublishDate - New pillar four tests by publish date
# cumPillarFourTestsByPublishDate - Cumulative pillar four tests by publish date
# newAdmissions - New admissions
# cumAdmissions - Cumulative number of admissions
# cumAdmissionsByAge - Cumulative admissions by age
# cumTestsByPublishDate - Cumulative tests by publish date
# newTestsByPublishDate - New tests by publish date
# covidOccupiedMVBeds - COVID-19 occupied beds with mechanical ventilators
# hospitalCases - Hospital cases
# plannedCapacityByPublishDate - Planned capacity by publish date
# newDeaths28DaysByPublishDate - Deaths within 28 days of positive test
# cumDeaths28DaysByPublishDate - Cumulative deaths within 28 days of positive test
# cumDeaths28DaysByPublishDateRate - Rate of cumulative deaths within 28 days of positive test per 100k resident population
# newDeaths28DaysByDeathDate - Deaths within 28 days of positive test by death date
# cumDeaths28DaysByDeathDate - Cumulative deaths within 28 days of positive test by death date
# cumDeaths28DaysByDeathDateRate - Rate of cumulative deaths within 28 days of positive test by death date per 100k resident population
"""
if structure is None:
structure = {"date": "date",
"areatype": "areaType",
"name": "areaName",
"code": "areaCode",
'newCasesByPublishDate' : 'newCasesByPublishDate',
'cumCasesByPublishDate' : 'cumCasesByPublishDate',
'cumCasesBySpecimenDateRate' : 'cumCasesBySpecimenDateRate',
'newCasesBySpecimenDate' : 'newCasesBySpecimenDate',
'cumCasesBySpecimenDateRate' : 'cumCasesBySpecimenDateRate',
'cumCasesBySpecimenDate' : 'cumCasesBySpecimenDate',
'maleCases' : 'maleCases',
'femaleCases' : 'femaleCases',
'newPillarOneTestsByPublishDate' : 'newPillarOneTestsByPublishDate',
'cumPillarOneTestsByPublishDate' : 'cumPillarOneTestsByPublishDate',
'newPillarTwoTestsByPublishDate' : 'newPillarTwoTestsByPublishDate',
'cumPillarTwoTestsByPublishDate' : 'cumPillarTwoTestsByPublishDate',
'newPillarThreeTestsByPublishDate' : 'newPillarThreeTestsByPublishDate',
'cumPillarThreeTestsByPublishDate' : 'cumPillarThreeTestsByPublishDate',
'newPillarFourTestsByPublishDate' : 'newPillarFourTestsByPublishDate',
'cumPillarFourTestsByPublishDate' : 'cumPillarFourTestsByPublishDate',
'newAdmissions' : 'newAdmissions',
'cumAdmissions' : 'cumAdmissions',
'cumAdmissionsByAge' : 'cumAdmissionsByAge',
'cumTestsByPublishDate' : 'cumTestsByPublishDate',
'newTestsByPublishDate' : 'newTestsByPublishDate',
'covidOccupiedMVBeds' : 'covidOccupiedMVBeds',
'hospitalCases' : 'hospitalCases',
'plannedCapacityByPublishDate' : 'plannedCapacityByPublishDate',
'newDeaths28DaysByPublishDate' : 'newDeaths28DaysByPublishDate',
'cumDeaths28DaysByPublishDate' : 'cumDeaths28DaysByPublishDate',
'cumDeaths28DaysByPublishDateRate' : 'cumDeaths28DaysByPublishDateRate',
'newDeaths28DaysByDeathDate' : 'newDeaths28DaysByDeathDate',
'cumDeaths28DaysByDeathDate' : 'cumDeaths28DaysByDeathDate',
'cumDeaths28DaysByDeathDateRate' : 'cumDeaths28DaysByDeathDateRate',
}
endpoint = "https://api.coronavirus.data.gov.uk/v1/data"
api_params = dict(filters=str.join(";", filters),
structure=dumps(structure, separators=(",", ":")),
format="json", page=1)
data = list()
page_number = start_page
current_data = dict(pagination={'next':True}) # dummy initial "next" pagination
while current_data["pagination"]["next"] is not None:
api_params["page"] = page_number
if page_number == end_page: break
try:
response = get(endpoint, params=api_params, timeout=10)
except Exception as error:
print(f" Trying page {page_number} again...")
continue
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
elif response.status_code == HTTPStatus.NO_CONTENT:
break
current_data = response.json()
        page_data: List[dict] = current_data['data']
data.extend(page_data)
print(f'{str.join(";", filters)} page {page_number}: {response.url}')
page_number += 1
return pd.DataFrame(data)
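# Usage sketch (kept inside a function so nothing is downloaded on import): pull a few pages of
# England-level admissions. The filter and structure values follow the docstring above.
def _example_get_paginated_dataset():
    structure = {"date": "date",
                 "name": "areaName",
                 "code": "areaCode",
                 "newAdmissions": "newAdmissions"}
    return get_paginated_dataset(filters=["areaType=nation;areaName=england"],
                                 structure=structure,
                                 start_page=1, end_page=3)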
def google_mobility(country_filter="GB"):
"""Pulls data from the google mobility report website https://www.google.com/covid19/mobility/.
Specify the country to filter by (two character code), i.e. GB for United Kingdom. If you leave
it blank, i.e. "", then you get everything.
"""
response = requests.get("https://www.gstatic.com/covid19/mobility/Region_Mobility_Report_CSVs.zip")
zip_file = zipfile.ZipFile(io.BytesIO(response.content))
GB_google_mobility_report_paths = list(filter(lambda x: country_filter in x, zip_file.namelist()))
df_gb_google_mobility_report = pd.concat([pd.read_csv(zip_file.open(file)) for file in GB_google_mobility_report_paths],axis=0)
return df_gb_google_mobility_report
def apple_mobility() -> pd.DataFrame:
"""Pulls data from the apple mobility report website https://covid19.apple.com/mobility
It keeps trying dates going back from the most recent to 100 days before today
searching for the most recent file. Once it finds a file it gives you that as a dataframe
"""
import datetime
df_apple_mobility_report = None
for date in pd.date_range(datetime.date.today() - pd.Timedelta(days=100),datetime.date.today(),freq='D')[::-1]:
date_str = date.strftime("%Y-%m-%d")
try:
df_apple_mobility_report = pd.read_csv(f"https://covid19-static.cdn-apple.com/covid19-mobility-data/2021HotfixDev19/v3/en-us/applemobilitytrends-{date_str}.csv")
break
except Exception as error:
pass
if df_apple_mobility_report is None:
print("Couldn't find the apple mobility data")
return df_apple_mobility_report
def interpolate_early_data(series : pd.Series,
zeropoints : list = [-40,-20] ) -> pd.Series:
"""
    Interpolate the series from zero up to the start of the data, prefilling the defined
"zeropoints" range with 0s (relative to the first filled data point).
"""
series_original = series.copy()
series_without_na = series_original.dropna()
earliest_filled_index = series_without_na.index[0]
zero_points_index = [earliest_filled_index+zeropoints[0],earliest_filled_index+zeropoints[1]]
# set some earlier points to zero to mark the area between which we need to interpolate
empty_series = pd.Series(index=range(zero_points_index[0],earliest_filled_index-1)) # this goes from the zero point up to the start of the data
series_working = empty_series.append(series.dropna())
series_working.loc[zero_points_index[0]:zero_points_index[1]] = 0 # the zero point range as 0
# do the interpolation
series_working = series_working.interpolate(method='pchip', limit_direction='both', limit=None)
# add some noise
noise_scale_factor = 0.5
interp_mask = pd.Series(data=np.zeros(len(series_working)),index = series_working.index, name='interp_mask')
interp_mask.loc[:earliest_filled_index-1] = random.rand(len(series_working.loc[:earliest_filled_index-1]))-0.5
noisy_interp_data = (series_working + (interp_mask * series_working * noise_scale_factor))
return noisy_interp_data.rename(series.name)
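# Usage sketch (synthetic series, purely illustrative): back-fill the missing early part of a series by
# pinning a zero region 40-20 points before the first observed value and interpolating up to it.
def _example_interpolate_early_data():
    observed = pd.Series(np.nan, index=range(100), name="newAdmissions")
    observed.loc[60:99] = np.linspace(5, 200, 40)   # data only exists from index 60 onwards
    return interpolate_early_data(observed, zeropoints=[-40, -20])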
def remove_outliers(series, z_score_threshold = 10):
"""Removes entries from a series which lie more than <threshold> times the standard deviation from the mean. The default should remove more obvious spikes."""
series=series.copy()
z_scores = scipy.stats.zscore(series)
abs_z_scores = np.abs(z_scores)
series.loc[abs_z_scores > z_score_threshold] = np.nan
return series.interpolate(method='pchip', limit = 1,limit_direction='forward')
def remove_outlier_window(series: pd.Series,window_length=20, window_z_score_threshold=4) -> pd.Series:
"""Removes outliers by scanning a window across the data and removing anything that exceeds the
z_score threshold (replaces that datapoint through interpolation"""
series_ = series.copy()
series_index = series_.index
series_ = series_.reset_index(drop=True)
data_list = series_[:window_length].tolist()
for idx in series_.index[window_length:]:
series_window = series[idx-window_length:idx].copy()
series_window = remove_outliers(series_window,z_score_threshold=window_z_score_threshold)
data_list.append(series_window[window_length-1])
return pd.Series(data_list, index=series_index)
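# Usage sketch (synthetic data): inject one large spike into a smooth date-indexed series and strip it
# out, either globally with remove_outliers() or with the rolling-window variant above.
def _example_remove_outliers():
    dates = pd.date_range("2020-03-01", periods=200, freq="D")
    spiky = pd.Series(np.sin(np.linspace(0, 6, 200)) * 100 + 500, index=dates)
    spiky.iloc[120] = 10000                       # obvious reporting error
    despiked = remove_outliers(spiky, z_score_threshold=10)
    despiked_windowed = remove_outlier_window(spiky, window_length=20, window_z_score_threshold=4)
    return despiked, despiked_windowed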
def nhse_weekly_covid19_admissions_excel(weekly_admissions_url = "https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2020/10/Weekly-covid-admissions-publication-201029-2.xlsx"):
"""Pulls the weekly NHSE COVID19 Trust admissions file - useful as it is at a trust level and more recent than the
    monthly one, however it only goes back a few months. It combines the different sheets into a single dataframe.
Taken from the below URL:
https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-hospital-activity/"""
df_nhse_weekly_covid19_admissions = pd.DataFrame([pd.read_excel(weekly_admissions_url, sheet_name = sheet,header=14).dropna(how="all",axis=1).dropna(how="all",axis=0)
.set_index(['NHS England Region', 'Code', 'Name']).rename_axis('date',axis=1).stack().rename(sheet)
for sheet in ['Hosp ads & diag',
'New hosp cases',
'Hosp ads from comm',
'Hosp ads from comm with lag',
'Care home ads and diags',
'All beds COVID','MV beds COVID']]).T
filename = (weekly_admissions_url[-(weekly_admissions_url[::-1].find("/")):]).replace("xlsx","csv")
df_nhse_weekly_covid19_admissions.to_csv(filename)
return df_nhse_weekly_covid19_admissions
def nhse_monthly_covid19_admissions_historic_excel(url="https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2020/11/Covid-Publication-12-11-2020_v4-CB.xlsx"):
"""Pulls the monthly NHSE COVID19 Trust admissions file - useful as it is at a trust level and covers the full
    history, however it doesn't contain the most recent weeks. It combines the different sheets into a single
dataframe. Taken from the below URL:
https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-hospital-activity/"""
Excel_nhse_covid_trust_historic = pd.ExcelFile(url)
output_df = pd.DataFrame([(Excel_nhse_covid_trust_historic.parse(sheet,header=12)
.drop(columns='NHS England Region').rename(columns={"Unnamed: 2":"NHS England Region"})
.dropna(how='all',axis=1).dropna(how='all',axis=0)
.set_index(['NHS England Region', 'Code', 'Name']).rename_axis('date',axis=1).stack().rename(sheet)) for sheet in Excel_nhse_covid_trust_historic.sheet_names[1:]]).T
filename = (url[-(url[::-1].find("/")):]).replace("xlsx","csv")
output_df.to_csv(filename)
return output_df
def covid_england_data_blob(utla=True, ltla=True) -> dict:
"""Gives you a steaming pile of fresh COVID-19 data from NHSE, Google and Apple
ToDo: Need to change this to be a class with methods really.
"""
# use default query structure - which I've set above to mean EVERYTHING
output = dict()
query_structure = None #{"date": "date",
# "areatype": "areaType",
# "name": "areaName",
# "code": "areaCode",
# "newAdmissions": "newAdmissions",
# "newPillarTwoTestsByPublishDate": "newPillarTwoTestsByPublishDate",
# "plannedCapacityByPublishDate" : "plannedCapacityByPublishDate",
# "newTestsByPublishDate": "newTestsByPublishDate",
# "covidOccupiedMVBeds": "covidOccupiedMVBeds",
# "hospitalCases":"hospitalCases",
# "newCasesBySpecimenDate":"newCasesBySpecimenDate",
# "newCasesByPublishDate":"newCasesByPublishDate",
# "cumCasesByPublishDate":"cumCasesByPublishDate",
# "newDeaths28DaysByPublishDate":"newDeaths28DaysByPublishDate",
# "maleCases":"maleCases",
# "femaleCases":"femaleCases"}
# England Only
output['england_nhse'] = get_paginated_dataset([f"areaType=nation;areaName=england"], query_structure).dropna(how='all',axis=1).sort_values(['code','date',]).reset_index(drop=True)
# All UK (some of the metrics only work for the UK as a whole...)
output['uk_nhse'] = get_paginated_dataset([f"areaType=overview"], query_structure).dropna(how='all',axis=1).sort_values(['code','date',]).reset_index(drop=True)
# NHS Regions
output['nhsregion_nhse'] = get_paginated_dataset([f"areaType=nhsRegion"], query_structure).dropna(how='all',axis=1).sort_values(['code','date',]).reset_index(drop=True)
# Regions (geographic regions) - some different metrics than NHS Regions
output['region_nhse'] = get_paginated_dataset([f"areaType=region"], query_structure).dropna(how='all',axis=1).sort_values(['code','date',]).reset_index(drop=True)
# Changed the structure for these next two filters, because they simply don't have many of the above columns
# later note: changed to use default as the function just strips unused data anyway
query_structure_2 = None #{"date": "date",
# "areatype": "areaType",
# "name": "areaName",
# "code": "areaCode",
# "newCasesBySpecimenDate":"newCasesBySpecimenDate",
# "newCasesByPublishDate":"newCasesByPublishDate",
# "cumCasesByPublishDate":"cumCasesByPublishDate",
# "newDeaths28DaysByPublishDate":"newDeaths28DaysByPublishDate",}
if utla:
# upper tier local authorities (counties and unitary authorities, i.e. Lancashire, York, Somerset, etc...)
output['utla_nhse'] = get_paginated_dataset([f"areaType=utla"], query_structure_2).dropna(how='all',axis=1).sort_values(['code','date',]).reset_index(drop=True)
df_ltla_nhse_feed = None
if ltla:
# lower tier local authorities (councils and unitary authorities, i.e. Leeds council, Bradford council, York, etc...)
output['ltla_nhse'] = get_paginated_dataset([f"areaType=ltla"], query_structure_2).dropna(how='all',axis=1).sort_values(['code','date',]).reset_index(drop=True)
# google and apple mobility
output['google_mobility'] = google_mobility().sort_values(["country_region_code","sub_region_1",'date']).reset_index(drop=True)
# output['apple_mobility'] = (apple_mobility().set_index(['geo_type','region','transportation_type','alternative_name','sub-region','country'])
# .rename_axis('date',axis=1).stack()
# .unstack('transportation_type').reset_index())
# output['apple_mobility'] = output['apple_mobility'][(output['apple_mobility'].country == "United Kingdom") &
# ((output['apple_mobility']['region'] == 'England')
# | (output['apple_mobility']['sub-region'] == 'England')
# )]
return output
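# Usage sketch (keys as assembled above; this hits the live API, so it can be slow):
#
#   blob = covid_england_data_blob(utla=False, ltla=False)
#   blob['england_nhse'].tail()
#   blob['google_mobility'].head()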
def clean_data(df_nhs_api_data):
""""""
df_nhs_api_data = df_nhs_api_data.assign(
date=pd.to_datetime(df_nhs_api_data['date'], format="%Y-%m-%d", errors='coerce')).set_index(
['areatype', 'date', 'name'])
df_nhs_api_data = df_nhs_api_data[~df_nhs_api_data['code'].str.contains("[WSN]")]
df_nhs_api_data = df_nhs_api_data.query("code != 'null'") # remove all scottish welsh and NI records and code IS NULL records
return df_nhs_api_data
def download_and_save_data():
""""""
data_directory = './data'
covid_data_blob = covid_england_data_blob(utla=True, ltla=True)
# deal with google mobility data
df_dataset = covid_data_blob.pop('google_mobility')
df_dataset.to_csv(f"{data_directory}/gb_google_mobility_report.csv")
# save the nhse API data
for dataset_name, df_dataset in covid_data_blob.items():
clean_data(df_dataset).to_csv(f"{data_directory}/{dataset_name}_feed.csv")
# make lookup # ToDo: eventually get this lookup from a more authorative source, like GeoPortal
df_nhs_api_data = pd.concat(list(covid_data_blob.values()), sort=True)
df_lookup = clean_data(df_nhs_api_data).reset_index()[['code', 'name', 'areatype']].drop_duplicates().set_index('code') # reference data
df_lookup.to_csv(f"{data_directory}/code_name_areatype_lookup.csv")
def get_data(data_dir = './data'):
"""Trawls the data_dir and returns a dictionary with each of the files loaded as pandas dataframes"""
files_list = os.listdir(data_dir)
data_pack = dict()
for file_name in files_list:
file_name_without_extension = file_name.rsplit(".",1)[0]
data_pack[file_name_without_extension] =
|
pd.read_csv(f"{data_dir}/{file_name}")
|
pandas.read_csv
|
import time
import random
from string import ascii_lowercase
import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import fetch_20newsgroups
from dirty_cat import MinHashEncoder
def test_MinHashEncoder(n_sample=70, minmax_hash=False):
X_txt = fetch_20newsgroups(subset='train')['data']
X = np.array(X_txt[:n_sample])[:,None]
for minmax_hash in [True, False]:
for hashing in ['fast', 'murmur']:
if minmax_hash and hashing == 'murmur':
continue  # minmax hashing is not implemented for murmur, so skip this combination
# Test output shape
encoder = MinHashEncoder(n_components=50, hashing=hashing)
encoder.fit(X)
y = encoder.transform(X)
assert y.shape == (n_sample, 50), str(y.shape)
assert len(set(y[0])) == 50
# Test same seed return the same output
encoder = MinHashEncoder(50, hashing=hashing)
encoder.fit(X)
y2 = encoder.transform(X)
np.testing.assert_array_equal(y, y2)
# Test min property
if not minmax_hash:
X_substring = [x[:x.find(' ')] for x in X[:,0]]
X_substring = np.array(X_substring)[:,None]
encoder = MinHashEncoder(50, hashing=hashing)
encoder.fit(X_substring)
y_substring = encoder.transform(X_substring)
np.testing.assert_array_less(y - y_substring, 0.0001)
def test_input_type():
# Numpy array
X = np.array(['alice', 'bob'])[:,None]
enc = MinHashEncoder(n_components=2)
enc.fit_transform(X)
# List
X = [['alice'], ['bob']]
enc = MinHashEncoder(n_components=2)
enc.fit_transform(X)
def test_get_unique_ngrams():
string = 'test'
true_ngrams = {
(' ','t'), ('t','e'), ('e','s'), ('s', 't'),
('t',' '), (' ','t','e'), ('t','e','s'),
('e','s','t'), ('s','t',' '), (' ','t','e','s'),
('t','e','s','t'), ('e','s','t',' ')}
ngram_range = (2,4)
enc = MinHashEncoder(n_components=2)
ngrams = enc.get_unique_ngrams(string, ngram_range)
assert ngrams == true_ngrams
def profile_encoder(encoder, hashing='fast', minmax_hash=False):
# not a unit test
from dirty_cat.datasets import fetch_employee_salaries
employee_salaries = fetch_employee_salaries()
df = employee_salaries.X
X = df[["employee_position_title"]]
t0 = time.time()
enc = encoder(n_components=50, hashing=hashing, minmax_hash=minmax_hash)
enc.fit(X)
y = enc.transform(X)
assert y.shape == (len(X), 50)
eta = time.time() - t0
return eta
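# Manual benchmarking sketch (not collected by pytest), e.g.:
#
#   profile_encoder(MinHashEncoder, hashing='fast')    # returns elapsed seconds
#   profile_encoder(MinHashEncoder, hashing='murmur')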
@pytest.mark.parametrize("input_type, missing, hashing", [
['numpy', 'error', 'fast'],
['pandas', 'zero_impute', 'murmur'],
['numpy', 'zero_impute', 'fast']])
def test_missing_values(input_type, missing, hashing):
X = ['Red',
np.nan,
'green',
'blue',
'green',
'green',
'blue',
float('nan')]
n = 3
z = np.zeros(n)
if input_type == 'numpy':
X = np.array(X, dtype=object)[:,None]
elif input_type == 'pandas':
pd = pytest.importorskip("pandas")
X =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 22:42:57 2017
@author: 坤
"""
import os
import numpy as np
import pandas as pa
import math as Math
import sys
import pymysql
import dateutil.relativedelta
'''
Filter out the small earthquakes in the vicinity of a large earthquake.
Parameters: data location/directory, time window, and radius.
'''
def findEqM(df,minz,maxz):
find=df[df['magnitude']>=int(minz)]
return find[find['magnitude']<int(maxz)]
def getRadian(degree):
return degree * Math.pi / 180.0
def distance(lat1,lon1,lat2,lon2):
EARTH_RADIUS = 6378.137
radLat1 = getRadian(lat1)
radLat2 = getRadian(lat2)
a = radLat1 - radLat2  # latitude difference between the two points
b = getRadian(lon1) - getRadian(lon2)  # longitude difference between the two points
a=np.array(a)
b=np.array(b)
s = 2 * np.arcsin(np.sqrt(np.power(np.sin(a / 2), 2) + np.cos(radLat1) * np.cos(radLat2) * np.power(np.sin(b / 2), 2)))
s = s * EARTH_RADIUS
return s
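# distance() above is the haversine great-circle distance in kilometres; it broadcasts over
# pandas Series / numpy arrays as well as scalars, e.g. (coordinates illustrative):
#
#   d = distance(39.9, 116.4, df['latitude'], df['longitude'])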
def flter(data,Bdata,time,radii):
try:
MinTime=data.eqtime-dateutil.relativedelta.relativedelta(days=time)
MaxTime=data.eqtime#+dateutil.relativedelta.relativedelta(days=time)
BdataCopy=Bdata.copy()
BdataCopy=BdataCopy[(BdataCopy.eqtime>=MinTime)&(BdataCopy.eqtime<=MaxTime)]
BdataCopy.depth=0
BdataCopy.depth=distance(data.latitude,data.longitude,BdataCopy.loc[:,'latitude'],BdataCopy.loc[:,'longitude'])
BdataCopy=BdataCopy[BdataCopy.depth<=radii]
Bdata.drop(BdataCopy.index.tolist(),axis=0,inplace=True)
except BaseException:
BdataCopy=pa.DataFrame()
return BdataCopy,Bdata
dz=pa.Series([sys.argv[1],float(sys.argv[2]),float(sys.argv[3]),0,0],index = ["eqtime","longitude","latitude","magnitude","depth"])
dz.eqtime=pa.to_datetime(dz.eqtime)
MinTime=dz.eqtime-dateutil.relativedelta.relativedelta(days=365)
MaxTime=dz.eqtime#+dateutil.relativedelta.relativedelta(days=time)
conn = pymysql.connect(host='172.17.130.212', user='root', password='<PASSWORD>', database='cloud')
df = pa.read_sql("select * from base where eqtime>='"+str(MinTime)+"' and eqtime<'"+str(MaxTime)+"';", con=conn)
df['eqtime']=
|
pa.to_datetime(df['eqtime'])
|
pandas.to_datetime
|
import os
import pandas as pd
import sys
def get_rnn_preds():
prev_working_dir = os.getcwd()
os.chdir(
"C:\\Users\\Sky1\PycharmProjects\\backupFinal\\repository\\src\\data\\Stock_Predictions\\"
)
x = os.listdir()
pred_result_list = []
obs_result_list = []
# use a list for performance (per pandas docs)
# pred_cols = ["pred_pct_chg"]
# obs_cols = ["obs_pct_chg"]
for filename in x:
parsed_symbol = filename.split("_")[0]
if "predicted" in filename:
pred_rnn_df = pd.read_csv(filename, index_col=None, header=None)
pred_rnn_df["symbol"] = parsed_symbol
pred_result_list.append(pred_rnn_df.head(145))
elif "observed" in filename:
obs_rnn_df = pd.read_csv(filename, index_col=None, header=None)
obs_rnn_df["symbol"] = parsed_symbol
obs_result_list.append(obs_rnn_df.head(145))
else:
sys.exit("error. check filenames")
pred_masterdataframe = pd.concat(pred_result_list, axis=0, ignore_index=True)
observed_masterdataframe =
|
pd.concat(obs_result_list, axis=0, ignore_index=True)
|
pandas.concat
|
"""metric handlers and basic class for calculating metrics"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Optional, List, Dict
import pandas
from deepa2 import DeepA2Parser
from deepa2.parsers import Argument
class DA2MetricHandler(ABC):
"""
The Handler interface declares a method for building the chain of handlers.
It also declares a method for executing a request.
"""
@abstractmethod
def set_next(self, handler: DA2MetricHandler) -> DA2MetricHandler:
"""set next handler"""
@abstractmethod
def handle(self, prediction: str, reference: str) -> Optional[Dict]:
"""handle request"""
class AbstractDA2MetricHandler(DA2MetricHandler):
"""
The default chaining behavior can be implemented inside a base handler
class.
"""
_next_handler: Optional[DA2MetricHandler] = None
def set_next(self, handler: DA2MetricHandler) -> DA2MetricHandler:
self._next_handler = handler
# Returning a handler from here will let us link handlers in a
# convenient way like this:
# monkey.set_next(squirrel).set_next(dog)
return handler
@abstractmethod
def handle(self, prediction: str, reference: str) -> Optional[Dict]:
if self._next_handler:
return self._next_handler.handle(prediction, reference)
return None
# All Concrete DA2 Metric Handlers either handle a request or pass it
# to the next handler in the chain.
class ArgdownHandler(AbstractDA2MetricHandler):
"""handles argument reconstructions"""
def handle(self, prediction: str, reference: str) -> Optional[Dict]:
ref_as_argdown = DeepA2Parser.parse_argdown(reference)
if ref_as_argdown:
# reference is argdown
pred_as_argdown = DeepA2Parser.parse_argdown(prediction)
score = self.score(pred_as_argdown, ref_as_argdown)
return score
return super().handle(prediction, reference)
def score(
self, parsed_pred: Optional[Argument], parsed_ref: Optional[Argument]
) -> Dict[str, Any]:
"""scores a reconstructed argument relative to a reference reconsctruction"""
score = {
"valid_argdown": self.valid_argdown(parsed_pred),
"pc_structure": self.pc_structure(parsed_pred),
"consistent_usage": self.consistent_usage(parsed_pred),
"no_petitio": self.no_petitio(parsed_pred),
"no_redundancy": self.no_redundancy(parsed_pred),
"inferential_similarity": self.inferential_similarity(
parsed_pred, parsed_ref
),
}
return score
@staticmethod
def valid_argdown(parsed_pred: Optional[Argument]) -> int:
"""checks if a reconstruction is valid argdown"""
return 1 if parsed_pred else 0
@staticmethod
def pc_structure(parsed_pred: Optional[Argument]) -> Optional[int]:
"""checks if a reconstruction has premises and conclusion"""
if parsed_pred is None:
return None
has_pc_structure = (
not parsed_pred.statements[0].is_conclusion
) and parsed_pred.statements[-1].is_conclusion
return int(has_pc_structure)
@staticmethod
def consistent_usage(parsed_pred: Optional[Argument]) -> Optional[int]:
"""checks if info about used statements is consistent"""
if parsed_pred is None:
return None
used_exist = True # does every statement referred to in inference exist?
used_statements = []
for statement in parsed_pred.statements:
if statement.uses and statement.label:
if any(u >= statement.label for u in statement.uses):
used_exist = False
break
used_statements.extend(statement.uses)
# is every statement (except final one) explicitly referred to in some inference?
evryth_used = len(set(used_statements)) == (len(parsed_pred.statements) - 1)
has_consistent_usage = used_exist and evryth_used
return int(has_consistent_usage)
@staticmethod
def no_petitio(parsed_pred: Optional[Argument]) -> Optional[int]:
"""checks if a reconstruction is no petitio
i.e. no conclusion is a premise,
petitio is a special case of redundancy"""
if parsed_pred is None:
return None
no_petitio = True
visited_texts = []
for statement in parsed_pred.statements:
if statement.text:
if statement.is_conclusion:
# check if conclusion has been introduced as premise before
if statement.text.strip() in visited_texts:
no_petitio = False
break
else:
visited_texts.append(statement.text.strip())
return int(no_petitio)
@staticmethod
def no_redundancy(parsed_pred: Optional[Argument]) -> Optional[int]:
"""checks if a reconstruction is redundant
i.e. no statements has been introduced before"""
if parsed_pred is None:
return None
statement_texts = [s.text.strip() for s in parsed_pred.statements if s.text]
no_redundancy = len(statement_texts) == len(set(statement_texts))
return int(no_redundancy)
@staticmethod
def inferential_similarity(
parsed_pred: Optional[Argument], parsed_ref: Optional[Argument]
) -> Optional[float]:
"""checks if predicted and target argument are inferentially similar"""
if parsed_pred and parsed_ref:
n_pp = len(list(s for s in parsed_pred.statements if not s.is_conclusion))
n_pr = len(list(s for s in parsed_ref.statements if not s.is_conclusion))
n_cp = len(list(s for s in parsed_pred.statements if s.is_conclusion))
n_cr = len(list(s for s in parsed_ref.statements if s.is_conclusion))
inf_sim = (1 - abs(n_pp - n_pr) / (n_pp + n_pr)) * (
1 - abs(n_cp - n_cr) / (n_cp + n_cr)
)
else:
inf_sim = None
return inf_sim
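# Worked example: with 2 predicted vs 3 reference premises and 1 conclusion each,
# inf_sim = (1 - |2 - 3| / 5) * (1 - |1 - 1| / 2) = 0.8 * 1.0 = 0.8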
class StatementHandler(AbstractDA2MetricHandler):
"""handles statement list predictions"""
def handle(self, prediction: str, reference: str) -> Optional[Dict]:
is_statement_list = False
if is_statement_list:
score: Dict[str, Any] = {}
return score
return super().handle(prediction, reference)
class FormalizationHandler(AbstractDA2MetricHandler):
"""handles formalization predictions"""
def handle(self, prediction: str, reference: str) -> Optional[Dict]:
is_formalization_list = False
if is_formalization_list:
score: Dict[str, Any] = {}
return score
return super().handle(prediction, reference)
class DA2PredictionEvaluator: # pylint: disable=too-few-public-methods
"""evaluates a list of predictions and references"""
def __init__(self) -> None:
self.argdown_evaluator = ArgdownHandler()
self.statement_evaluator = StatementHandler()
self.formalization_evaluator = FormalizationHandler()
self.argdown_evaluator.set_next(self.statement_evaluator).set_next(
self.formalization_evaluator
)
self._scores: List[Optional[Dict[str, Any]]] = []
@property
def scores(self) -> List[Optional[Dict[str, Any]]]:
"""
The latest individual scores calculated by the evaluator.
"""
return self._scores
def compute_metrics(self, predictions: List[str], references: List[str]):
"""
compute da2 metrics of predictions given references
Args:
predictions: list of predictions to score.
references: list of reference for each prediction.
"""
if len(predictions) != len(references):
raise ValueError("Number of predictions and references must be the same.")
scores = []
for pred, ref in zip(predictions, references):
score = self.argdown_evaluator.handle(pred, ref)
scores.append(score)
# aggregate scores
df_scores =
|
pandas.DataFrame.from_records(scores)
|
pandas.DataFrame.from_records
|
import numpy as np
import pandas as pd
score = pd.read_csv('../Data/score.csv')
cost = pd.read_csv('../Data/吃饭花费1.csv',engine='python')
chifan =
|
pd.read_csv('../Data/吃饭花费.csv',engine='python')
|
pandas.read_csv
|
import re
import urllib
import xml.etree.ElementTree as ElementTree
import zipfile
import cbsodata
import numpy as np
import osmnx as ox
import pandas as pd
import requests
import unicodedata
import string
from bs4 import BeautifulSoup
from os.path import join
verbs = []
def remove_accents(text):
text = unicodedata.normalize('NFD', str(text)).encode('ascii', 'ignore')
return str(text.decode("utf-8"))
def filter_and_save(df, column, filename, min_length=1, filter_verbs=True):
df = df[df[column].str.len() > min_length].drop_duplicates()
df[column] = df.apply(lambda x: remove_accents(x[column]), axis=1)
if filter_verbs:
df = df[~df[column].isin(verbs)]
df = df.sort_values(column)
df.to_csv(join('datasets', filename), index=False)
def update_werkwoorden(download=True, min_length=3):
global verbs
if download:
lst = []
for ltr in list(string.ascii_uppercase):
parse_next_page = True
page = 1
while parse_next_page:
url = 'https://www.mijnwoordenboek.nl/werkwoorden/NL/' + ltr + '/' + str(page)
print(url)
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'lxml')
div = soup.find_all("div", {"style": "clear:both;"})[0]
cnt = 0
for u in div.find_all("a"):
cnt += 1
lst.append(u.text)
print(cnt)
parse_next_page = (cnt > 230)
page += 1
verbs = pd.DataFrame(lst, columns=['werkwoord'])
verbs = verbs[verbs.werkwoord.str.len() >= min_length].drop_duplicates()
verbs = verbs.sort_values('werkwoord')
verbs = verbs.iloc[2:].reset_index()
verbs = verbs[['werkwoord']]
verbs.werkwoord = verbs.werkwoord.str.title()
verbs.to_csv(join('datasets', 'werkwoorden.csv'), index=False)
else:
verbs = pd.read_csv(join('datasets', 'werkwoorden.csv'))
verbs = verbs.werkwoord.values
def update_streetnames(download=True, min_length=6):
#
# Download streetnames per province, and combine to the total set of streetnames
#
# TODO: Utrecht and Groningen downloads are city only, need a bounding box
if download:
for name in ['Gelderland', 'Overijssel', 'Drenthe', 'Groningen', 'Friesland', 'Zeeland', 'Utrecht',
'Limburg', 'Noord-Holland', 'Zuid-Holland', 'Flevoland', 'Noord-Brabant']:
print(name)
graph = ox.graph_from_place(name + ", Netherlands", network_type="drive")
gdf_nodes, gdf_edges = ox.graph_to_gdfs(graph)
names = gdf_edges[['name']].reset_index().drop(columns={'u', 'v', 'key'})
names.name = names.name.astype('str')
names = names.drop_duplicates()
names = names[~names.name.str.contains(r'\[')]
names.to_csv(join('datasets', 'RAW_streets', 'streets_' + name + '.csv'), index=False)
lst = []
for name in ['Gelderland', 'Overijssel', 'Drenthe', 'Groningen', 'Friesland', 'Zeeland', 'Utrecht',
'Limburg', 'Noord-Holland', 'Zuid-Holland', 'Flevoland', 'Noord-Brabant']:
lst.append(pd.read_csv(join('datasets', 'RAW_streets', 'streets_' + name + '.csv')))
streetnames = pd.concat(lst, axis=0, ignore_index=True)
streetnames.columns = ['straatnaam']
filter_and_save(streetnames, 'straatnaam', 'streets_Nederland.csv', min_length=min_length, filter_verbs=True)
def update_places(min_length=4):
#
# Download places and regions from CBS
#
total = pd.DataFrame(cbsodata.get_data('84992NED'))
places = np.append(
np.append(np.append(total.Woonplaatsen.values, total.Naam_2.values), total.Naam_4.values), total.Naam_6.values)
places = pd.DataFrame(places, columns=['plaatsnaam'])
filter_and_save(places, 'plaatsnaam', filename='places.csv', min_length=min_length, filter_verbs=True)
def update_firstnames():
#
# Download first names
#
url = "http://www.naamkunde.net/?page_id=293&vt_download_xml=true"
urllib.request.urlretrieve(url, join("datasets", "RAW_names", "firstnames.xml"))
xtree = ElementTree.parse(join("datasets", "RAW_names", "firstnames.xml"))
xroot = xtree.getroot()
firstnames = []
for node in xroot:
voornaam = node.find("voornaam")
if voornaam is not None:
firstnames.append({"voornaam": voornaam.text})
firstnames = pd.DataFrame(firstnames)
filter_and_save(firstnames, 'voornaam', filename='firstnames.csv', min_length=1, filter_verbs=True)
def update_lastnames():
#
# Download last names
#
url = "http://www.naamkunde.net/wp-content/uploads/oudedocumenten/fn10k_versie1.zip"
urllib.request.urlretrieve(url, join("datasets", "RAW_names", "lastnames.zip"))
zf = zipfile.ZipFile(join("datasets", "RAW_names", "lastnames.zip"), 'r')
f = zf.open("fn_10kw.xml")
xtree = ElementTree.parse(f)
xroot = xtree.getroot()
lastnames = []
for node in xroot:
lastname = node.find("naam")
prefix = node.find("prefix")
if lastname is not None:
if prefix.text is not None:
lastnames.append({"achternaam": prefix.text + " " + lastname.text})
else:
lastnames.append({"achternaam": lastname.text})
lastnames = pd.DataFrame(lastnames)
filter_and_save(lastnames, 'achternaam', filename='lastnames.csv', min_length=1, filter_verbs=True)
def update_diseases():
#
# Download diseases
#
url = 'https://nl.wikibooks.org/wiki/Geneeskunde/Lijst_van_aandoeningen'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'lxml')
div = soup.find(id="mw-content-text")
diseases = []
for tag in div.find_all("li"):
diseases.append(tag.text)
for d in ['Corona', 'Covid', 'Covid-19']:
diseases.append(d)
diseases = pd.DataFrame(diseases, columns=['aandoening'])
filter_and_save(diseases, 'aandoening', filename='diseases.csv', min_length=1, filter_verbs=False)
def update_medicines():
#
# Download medicines
#
lst = []
for ltr in ['u', 'e', 'o', 'a']:
url = 'https://www.apotheek.nl/zoeken?q=' + ltr + '&filter=medicijnen'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'lxml')
div = soup.find_all("p", {"class": "searchIndication_text__2l4pd"})
results = int(div[0].find('span').text)
count = 0
while count < results:
for med in soup.find_all("div", {"class": "searchItem_resultTitle__2TXzJ"}):
lst.append(med.text)
url = 'https://www.apotheek.nl/zoeken?q=' + ltr + '&filter=medicijnen&start=' + str(count + 10)
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'lxml')
count += 10
medicines = pd.DataFrame(lst, columns=['original'])
medicines['lengte'] = medicines['original'].str.len()
medicines = medicines.sort_values('lengte')
new = medicines["original"].str.replace('De Tuinen ', '').str.replace('/', ' ').str.replace(',', ' ') \
.str.replace('(', ' ').str.split(" ", n=1, expand=True)
medicines['medicijn'] = new[0].str.title()
filter_and_save(medicines, 'medicijn', filename='medicines.csv', min_length=1, filter_verbs=False)
def update_nationalities():
#
# Download nationalitites
#
total = pd.DataFrame(cbsodata.get_data('03743'))
nationalities = pd.DataFrame(total.Nationaliteiten.unique()[1:-1], columns=['nationaliteit'])
filter_and_save(nationalities, 'nationaliteit', filename='nationalities.csv', min_length=1, filter_verbs=False)
def update_countries():
#
# Download countries
#
url = 'https://nl.wikipedia.org/wiki/Lijst_van_landen_in_2020'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'lxml')
tbls = soup.find_all("table", {"class": "wikitable"})
lst = []
for tbl in tbls:
rows = tbl.find_all("tr")
for row in rows:
cells = row.find_all("td")
if len(cells) == 4:
naam = cells[0].text.strip()
naam = re.sub(r"\[.*", "", naam)
lst.append(naam)
naam = cells[1].text.strip()
naam = re.sub(r"\(.*", "", naam)
lst.append(naam)
lst.append(cells[3].text.strip().split(":")[1].split('/')[0].strip())
countries =
|
pd.DataFrame(lst, columns=['land'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import argparse
from astropy.io import fits
from fiery_llama.matched_filters import PointFilter, cubeify
parser = argparse.ArgumentParser()
help_data = "Must be .h5 or .fits file type."
parser.add_argument("data", help=help_data)
table_help = "if .h5 file provide table name"
parser.add_argument("--data-table", help=table_help)
help_signal = "Must be .h5 or .fits file type."
parser.add_argument("signal", help=help_signal)
parser.add_argument("--signal-table", help=table_help)
parser.add_argument("--nra", default=100)
parser.add_argument("--ndec", default=100)
_help = "the columns to filter on, if not given defaults to all filter columns"
parser.add_argument("--signal-columns", nargs="*", help=_help)
parser.add_argument("--create-image")
if __name__ == "__main__":
args = parser.parse_args()
if args.data_table is not None:
data = pd.read_hdf(args.data, args.data_table)
else:
hdul = fits.open(args.data)
data =
|
pd.DataFrame(hdul[1].data)
|
pandas.DataFrame
|
import time
import imageio
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from InstagramAPI import InstagramAPI
imageio.plugins.ffmpeg.download()
username = "fafasonga06"
password = "<PASSWORD>"
InstagramAPI = InstagramAPI(username, password)
InstagramAPI.login()
InstagramAPI.getProfileData()
result = InstagramAPI.LastJson
username = result['user']['username']
# print(result)
# print(username)
# print(result['user']['biography'])
InstagramAPI.timelineFeed()
myposts = []
max_id = ""
followers = []
has_more_posts = True
next_max_id = True
while has_more_posts:
InstagramAPI.getSelfUserFeed(maxid=max_id)
if InstagramAPI.LastJson['more_available'] is not True:
has_more_posts = False # stop condition
print("Processing Finished")
max_id = InstagramAPI.LastJson.get('next_max_id', '')
myposts.extend(InstagramAPI.LastJson['items']) # merge lists
time.sleep(1) # Slows the script down to avoid flooding the servers
datas = []
for k in myposts:
likes_counts = k['like_count']
datas.append(likes_counts)
top_likers = k['top_likers']
# print(top_likers)
InstagramAPI.getRecentActivity()
get_recent_activity_response = InstagramAPI.LastJson
InstagramAPI.getProfileData()
user_id = InstagramAPI.LastJson['user']['pk']
InstagramAPI.getUserFollowings(user_id)
following_list = InstagramAPI.LastJson['users']
InstagramAPI.getUserFollowers(user_id)
followers_list = InstagramAPI.LastJson['users']
while next_max_id:
# first iteration hack
if next_max_id == True:
next_max_id = ''
_ = InstagramAPI.getUserFollowers(user_id, maxid=next_max_id)
followers.extend(InstagramAPI.LastJson.get('users', []))
next_max_id = InstagramAPI.LastJson.get('next_max_id', '')
time.sleep(1)
followers_list = followers
user_list = map(lambda x: x['username'], following_list)
following_set = set(user_list)
user_list = map(lambda x: x['username'], followers_list)
followers_set = set(user_list)
not_following_back = following_set - followers_set
fans = followers_set - following_set
print("\nNumber of Followers: ", len(followers_set))
print("Number of Following: ", len(following_set))
print("Number of Not Following back: ", len(not_following_back))
print("Number of Fans: ", len(fans))
print("\nPeople that do not Follow me back: ", not_following_back)
# print("\nfollowing: ", following_set)
# print(datas)
plt.plot(datas)
fig = plt.gcf()
plt.ylabel('Number of Likes')
plt.title('Display of Instagram Likes for my Profile')
fig.savefig("Counts_of_Likes")
plt.show()
followerz = []
for follower in followers_set:
followerz.append(follower)
followingz = []
for following in following_set:
followingz.append(following)
fanz = []
for fann in fans:
fanz.append(fann)
not_followingz = []
for not_following in not_following_back:
not_followingz.append(not_following)
l1 = followerz
l2 = fanz
l3 = followingz
l4 = not_followingz
s1 =
|
pd.Series(l1, name='Followers')
|
pandas.Series
|
import matplotlib.pylab as plt
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pandas import Series
from pandas import DataFrame
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from math import sqrt
import numpy as np
import sys
def test_stationarity(timeseries):
# Determing rolling statistics
rolmean = timeseries.rolling(window=30).mean()  # pd.rolling_mean was removed from pandas
rolstd = timeseries.rolling(window=30).std()
# Plot rolling statistics:
orig = plt.plot(timeseries, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
# Perform Dickey-Fuller test:
print ('Results of Dickey-Fuller Test:')
dftest = adfuller(timeseries, autolag='AIC')
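# The ADF null hypothesis is that a unit root is present; a p-value below ~0.05
# therefore suggests the series is stationary.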
dfoutput =
|
pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
|
pandas.Series
|
# Run like:
## heroku run --size=performance-l python user_summary.py -r heroku
# Or run in the background like:
## heroku run:detached --size=performance-l python user_summary.py -r heroku
import pandas as pd
import numpy as np
import os
import json
import gspread
from datetime import datetime
from app import get_db_cursor
from package import Package
from hubspot import HubSpot
from intercom import intercom
hs = HubSpot()
hs.companies()
def params_changed(x):
defaults = {
'cost_bigdeal_increase': 5.0,
'cost_alacart_increase': 8.0,
'cost_content_fee_percent': 5.7,
'cost_ill': 17.0,
'ill_request_percent_of_delayed': 5.0,
'weight_citation': 10.0,
'weight_authorship': 100.0,
'include_bronze': True, # bronze OA
'include_social_networks': True, # research gate OA
'include_submitted_version': True, # green OA
}
x_filt = {key: x[key] for key in list(defaults.keys())}
differs_log = defaults != x_filt
diff_dict = {k: x_filt[k] for k in x_filt if k in x_filt and defaults[k] != x_filt[k]}
return differs_log, diff_dict
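# Example (values illustrative): if x matches the defaults except for cost_ill=25.0,
# params_changed(x) returns (True, {'cost_ill': 25.0}).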
# Get institution ids that have Unsub users w/ permissions access
with get_db_cursor() as cursor:
cmd = """select distinct(ji.id),display_name,created,is_consortium,consortium_id,ror.ror_id
from jump_institution as ji
join jump_user_institution_permission as juip on ji.id=juip.institution_id
join jump_ror_id as ror on ji.id=ror.institution_id
where not ji.is_demo_institution;
"""
cursor.execute(cmd)
rows = cursor.fetchall()
institutions = pd.DataFrame(rows, columns=['institution_id','name','created','is_consortium','consortium_id','ror_id'])
# Consortia
institutions['is_consortium'].fillna(False, inplace=True)
consortia = institutions[institutions['is_consortium']]
## drop demo/test accounts
consortia = consortia[~consortia['name'].str.contains("Demo|Testing")]
# Non-consortia
non_consortia = institutions[~institutions['is_consortium']]
## exclude demo/test institutions
non_consortia = non_consortia[~non_consortia['institution_id'].str.contains("institution-testing")]
non_consortia = non_consortia[~non_consortia['institution_id'].str.contains("institution-demo")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("Demo")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("DEMO")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("Test")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("TEST")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("Scott")]
# Each institution
# institution="institution-jscQRozbejja"
# it = non_consortia[0:20].iterrows()
# row = next(it)[1]
# non_consortia.iterrows()[572]
all_institutions = []
for index, row in non_consortia.iterrows():
print(row["ror_id"])
with get_db_cursor() as cursor:
cmd = "select * from jump_account_package where institution_id = %s"
cursor.execute(cmd, (row["institution_id"],))
rows_inst = cursor.fetchall()
if not rows_inst:
institution_pkgs = pd.DataFrame({"institution_name": row["name"], 'ror_id':row["ror_id"]},
index = [0])
# institution_pkgs["current_deal"] = hs.current_deal(ror_id=row["ror_id"])
# company = hs.filter_by_ror_id(ror_id=row["ror_id"])
else:
institution_pkgs = pd.DataFrame(rows_inst, columns=['account_id','package_id','publisher','package_name','created','con_package_id','institution_id','is_demo','big_deal_cost','is_deleted','updated','default_to_no_perpetual_access','currency','is_dismissed_warning_missing_perpetual_access','is_dismissed_warning_missing_prices','big_deal_cost_increase'])
institution_pkgs.drop(["account_id","con_package_id","is_dismissed_warning_missing_perpetual_access","is_dismissed_warning_missing_prices","default_to_no_perpetual_access","updated"], axis=1, inplace=True)
institution_pkgs["institution_name"] = row["name"]
institution_pkgs["ror_id"] = row["ror_id"]
institution_pkgs["current_deal"] = hs.current_deal(ror_id=row["ror_id"])
company = hs.filter_by_ror_id(ror_id=row["ror_id"])
consortia = None
consortium_account = None
date_last_paid_invoice = None
amount_last_paid_invoice = None
if company:
consortia = company[0].get('consortia')
consortium_account = company[0].get('consortium_account')
dlpi = company[0].get('date_last_paid_invoice')
date_last_paid_invoice = datetime.strptime(dlpi, '%m/%d/%Y').strftime("%Y-%m-%d") if dlpi else None
alpi = company[0].get('amount_last_paid_invoice')
amount_last_paid_invoice = float(alpi) if alpi else None
institution_pkgs["consortia"] = consortia
institution_pkgs["consortium_account"] = consortium_account
institution_pkgs["date_last_paid_invoice"] = date_last_paid_invoice
institution_pkgs["amount_last_paid_invoice"] = amount_last_paid_invoice
institution_pkgs["created_inst"] = row['created'].strftime("%Y-%m-%d")
# intercom
intlastseen = None
emaillastseen = None
with get_db_cursor() as cursor:
cmd = "select * from jump_debug_admin_combo_view where institution_id = %s"
cursor.execute(cmd, (row["institution_id"],))
rows_users = cursor.fetchall()
if rows_users:
emails = list(filter(lambda x: x is not None, [w['email'] for w in rows_users]))
domain = None
if company:
domain = company[0].get('domain')
intlastseen, emaillastseen = intercom(emails, domain)
institution_pkgs["intercom_last_seen"] = intlastseen
institution_pkgs["intercom_last_seen_email"] = emaillastseen
# end intercom
# packages
pkgid = institution_pkgs.get('package_id')
if not isinstance(pkgid, pd.Series):
all_institutions.append(institution_pkgs)
else:
pkg_ids = pkgid.to_list()
pkg_dict_list = []
# This is the slow part: queries for each package
for pkg in pkg_ids:
try:
pkg = Package.query.get(pkg)
mpnum = 0
mp = list(filter(lambda x: x['id'] == "missing_prices", pkg.warnings))
if len(mp):
mpnum = len(mp[0]['journals'])
saved_scenarios = pkg.saved_scenarios
scenario_configs = [params_changed(w.to_dict_definition()['configs']) for w in saved_scenarios]
scenario_dates = [w.created for w in saved_scenarios]
scenario_dates.sort()
pkg_dict_list.append({"package_id":pkg.package_id,
# "created_pkg": pkg.created,
"has_complete_counter_data": pkg.has_complete_counter_data,
"perpetual_access": pkg.data_files_dict['perpetual-access']['is_live'],
"custom_price": pkg.data_files_dict['price']['is_live'],
"missing_prices": mpnum,
"is_feeder_package": pkg.is_feeder_package,
"is_feedback_package": pkg.is_feedback_package,
"scenarios": len(pkg.scenario_ids),
"scenario_user_subrs": any([len(w.to_dict_definition()['subrs']) > 0 for w in pkg.saved_scenarios]),
"scenario_param_chgs": any([x[0] for x in scenario_configs]),
"scenario_param_str": ",".join([str(x[1]) for x in scenario_configs]),
"created_sce_first": min(scenario_dates).strftime("%Y-%m-%d") if scenario_dates else None,
"created_sce_last": max(scenario_dates).strftime("%Y-%m-%d") if scenario_dates else None, })
except Exception as e:
pkg_dict_list.append({})
pkg_details = pd.DataFrame(pkg_dict_list)
all_institutions.append(institution_pkgs.merge(pkg_details, on="package_id"))
# len(all_institutions)
# all_institutions
all_institutions_df = pd.concat(all_institutions)
created_pkg_new = [w.strftime("%Y-%m-%d") if isinstance(w, pd.Timestamp) else w for w in all_institutions_df['created'].to_list()]
del all_institutions_df['created']
all_institutions_df['created_pkg'] = created_pkg_new
all_institutions_df = all_institutions_df[["institution_id","institution_name","ror_id","created_inst","current_deal","consortia","consortium_account","date_last_paid_invoice","amount_last_paid_invoice",
"intercom_last_seen", "intercom_last_seen_email",
"package_id","package_name","created_pkg","publisher","is_deleted",
"currency","big_deal_cost","big_deal_cost_increase","has_complete_counter_data",
"perpetual_access","custom_price","is_feeder_package","is_feedback_package",
"created_sce_first", "created_sce_last",
"scenarios", "scenario_user_subrs", "scenario_param_chgs", "scenario_param_str",]]
pkg_file = 'non_consortia_pkg_level.csv'
all_institutions_df.to_csv(pkg_file, index=False)
# aggregate package level data up to institutions
inst_level = all_institutions_df.copy()
inst_level = inst_level[~inst_level['is_deleted'].fillna(False) & ~inst_level['is_feeder_package'].fillna(False) & ~inst_level['is_feedback_package'].fillna(False)]
inst_level['created_sce_last'] = pd.to_datetime(inst_level['created_sce_last'])
inst_level = pd.concat([
inst_level.groupby(['institution_id','institution_name'])['ror_id'].apply(lambda x: ",".join(list(np.unique(x)))),
inst_level.groupby(['institution_id','institution_name'])['created_inst'].apply(lambda x: ",".join(list(np.unique(x)))),
inst_level.groupby(['institution_id','institution_name'])['current_deal'].apply(lambda x: list(np.unique(x))[0]),
inst_level.groupby(['institution_id','institution_name'])['consortia'].apply(lambda x: ",".join(filter(None, list(set(x))))),
inst_level.groupby(['institution_id','institution_name'])['consortium_account'].apply(lambda x: ",".join(filter(None, list(set(x))))),
inst_level.groupby(['institution_id','institution_name'])['date_last_paid_invoice'].apply(lambda x: ",".join(filter(None, list(set(x))))),
inst_level.groupby(['institution_id','institution_name'])['amount_last_paid_invoice'].apply(lambda x: list(set(x))[0]),
inst_level.groupby(['institution_id','institution_name'])['intercom_last_seen'].apply(lambda x: ",".join(filter(None, list(set(x))))),
inst_level.groupby(['institution_id','institution_name'])['intercom_last_seen_email'].apply(lambda x: ",".join(filter(None, list(set(x))))),
inst_level.groupby(['institution_id','institution_name'])['publisher'].apply(lambda x: ",".join(list(np.unique(list(filter(lambda z: isinstance(z, str), x)))))),
inst_level.groupby(['institution_id','institution_name']).nunique().package_id,
inst_level.groupby(['institution_id','institution_name']).sum().scenarios,
inst_level.groupby(['institution_id','institution_name'])['has_complete_counter_data'].all(),
inst_level.groupby(['institution_id','institution_name'])['perpetual_access'].all(),
inst_level.groupby(['institution_id','institution_name'])['custom_price'].all(),
inst_level.groupby(['institution_id','institution_name'])['created_sce_last'].max(),
inst_level.groupby(['institution_id','institution_name'])['scenario_user_subrs'].any(),
inst_level.groupby(['institution_id','institution_name'])['scenario_param_chgs'].any(),
], axis = 1).reset_index()
inst_level.rename(columns={
'publisher': 'publishers',
'package_id': 'no_pkgs',
'scenarios': 'no_scenarios',
'has_complete_counter_data': 'any_wo_counter_data',
'perpetual_access': 'any_wo_pta',
'custom_price': 'any_wo_custom_prices',
'scenario_user_subrs': 'any_scenario_user_subrs',
'scenario_param_chgs': 'any_scenario_param_chgs'}, inplace=True)
inst_file = 'non_consortia_inst_level.csv'
inst_level.to_csv(inst_file, index=False)
# apply rules
from user_summary_rules import rule_not_paid, rule_not_using, rule_new_users, rule_current_users
inst_for_rules = pd.read_csv(inst_file)
inst_with_rules = rule_not_paid(inst_for_rules)
inst_with_rules = rule_not_using(inst_with_rules)
inst_with_rules = rule_new_users(inst_with_rules)
inst_with_rules = rule_current_users(inst_with_rules)
inst_with_rules.to_csv(inst_file, index=False)
from user_summary_rules import rule_required_data, rule_recommended_data
pkgs_for_rules =
|
pd.read_csv(pkg_file)
|
pandas.read_csv
|
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Preprocess component for time series data."""
import kfp
from kfp.v2.dsl import Dataset
from kfp.v2.dsl import Input
from kfp.v2.dsl import Output
def preprocess(
input_dataset: Input[Dataset],
preprocessed_dataset: Output[Dataset],
time_col: str = 'timestamp',
feature_col: str = 'value'
):
"""Regularizes and resamples the input dataset.
Args:
input_dataset: Input with GCS path to input time series csv.
preprocessed_dataset: Output with GCS path to preprocessed csv.
time_col: Name of csv column with timestamps.
feature_col: Name of csv column with feature values.
Returns:
A preprocessed time series dataframe that may have fewer rows than the
input time series due to regularization and resampling.
"""
import pandas as pd
import tensorflow_probability as tfp
from tensorflow_probability.python.sts.internal.seasonality_util import freq_to_seconds
def load_data(path: str) -> pd.DataFrame:
"""Loads pandas dataframe from csv.
Args:
path: Path to the csv file.
Returns:
A time series dataframe compatible with TFP functions.
"""
original_df =
|
pd.read_csv(path)
|
pandas.read_csv
|
from clustviz.chameleon.graphtools import (
knn_graph,
pre_part_graph,
get_cluster,
connecting_edges,
)
from clustviz.chameleon.chameleon import (
merge_best,
cluster,
rebuild_labels,
len_edges,
)
from sklearn.datasets import make_blobs
import pandas as pd
import numpy as np
import pytest
import sys
def test_knn_graph():
df = pd.DataFrame([[1, 1], [6, 5], [6, 6], [0, 0], [1, 2]])
k = 2
graph = knn_graph(df, k, symmetrical=False, verbose=False)
condition0 = list(graph.edges) == [
(0, 4),
(0, 3),
(1, 2),
(1, 4),
(2, 4),
(3, 4),
]
condition1 = list(graph.nodes) == [0, 1, 2, 3, 4]
assert condition0 & condition1
def test_knn_graph_sym():
df =
|
pd.DataFrame([[1, 1], [6, 5], [6, 6], [0, 0], [1, 2]])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 14 09:27:05 2021
@author: vargh
"""
import numpy as np
import pandas as pd
from sympy import symbols, pi, Eq, integrate, diff, init_printing, solve
from scipy.optimize import curve_fit
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d, interp2d
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from shapely.geometry import Polygon
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
#init_printing()
## Functions
def calc_maneuver_sum(spec_range, spec_dtr, fuel_used_interper, maneuver_time_interper, printer):
calc_fuel_used = fuel_used_interper(spec_range, spec_dtr)
calc_maneuver_time = maneuver_time_interper(spec_range, spec_dtr)
if printer:
print('Total distance range: %.2f m'%(spec_range))
print('Total fuel mass burned range: %.2f kg'%(calc_fuel_used))
print('Maneuver time: %.2f s'%(calc_maneuver_time))
return calc_fuel_used, calc_maneuver_time
def calc_geom(threeD_balloon, theta):
norm_vec = np.array([np.cos(theta), 0, np.sin(theta)])
proj_of_u_on_n = (np.dot(threeD_balloon, norm_vec))*norm_vec.reshape(len(norm_vec), 1)
proj_of_u_on_n = threeD_balloon - proj_of_u_on_n.transpose()
points = np.zeros((threeD_balloon.shape[0], 2))
points[:, 0] = proj_of_u_on_n[:, 1]
points[:, 1] = proj_of_u_on_n[:, 2]
hull = ConvexHull(points)
bound = points[hull.vertices]
perp_A_x = Polygon(bound).area
cent_y = Polygon(bound).centroid.coords[0][0]
norm_vec2 = np.array([np.sin(theta), 0, np.cos(theta)])
proj_of_u_on_n2 = (np.dot(threeD_balloon, norm_vec2))*norm_vec2.reshape(len(norm_vec2), 1)
proj_of_u_on_n2 = threeD_balloon - proj_of_u_on_n2.transpose()
points2 = np.zeros((threeD_balloon.shape[0], 2))
points2[:, 0] = proj_of_u_on_n2[:, 0]
points2[:, 1] = proj_of_u_on_n2[:, 1]
hull2 = ConvexHull(points2)
bound2 = points2[hull2.vertices]
perp_A_y = Polygon(bound2).area
cent_x = Polygon(bound2).centroid.coords[0][0]
return perp_A_x, perp_A_y, cent_x, cent_y
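# calc_geom above projects the 3-D balloon point cloud onto the planes normal to the rotated
# x- and y-axes and takes the convex hull of each 2-D projection: the hull areas give the
# frontal areas used for drag, and the hull centroids give the offsets used for the moment arms.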
def init_calc(threeD_balloon, payload_height, payload_width, payload_depth, connector_height, balloon_height, balloon_mass, COG_payload_h, COG_payload_w, rho_atmo, dim_scale, dyn_visc, F_b, thrust_f, thrust_r, m_dot_f, m_dot_r, acc_g, consider_bouyancy_drift, time_step, target_range, d_tol, dragthrustratio, min_burn_index, moment_arm_thruster):
## Initializations
t = np.array([0]) # time
r_m = np.array([total_rover_mass]) # full rover mass
# kinematics in x, displacement, velocity and acceleration
d_x = np.array([0]) # (m)
v_x = np.array([0]) # (m/s)
a_x = np.array([0]) # (m/s^2)
# kinematics in y, displacement, velocity and acceleration
d_y = np.array([0]) # (m)
v_y = np.array([0]) # (m/s)
a_y = np.array([0]) # (m/s^2)
# moment about z
m_z = np.array([0]) # (Nm)
F = np.array([thrust_f]) # Thrust (N)
D_x = np.array([0]) # Drag in x (N)
D_y = np.array([0]) # Drag in y (N)
# rotational kinematics in z, displacement, velocity, accleration
alpha = np.array([0]) # (rad/s^2)
omega = np.array([0]) # (rad/s)
theta = np.array([0]) # (rad)
rem_fuel = np.array([fuel_mass])
ballast_mass = np.array([0])
i = 0
fail = 0
burn_index = 0
while abs(d_x[i] - target_range) > d_tol and not(fail == 1):
## initial conditions
prev_t = t[i]
prev_r_m = r_m[i]
prev_d_x = d_x[i]
prev_v_x = v_x[i]
prev_a_x = a_x[i]
prev_d_y = d_y[i]
prev_v_y = v_y[i]
prev_a_y = a_y[i]
prev_m_z = m_z[i]
prev_F = F[i]
prev_D_x = D_x[i]
prev_D_y = D_y[i]
prev_alpha = alpha[i]
prev_omega = omega[i]
prev_theta = theta[i]
prev_fuel = rem_fuel[i]
prev_ballast_mass = ballast_mass[i]
## time
t = np.append(t, prev_t + time_step)
cur_t = prev_t + time_step
## Modified perpendicular area
perp_A_x, perp_A_y, cent_x, cent_y = calc_geom(threeD_balloon, prev_theta) # calculates perpendicular area in x and y and the centroid for a given theta
## Center of Gravity, Center of Drag, Moment of Inertia (not rotated)
COG_balloon_h = (payload_height + connector_height + balloon_height/2)
COG_balloon_w = cent_x
COG_cur_h = ((r_m[i] - balloon_mass)*COG_payload_h + balloon_mass*COG_balloon_h)/(r_m[i]) # calculates changing height COG
COG_cur_w = ((r_m[i] - balloon_mass)*COG_payload_w + balloon_mass*COG_balloon_w)/(r_m[i]) # calculates changing COG
J_payload_u = r_m[i]*(payload_height**2 + payload_width**2) # untransformed moment of inertia of payload
trans_payload_J_d = np.sqrt(COG_cur_h**2 + COG_cur_w**2) - COG_payload # distance axis of rotation must be moved
J_payload_t = J_payload_u + r_m[i]*trans_payload_J_d**2 # moving axis of rotation with parallel axis theorem
trans_balloon_J_d = np.sqrt((COG_balloon_h - COG_cur_h)**2 + (COG_balloon_w - COG_cur_w)**2) # distance axis of rotation must be moved
J_balloon_t = J_balloon_u + balloon_mass*trans_balloon_J_d**2 # moving axis of rotation with parallel axis theorem
J_tot = J_payload_t + J_balloon_t
COD_balloon_h = COG_balloon_h # needs to be updated based on CFD
COD_balloon_w = COG_balloon_w # needs to be updated based on CFD
# Skin Friction coefficient
if prev_v_x != 0:
re_num = rho_atmo*prev_v_x*dim_scale/dyn_visc # Reynold's Number
C_f = .027/np.power(re_num, 1/7) ## Prandtl's 1/7 Power Law
else:
C_f = 0 # If velocity = 0, C_f = 0
D_mag = np.sqrt(prev_D_x**2 + prev_D_y**2) # magnitude of drag
res_freq = int(np.ceil(2*pi*np.sqrt(J_tot/(F_b*balloon_height)))) # calculated resonant frequency
thrust = thrust_f # thrust
m_dot = m_dot_f # mass flow rate
if abs(D_mag/thrust) < dragthrustratio: # if thrust to drag ratio is less than max ratio, burn
burn_condition = 1
else:
if burn_index > min_burn_index: # if engine has burned for minimal time, and drag condition exceeded, stop burning
burn_condition = 0
burn_index = 0
if burn_condition:
burn_index = burn_index + 1
## Force
cur_F = thrust
cur_fuel = prev_fuel - m_dot*time_step
# Ballast
cur_ballast_mass = prev_ballast_mass + m_dot*time_step
cur_r_m = prev_r_m
else:
cur_F = 0
cur_r_m = prev_r_m
cur_fuel = prev_fuel
mass_deficit = 0
cur_ballast_mass = prev_ballast_mass
perp_A_pay_x = payload_width/np.cos(prev_theta)*payload_depth # calculates perpendicular surface area of payload
pay_drag_x = -.5*(C_D_payload+C_f)*perp_A_pay_x*rho_atmo*prev_v_x**2 # calculates drag from payload
ball_drag_x = -.5*(C_D_balloon+C_f)*perp_A_x*rho_atmo*prev_v_x**2 # calculates drag from balloon in x
ball_drag_y = -.5*(C_D_balloon+C_f)*perp_A_y*rho_atmo*prev_v_y**2 # calculates drag from balloon in y
cur_D_x = pay_drag_x + ball_drag_x # calculates total drag in x
cur_D_y = ball_drag_y # calculates total drag in y
cur_D_mag = np.sqrt(cur_D_x**2 + cur_D_y**2) # Magnitude of drag
## Linear Kinematics
tot_force_x = cur_F*np.cos(prev_theta) + cur_D_x # effective thrust in x
tot_force_y = cur_F*np.sin(prev_theta) + cur_D_y # effective force in y
cur_a_x = tot_force_x/cur_r_m
cur_a_y = tot_force_y/cur_r_m
cur_v_x = prev_v_x+cur_a_x*time_step
cur_v_y = prev_v_y+cur_a_y*time_step
cur_d_x = prev_d_x+cur_v_x*time_step
cur_d_y = prev_d_y+cur_v_y*time_step
## Rotational Kinematics
# Payload Gravity Torque
g_m_a_y_pay = COG_cur_h - COG_payload_h # moment arm for gravity on the payload y
g_m_a_x_pay = COG_cur_w - COG_payload_w # moment arm for gravity on the payload x
g_m_a_pay = np.sqrt(g_m_a_y_pay**2 + g_m_a_x_pay**2)
g_m_pay = abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_pay)
# Balloon Gravity Torque
g_m_a_y_ball = COG_cur_h - COG_balloon_h # moment arm for gravity on the payload y
g_m_a_x_ball = COG_cur_w - COG_balloon_w # moment arm for gravity on the payload x
g_m_a_ball = np.sqrt(g_m_a_y_pay**2 + g_m_a_x_pay**2)
g_m_ball = -abs((cur_r_m - balloon_mass)*acc_g * np.sin(prev_theta) * g_m_a_ball)
g_m = g_m_pay + g_m_ball
# Balloon Drag Torque
d_m_a_y = COD_balloon_h - COG_cur_h # moment arm for drag on the balloon y
d_m_a_x = COD_balloon_w - COG_cur_w # moment arm for drag on the balloon x
d_m_a = np.sqrt(d_m_a_y**2 + d_m_a_x**2) # euclidean distance
ball_D_mag = np.sqrt(ball_drag_x**2 + ball_drag_y**2) # magnitude of drag on balloon
d_m = d_m_a*ball_D_mag*np.cos(prev_theta) - pay_drag_x*g_m_a_pay # sum all drag moments
# Bouyancy force torque, balloon
b_m_a_y = COG_balloon_h - COG_cur_h # moment arm for bouyancy force y
b_m_a_x = COG_balloon_w - COG_cur_w # moment arm for bouyancy force x
b_m_a = np.sqrt(b_m_a_y**2 + b_m_a_x**2) # euclidean
b_m = b_m_a * F_b * np.sin(prev_theta) # total buoyancy moment
t_m_a = moment_arm_thruster # thruster moment arm
t_m = cur_F * (moment_arm_thruster) # thruster moment
m_z_tot = d_m - b_m + t_m - g_m # total moment
cur_alpha = m_z_tot / J_tot
cur_omega = prev_omega + cur_alpha*time_step
cur_theta = prev_theta + cur_omega*time_step
## all updates
F = np.append(F, cur_F)
r_m = np.append(r_m, cur_r_m)
D_x = np.append(D_x, cur_D_x)
D_y = np.append(D_y, cur_D_y)
a_x = np.append(a_x, cur_a_x)
a_y = np.append(a_y, cur_a_y)
v_x = np.append(v_x, cur_v_x)
v_y = np.append(v_y, cur_v_y)
d_x = np.append(d_x, cur_d_x)
d_y = np.append(d_y, cur_d_y)
m_z = np.append(m_z, m_z_tot)
alpha = np.append(alpha, cur_alpha)
omega = np.append(omega, cur_omega)
theta = np.append(theta, cur_theta)
rem_fuel = np.append(rem_fuel, cur_fuel)
ballast_mass = np.append(ballast_mass, cur_ballast_mass)
i = i + 1
if cur_fuel < 0:
fail = 1
print('Not Enough Fuel Mass')
if i % 100 == 0:
print('.', end= '')
if i % 5000 == 0:
print('\n')
all_data = np.zeros((len(t), 17))
all_data[:, 0] = t
all_data[:, 1] = F
all_data[:, 2] = r_m
all_data[:, 3] = D_x
all_data[:, 4] = D_y
all_data[:, 5] = a_x
all_data[:, 6] = a_y
all_data[:, 7] = v_x
all_data[:, 8] = v_y
all_data[:, 9] = d_x
all_data[:, 10] = d_y
all_data[:, 11] = m_z
all_data[:, 12] = alpha
all_data[:, 13] = omega
all_data[:, 14] = theta
all_data[:, 15] = rem_fuel
all_data[:, 16] = ballast_mass
headers = ['time', 'force', 'mass', 'drag_x', 'drag_y', 'acceleration_x', 'acceleration_y', 'velocity_x', 'velocity_y', 'displacement_x', 'displacement_y', 'moment_z', 'alpha', 'omega', 'theta', 'fuel_mass', 'ballast_mass']
return
|
pd.DataFrame(all_data, columns=headers)
|
pandas.DataFrame
|
"""
Utils to plot graphs with arrows
"""
import matplotlib.transforms
import matplotlib.patches
import matplotlib.colors
import matplotlib.cm
import numpy as np
import pandas as pd
import logging
from tctx.util import plot
def _clip_arrows(arrows, tail_offset, head_offset):
"""
shorten head & tail so the arrows don't overlap with markers
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:return: 2 numpy arrays of shape Nx2
"""
source_pos = arrows[['source_x', 'source_y']].values
target_pos = arrows[['target_x', 'target_y']].values
direction = target_pos - source_pos
length = np.sqrt(np.sum(np.square(direction), axis=1))
direction = direction / length[:, np.newaxis]
source_pos = source_pos + direction * tail_offset
target_pos = target_pos + direction * (-1 * head_offset)
return source_pos, target_pos
def plot_arrows_cmap(
ax, arrows, c, cmap=None, norm=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a colormap.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param c: a pd.Series with the same index as arrows or a string that identifies a column in it.
:param cmap: colormap name (resolved via plot.lookup_cmap) or a matplotlib colormap
:param norm: matplotlib.colors.Normalize; defaults to the min/max of c
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:param head_length: arrow head length (passed to ArrowStyle.CurveFilledB)
:param head_width: arrow head width (passed to ArrowStyle.CurveFilledB)
:param kwargs: args for matplotlib.patches.FancyArrowPatch
:return: matplotlib.cm.ScalarMappable that can be used for a colorbar
"""
if cmap is None:
cmap = 'default'
if isinstance(cmap, str):
cmap = plot.lookup_cmap(cmap)
if isinstance(c, str):
c = arrows[c]
if norm is None:
norm = matplotlib.colors.Normalize(vmin=c.min(), vmax=c.max())
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
color = cmap(norm(c[idx]))
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
sm = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(c.values)
return sm
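# Usage sketch (the column name 'weight' is illustrative, and plt is assumed to be
# matplotlib.pyplot):
#
#   sm = plot_arrows_cmap(ax, arrows, c='weight', cmap='viridis')
#   plt.colorbar(sm, ax=ax, label='weight')
#
# A string cmap is resolved through plot.lookup_cmap, so the name must be known to that
# helper; a matplotlib colormap object can also be passed directly.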
def _plot_single_arrow(ax, source_pos, target_pos, arrowstyle, color, **kwargs):
patch_kwargs = kwargs.copy()
patch_kwargs.setdefault('edgecolor', color)
patch_kwargs.setdefault('facecolor', color)
patch = matplotlib.patches.FancyArrowPatch(
posA=source_pos,
posB=target_pos,
arrowstyle=arrowstyle,
**patch_kwargs,
)
ax.add_artist(patch)
def plot_arrows_solid(
ax, arrows, color=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a solid color.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param color: solid color applied to every arrow
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:param head_length: arrow head length (passed to ArrowStyle.CurveFilledB)
:param head_width: arrow head width (passed to ArrowStyle.CurveFilledB)
:param kwargs: args for matplotlib.patches.FancyArrowPatch
:return: None
"""
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
class Graph:
"""
A class to plot graphs with per-node and per-edge styles
"""
def __init__(self, nodes, edges, styles=None, transform=None, kwargs_nodes=None, kwargs_edges=None):
"""
:param nodes: a pd.DataFrame with columns ['x', 'y'] representing the 2d position and
column 'style' that can be indexed into the styles DF
:param edges: a pd.DataFrame with columns ['source', 'target'] that can be indexed into the nodes DF and
column 'style' that can be indexed into the styles DF
:param styles: pd.DataFrame with columns for different cmaps ('cmap_from_white', etc),
color levels ('light', 'dark', etc). By default: plot.styles_df
:param kwargs_nodes: default kwargs to nodes plotting
:param kwargs_edges: default kwargs to edges plotting
:param transform: the transform to apply to the graph. Useful when drawing an inset.
"""
assert np.all(edges['source'] != edges['target']), 'self edges'
assert np.all([np.issubdtype(nodes[c].dtype, np.number) for c in ['x', 'y']])
if styles is None:
styles = plot.styles_df.copy()
self.styles = styles
self.nodes = nodes
self.edges = edges
self.transform = transform
self.default_kwargs_nodes = dict(
cmap='cmap',
marker='marker_time',
linewidth=.5,
facecolor='light',
edgecolor='darker',
)
self.default_kwargs_nodes.update(kwargs_nodes or {})
self.default_kwargs_edges = dict(
cmap='cmap',
facecolor='main',
edgecolor='main',
)
self.default_kwargs_edges.update(kwargs_edges or {})
edge_len = self.get_edge_lengths()
too_short = np.count_nonzero(np.isclose(edge_len, 0))
if too_short:
logging.warning(f'{too_short}/{len(edge_len)} edges of zero length')
# pandas complains when editing categories which is inconvenient
if self.nodes['style'].dtype.name == 'category':
self.nodes['style'] = self.nodes['style'].astype(str)
if self.edges['style'].dtype.name == 'category':
self.edges['style'] = self.edges['style'].astype(str)
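# Minimal construction sketch (values are illustrative; the 'style' labels must exist
# in plot.styles_df, and plt is assumed to be matplotlib.pyplot):
#
#   nodes = pd.DataFrame({'x': [0.0, 1.0], 'y': [0.0, 1.0], 'style': ['exc', 'exc']})
#   edges = pd.DataFrame({'source': [0], 'target': [1], 'style': ['exc']})
#   g = Graph(nodes, edges)
#   fig, ax = plt.subplots()
#   g.plot_nodes_solid(ax)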
def copy(self):
return Graph(
nodes=self.nodes.copy(),
edges=self.edges.copy(),
styles=self.styles.copy(),
transform=None if self.transform is None else self.transform.copy(),
kwargs_nodes=self.default_kwargs_nodes.copy(),
kwargs_edges=self.default_kwargs_edges.copy(),
)
def get_edge_lengths(self):
xy0 = self.nodes.loc[self.edges['source'], ['x', 'y']].values
xy1 = self.nodes.loc[self.edges['target'], ['x', 'y']].values
edge_len = np.sqrt(np.sum(np.square(xy0 - xy1), axis=1))
return pd.Series(edge_len, index=self.edges.index)
def _get_arrows(self, selection=None):
if selection is None:
selection = self.edges
if isinstance(selection, (np.ndarray, pd.Index)):
selection = self.edges.loc[selection]
arrows = [selection]
for end in ['source', 'target']:
pos = self.nodes[['x', 'y']].reindex(selection[end])
pos.index = selection.index
pos.columns = [end + '_' + c for c in pos.columns]
arrows.append(pos)
arrows = pd.concat(arrows, axis=1)
return arrows
def _lookup_style_kwargs(self, style, kwargs):
kwargs = kwargs.copy()
if 'style' in kwargs:
specific = kwargs.pop('style')
if style in specific:
kwargs.update(specific[style])
styled_kwargs = kwargs.copy()
for k, v in kwargs.items():
if isinstance(v, str) and v in self.styles.columns:
styled_kwargs[k] = self.styles.loc[style, v]
if self.transform is not None:
styled_kwargs['transform'] = self.transform
return styled_kwargs
def plot_nodes_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the nodes with a flat color
:param ax:
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return:
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
ax.scatter(
nodes.x,
nodes.y,
**style_kwargs,
)
def plot_nodes_cmap(self, ax, c=None, selection=None, **kwargs):
"""
Plot all of the nodes with a color map
:param ax:
:param c: series or array matching length of self.nodes,
if none indicated, we expect a column 'c' in self.nodes
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
if c is None:
c = 'c'
if isinstance(c, str):
c = self.nodes[c]
if isinstance(c, np.ndarray):
c = pd.Series(c, index=self.nodes.index)
all_sm = {}
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
all_sm[style] = ax.scatter(
nodes.x,
nodes.y,
c=c.loc[nodes.index],
**style_kwargs,
)
return all_sm
def plot_nodes_labels(self, ax, nodes=None, va='center', ha='center', fmt='{index}', fontsize=6, **kwargs):
"""
Plot a descriptive text label for each node.
By default the index is shown; modify fmt to use something else.
"""
# TODO: allow the fmt string to use the style column, so labels can be colored with the "dark" color of the "label" column.
if nodes is None:
nodes = self.nodes
else:
nodes = self.nodes.loc[nodes]
for idx, row in nodes.iterrows():
ax.text(row['x'], row['y'], fmt.format(index=idx, **row), va=va, ha=ha, fontsize=fontsize, **kwargs)
def plot_edges_cmap(self, ax, c=None, **kwargs):
"""
Plot all of the edges with a color map
:param ax:
:param c: series or array matching length of self.edges,
if none indicated, we expect a column 'c' in self.edges
:param kwargs: params to plot_arrows_cmap
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
if c is None:
c = self.edges['c']
all_sm = {}
for style, arrows in self._get_arrows().groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
if 'edgecolor' in style_kwargs:
style_kwargs.pop('edgecolor')
all_sm[style] = plot_arrows_cmap(
ax, arrows, c,
**style_kwargs
)
return all_sm
def plot_edges_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the edges with a flat color
:param ax:
:param selection:
:param kwargs:
:return:
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
for style, arrows in self._get_arrows(selection=selection).groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
plot_arrows_solid(
ax, arrows,
**style_kwargs
)
@classmethod
def from_conns(cls, conns, cells, node_style='ei_type', edge_style='con_type'):
"""plot the connections in XY space"""
all_gids = np.unique(conns[['source_gid', 'target_gid']].values.flatten())
nodes = cells.loc[all_gids, ['x', 'y']].copy()
nodes['style'] = cells.loc[nodes.index, node_style]
edges = conns[['source_gid', 'target_gid']].copy()
edges.columns = ['source', 'target']
edges['style'] = conns.loc[edges.index, edge_style]
return cls(nodes, edges)
@classmethod
def from_conn_jumps(
cls, selected_jumps, detailed_spikes, node_keys, edge_style,
**kwargs):
"""plot spike jumps"""
assert 'x' in node_keys and 'y' in node_keys and 'style' in node_keys
nodes = {}
for k, v in node_keys.items():
if isinstance(v, str):
v = detailed_spikes[v]
else:
assert isinstance(v, (tuple, list, pd.Series, np.ndarray))
nodes[k] = v
nodes = pd.DataFrame(nodes)
edges = selected_jumps[['source_spike', 'target_spike']].copy()
edges.columns = ['source', 'target']
edges['style'] = selected_jumps.loc[edges.index, edge_style]
return cls(nodes, edges, **kwargs)
def get_floating_nodes(self) -> pd.Index:
"""
:return: the index of nodes with no connections in or out
"""
return self.nodes.index[
~self.nodes.index.isin(self.edges['source']) &
~self.nodes.index.isin(self.edges['target'])
]
def get_linked_nodes(self) -> pd.Index:
"""
:return: the index of nodes with at least a connection in or out
"""
return self.nodes.index[~self.nodes.index.isin(self.get_floating_nodes())]
def drop_nodes(self, drop_gids: pd.Index):
"""
remove the given nodes from the graph. This will also remove edges to/from those nodes
:param drop_gids: either a list of node ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.nodes.index, fill_value=False)
assert len(drop_gids) == len(self.nodes)
drop_gids = self.nodes.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.nodes.index.difference(drop_gids)
self.nodes = self.nodes.loc[remaining_gids].copy()
bad_edges = (
self.edges['source'].isin(drop_gids) |
self.edges['target'].isin(drop_gids)
)
self.edges = self.edges.loc[~bad_edges].copy()
def drop_edges(self, drop_gids: pd.Index):
"""
remove the given edges from the graph
example:
graph.drop_edges(graph.edges['weight'] < .75 * 70)
:param drop_gids: either a list of edge ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.edges.index, fill_value=False)
assert len(drop_gids) == len(self.edges)
drop_gids = self.edges.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.edges.index.difference(drop_gids)
self.edges = self.edges.loc[remaining_gids].copy()
def add_edges(self, new_edges: pd.DataFrame, **overwrite_cols):
"""
Add edges to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_edges before adding them.
For example, to set a style.
"""
new_edges = new_edges.copy()
for c, v in overwrite_cols.items():
new_edges[c] = v
missing_cols = self.edges.columns.difference(new_edges.columns)
if len(missing_cols) > 0:
logging.error(f'Missing columns: {list(missing_cols)}. Got: {list(new_edges.columns)}')
return
repeated = self.edges.index.intersection(new_edges.index)
if len(repeated):
logging.warning(f'Repeated edges will be ignored: {repeated}')
new_edges = new_edges.drop(repeated)
valid = (
new_edges['source'].isin(self.nodes.index) &
new_edges['target'].isin(self.nodes.index)
)
if np.any(~valid):
logging.warning(f'{np.count_nonzero(~valid):,g} edges without source or target will be ignored')
new_edges = new_edges[valid]
all_edges = pd.concat([self.edges, new_edges], axis=0, sort=False)
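# Illustrative sketch added for this write-up (not part of the original module; add_edges
# above is cut off at the concat in this dump). It builds a minimal Graph from made-up
# nodes/edges and a hand-rolled styles table; the real code defaults to plot.styles_df,
# and the style columns below ('cmap', 'marker_time', 'light', 'darker', 'main') are only
# the ones referenced by the default kwargs, filled with guessed values.
def _demo_graph():
    import matplotlib.pyplot as plt
    nodes = pd.DataFrame({
        'x': [0.0, 1.0, 2.0],
        'y': [0.0, 1.0, 0.5],
        'style': ['exc', 'exc', 'inh'],
    }, index=[10, 11, 12])
    edges = pd.DataFrame({
        'source': [10, 11],
        'target': [11, 12],
        'style': ['exc', 'inh'],
    })
    styles = pd.DataFrame({
        'cmap': ['Reds', 'Blues'],
        'marker_time': ['o', 's'],
        'light': ['#ffbbbb', '#bbbbff'],
        'darker': ['#880000', '#000088'],
        'main': ['#cc0000', '#0000cc'],
    }, index=['exc', 'inh'])
    g = Graph(nodes, edges, styles=styles)
    fig, ax = plt.subplots()
    g.plot_nodes_solid(ax)
    g.plot_edges_solid(ax)
    return g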
#!/usr/bin/env python3
import argparse
import pandas as pd
import os
import ntpath
parser = argparse.ArgumentParser()
parser.add_argument('simul1', help='CSV design file for setting up simulations for first comparate')
parser.add_argument('simul1_set', help='Which set of simulations and value of theta for first comparate to consider')
parser.add_argument('simul2', help='CSV design file for setting up simulations for second comparate')
parser.add_argument('simul2_set', help='Which set of simulations and value of theta for second comparate to consider')
parser.add_argument('simuldir', help='Parent directory of simulated datasets')
parser.add_argument('outdir', help='Directory to which to output merged data')
args = parser.parse_args()
def create_filelist(design_file, set_num):
filelist = []
with open(design_file, 'r') as design:
for index, line in enumerate(design):
if index == 0:
header = line.strip().split(',')
else:
param = line.strip().split(',')
scenario = {header[i]:param[i] for i in range(len(header))}
print(scenario)
try:
r_g1 = scenario['rsim-g1']
r_g2 = scenario['rsim-g2']
except KeyError:
r_g1 = r_g2 = 0.8
filelist.append('out_set_'+set_num+'_theta_'+scenario['theta']+'_rsim-g1_'+str(r_g1)+'_rsim-g2_'+ \
str(r_g2)+'_nbiorep_'+scenario['nbiorep']+'_allelicreads_'+ \
scenario['n_allele_specific_reads']+'_simruns_'+scenario['simruns']+'.tsv')
return(filelist)
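# Worked example added for illustration (the values are hypothetical): a design row with
# theta=0.5, nbiorep=3, n_allele_specific_reads=1000, simruns=10 and no rsim-g1/rsim-g2
# columns (so both fall back to 0.8) yields, for set '1', the file name
# 'out_set_1_theta_0.5_rsim-g1_0.8_rsim-g2_0.8_nbiorep_3_allelicreads_1000_simruns_10.tsv'.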
df_c1_files = pd.DataFrame(create_filelist(args.simul1, args.simul1_set), columns = ['c1'])
df_c1_files['c1'] = ntpath.basename(args.simul1).split("design_")[1].split("_null")[0]+"_null"+'/'+df_c1_files['c1'].astype(str)
df_c2_files = pd.DataFrame(create_filelist(args.simul2, args.simul2_set), columns = ['c2'])
df_c2_files['c2'] = ntpath.basename(args.simul2).split("design_")[1].split("_null")[0]+"_null"+'/'+df_c2_files['c2'].astype(str)
if 'not_null' in os.path.basename(args.simul1) and 'not_null' in os.path.basename(args.simul2):
df_c1_param = df_c1_files['c1'].str.split('_theta', 1, expand=True)
df_c1_files['param'] = 'theta'+df_c1_param[1].astype(str)
df_c1_files['c1_theta'] = df_c1_param[1].str.split('_rsim', expand=True)[0]
df_c1_files['c1_theta'] = 'theta1'+df_c1_files['c1_theta'].astype(str)
df_c2_param = df_c2_files['c2'].str.split('_theta', 1, expand=True)
df_c2_files['param'] = 'theta'+df_c2_param[1].astype(str)
df_c2_files['c2_theta'] = df_c2_param[1].str.split('_rsim', expand=True)[0]
df_c2_files['c2_theta'] = 'theta2'+df_c2_files['c2_theta'].astype(str)
else:
df_c1_param = df_c1_files['c1'].str.split('_rsim', 1, expand=True)
df_c1_files['c1_theta'] = df_c1_param[0].str.split('theta', expand=True)[1].str.replace('_', 'theta1_')
df_c1_files['param'] = df_c1_param[1]
df_c2_param = df_c2_files['c2'].str.split('_rsim', 1, expand=True)
df_c2_files['c2_theta'] = df_c2_param[0].str.split('theta', expand=True)[1].str.replace('_', 'theta2_')
df_c2_files['param'] = df_c2_param[1]
df_both_comparates = pd.merge(df_c1_files, df_c2_files, on='param', how='inner')
print(df_both_comparates)
for index, row in df_both_comparates.iterrows():
print(args.simuldir)
print(row['c1'])
df_c1_simul = pd.read_csv(os.path.join(args.simuldir, row['c1']), sep="\t")
df_c2_simul = pd.read_csv(os.path.join(args.simuldir, row['c2']), sep="\t")
df_c2_simul.drop(['FEATURE_ID'], axis=1, inplace=True)
df_c2_simul.columns = df_c2_simul.columns.str.replace('c1', 'c2')
if 'H1_not_null_H2_not_null_H3_null' in args.outdir:
filename = row['c1_theta'] + '_' + row['c2_theta'] + '_'+row['param'].split('_', 2)[-1]
else:
filename = row['c1_theta'] + '_' + row['c2_theta'] + '_rsim'+row['param']
print(filename)
pd.concat([df_c1_simul, df_c2_simul], axis=1, sort=False)
from pathlib import Path
import pandas as pd
from pptx import Presentation
from spx_data_update import UpdateSP500Data
from option_utilities import read_feather, write_feather
from urllib.request import urlretrieve
from pptx.util import Inches
from pptx.enum.text import PP_PARAGRAPH_ALIGNMENT as PP_ALIGN
import pandas_datareader.data as web
import os
def title_only_slide(asset_path, slide_dict, presentation, text_msg=None):
slide = presentation.slides.add_slide(presentation.slide_layouts[slide_dict['TITLE_ONLY']])
for shape in slide.placeholders:
print('%d %s' % (shape.placeholder_format.idx, shape.name))
placeholder = slide.placeholders[13] # idx key, not position
placeholder_picture = placeholder.insert_picture(str(asset_path))
slide.shapes.title.text = text_msg
return slide
def main():
ppt_path = Path.home() / 'Dropbox' / 'option_overlay'
fig_path = Path.home() / 'Dropbox' / 'outputDev' / 'fig'
template_name = 'option_overlay.pptx'
output_name = 'test.pptx'
# Assets
heat_map_path = fig_path / 'heat_map.png'
cum_perf_path = fig_path / 'cum_perf.png'
cum_total_perf_path = fig_path / 'cum_total_perf.png'
cum_total_perf2_path = fig_path / 'cum_total_perf2.png'
hfri_overlay_path = fig_path / 'hfri_overlay.png'
delta_path = fig_path / 'Delta.png'
gamma_path = fig_path / 'Gamma.png'
vega_path = fig_path / 'Vega.png'
rho_path = fig_path / 'Rho.png'
theta_path = fig_path / 'Theta.png'
# Layout index
layout_dict = {'TITLE': 0, 'SUB_TITLE': 1, 'QUOTE': 2, 'TITLE_COLUMN1': 3, 'TITLE_COLUMN2': 4, 'TITLE_COLUMN3': 5,
'TITLE_ONLY': 6, 'CAPTION': 7, 'BLANK': 8}
prs = Presentation(ppt_path / template_name)
# Title slide
for shape in prs.slides[0].placeholders:
print('%d %s' % (shape.placeholder_format.idx, shape.name))
prs.slides[0].shapes[0].text = 'Income Alternatives'
# 1
slide = prs.slides.add_slide(prs.slide_layouts[layout_dict['TITLE_COLUMN1']])
for shape in slide.placeholders:
print('%d %s' % (shape.placeholder_format.idx, shape.name))
# placeholder = slide.placeholders[1] # idx key, not position
slide.shapes.title.text = 'Background'
paragraph_strs = [
'Egg, bacon, sausage and spam.',
'Spam, bacon, sausage and spam.',
'Spam, egg, spam, spam, bacon and spam.'
]
text_frame = slide.placeholders[1].text_frame
text_frame.clear() # remove any existing paragraphs, leaving one empty one
p = text_frame.paragraphs[0]
p.text = paragraph_strs[0]
p.alignment = PP_ALIGN.LEFT
for para_str in paragraph_strs[1:]:
p = text_frame.add_paragraph()
p.text = para_str
p.alignment = PP_ALIGN.LEFT
p.level = 1
# 2
title_only_slide(heat_map_path, layout_dict, prs, text_msg='Monthly Excess Returns (%)')
# 3
title_only_slide(cum_perf_path, layout_dict, prs, text_msg='Cumulative Excess Return')
# 4
title_only_slide(cum_total_perf_path, layout_dict, prs, text_msg='Cumulative Total Return')
# 5
title_only_slide(cum_total_perf2_path, layout_dict, prs, text_msg='Cumulative Total Return')
# 6
title_only_slide(hfri_overlay_path, layout_dict, prs, text_msg='Overlay vs. HFRI')
# 6~10
greek_dict = {delta_path: 'Delta',
gamma_path: 'Gamma',
vega_path: 'Vega',
rho_path: 'Rho',
theta_path: 'Theta'}
for key, value in greek_dict.items():
title_only_slide(key, layout_dict, prs, text_msg='Portfolio ' + value)
# Save and open presentation
prs.save(ppt_path / output_name)
os.system("open " + str(ppt_path / output_name))
#
# greek_dict = {delta_path: 'Delta',
# gamma_path: 'Gamma',
# vega_path: 'Vega',
# rho_path: 'Rho',
# theta_path: 'Theta.png'}
# for key, value in greek_dict.items():
# slide = prs.slides.add_slide(prs.slide_layouts[layout_dict['TITLE_ONLY']])
if __name__ == '__main__':
main()
# for i in range(0, 8, 1):
# blank_slide_layout = prs.slide_layouts[i]
# slide = prs.slides.add_slide(blank_slide_layout)
#
# top = Inches(1.54)
# left = Inches(0.28)
# height = Inches(3.82)
# pic = slide.shapes.add_picture(str(heat_map_path), left, top, height=height)
# for shape in slide.placeholders:
# print('%d %s' % (shape.placeholder_format.idx, shape.name))
def aqr_alt_funds(update_funds=True):
db_directory = UpdateSP500Data.DATA_BASE_PATH / 'xl'
url_string = 'https://funds.aqr.com/-/media/files/fund-documents/pricefiles/'
fund_dict = {'alternative_risk_premia': 'leapmf.xls',
'diversified_arbitrage': 'daf.xls',
'equity_market_neutral': 'emnmf.xls',
'equity_long_short': 'elsmf.xls',
'global_macro': 'gmmf.xls',
'managed_futures': 'mfmf.xls',
'multi_alternative': 'msaf.xls',
'style_premia_alternative': 'spaf.xls'}
url_dict = {value: url_string + value for (key, value) in fund_dict.items()}
if update_funds:
_ = [urlretrieve(value, db_directory / key) for (key, value) in url_dict.items()]
rows_to_skip = list(range(0, 15))
rows_to_skip.append(16)
aqr_funds_index = []
for key, value in fund_dict.items():
df = pd.read_excel(db_directory / value, usecols=[1, 4],
skiprows=rows_to_skip, index_col=0, squeeze=True,
keep_default_na=False)
df = df.rename(key)
aqr_funds_index.append(df)
return pd.concat(aqr_funds_index, axis=1)
def get_fund_assets(update_funds=True):
db_directory = UpdateSP500Data.DATA_BASE_PATH / 'feather'
feather_name = 'all_funds.feather'
if update_funds:
fund_dict = {'^SP500TR': 'S&P 500',
'VDIGX': 'VG Dividend Growth',
'VEIRX': 'VG Equity-Income',
'VWEAX': 'VG High-Yield Corporate',
'VWALX': 'VG High-Yield Muni',
'VBTLX': 'VG Total Bond Market',
'BXMIX': 'Blackstone Alternatives',
'QLEIX': 'AQR Equity Long/Short',
'QGMIX': 'AQR Global Macro',
'QMHIX': 'AQR Managed Futures',
'ADAIX': 'AQR Diversified Arbitrage',
'QSPIX': 'AQR Style Premia',
'AVGRX': 'Dreyfus Dynamic Total Return', #$1.141bn
'FAAAX': 'K2 Franklin Alternative',# fund $1.17bn
'GJRTX': 'GSAM Absolute return', # tracker $2.36bn
'MASNX': '<NAME>',# Strats Fund $2.05bn
'PSMIX': 'Principal Global Multi-Strategy',# Fund $2.76bn
'QOPIX': 'Oppenheimer Fundamental Alternatives',# Fd $1.20
'GAFYX': 'Natixis ASG Global Alternatives'} # Fd $1.39bn
all_funds = [web.get_data_yahoo(key, 'JAN-16-80') for key, _ in fund_dict.items()]
all_funds = [fund['Adj Close'] for fund in all_funds]
all_funds = [fund.rename(fund_name) for fund, fund_name in zip(all_funds, fund_dict.values())]
all_funds = pd.concat(all_funds, axis=1)
# Replace dodgy observation
all_funds['Principal Global Multi-Strategy']['2017-08-24'] = all_funds['Principal Global Multi-Strategy'][
'2017-08-23']
write_feather(all_funds, db_directory / feather_name)
all_funds = read_feather(db_directory / feather_name)
return all_funds
def get_hfr(feather_name, csv_file_path, update_funds=True):
db_directory = UpdateSP500Data.DATA_BASE_PATH / 'xl'
# feather_name = 'hfrx.feather'
if update_funds:
rows_to_skip = list(range(0, 2))
headers = ['Date', 'Index Name', 'Index Code', 'Return', 'Index Value']
df = pd.read_csv(db_directory / csv_file_path, skiprows=rows_to_skip,
squeeze=True, names=headers, engine='python')
index_codes = df['Index Code'].unique()
all_hfr_list = []
for index_code in index_codes[:-1]: # remove HFR company info
idx = df['Index Code'] == index_code
hfr = df[idx].copy()
hfr['Date'] = hfr['Date'].apply(pd.to_datetime)
hfr = hfr.set_index(['Date'])
hfr = hfr.reindex(hfr.index.sort_values())
hfr_index = hfr['Index Value'].rename(hfr['Index Name'].unique()[0])
all_hfr_list.append(hfr_index)
hfr_df = pd.concat(all_hfr_list, axis=1)
import copy
import pandas as pd
import pybedtools as pbt
from .general import _sample_names
from functools import reduce
class AnnotatedInteractions:
def __init__(
self,
df,
annot_beds,
completely_contains=None,
):
"""
Initialize AnnotatedInteractions object.
Parameters
----------
df : pandas.DataFrame
Dataframe with peaks. Must contain columns chrom1, start1, end1,
chrom2, start2, and end2. Other columns will not be removed but
may be overwritten if they clash with column names created here.
Interactions must be unique.
annot_beds : dict
Dict whose keys are names (like 'gene', 'promoter', etc.) and whose
values are bed files to annotate the input bed file with.
"""
self.df = df.copy(deep=True)
if completely_contains is None:
    completely_contains = []
self.df.index = (self.df.chrom1.astype(str) + ':' +
self.df.start1.astype(str) + '-' +
self.df.end1.astype(str) + '==' +
self.df.chrom2.astype(str) + ':' +
self.df.start2.astype(str) + '-' +
self.df.end2.astype(str))
assert len(set(self.df.index)) == self.df.shape[0]
self.df['name'] = self.df.index
self.feature_to_df = pd.DataFrame(index=self.df.index)
self.annotate_interactions()
self.bts_from_df()
self._initialize_annot_beds(annot_beds)
for k in list(annot_beds.keys()):
self.annotate_bed(bt=self.bt1, name=k, col_name='{}1'.format(k),
df_col='anchor1')
if k in completely_contains:
self.annotate_bed(bt=self.bt1, name=k,
col_name='{}1_complete'.format(k),
df_col='anchor1', complete=True)
for k in list(annot_beds.keys()):
self.annotate_bed(bt=self.bt2, name=k, col_name='{}2'.format(k),
df_col='anchor2')
if k in completely_contains:
self.annotate_bed(bt=self.bt2, name=k,
col_name='{}2_complete'.format(k),
df_col='anchor2', complete=True)
for k in list(annot_beds.keys()):
self.annotate_bed(bt=self.bt_loop, name=k, col_name='{}_loop'.format(k),
df_col='loop')
if k in completely_contains:
self.annotate_bed(bt=self.bt_loop, name=k,
col_name='{}_loop_complete'.format(k),
df_col='loop', complete=True)
for k in list(annot_beds.keys()):
self.annotate_bed(bt=self.bt_loop_inner, name=k,
col_name='{}_loop_inner'.format(k),
df_col='loop_inner')
if k in completely_contains:
self.annotate_bed(bt=self.bt_loop_inner, name=k,
col_name='{}_loop_inner_complete'.format(k),
df_col='loop_inner', complete=True)
self._bt1_path = None
self._bt2_path = None
self._bt_loop_path = None
self._bt_loop_inner_path = None
def _initialize_annot_beds(
self,
annot_beds,
):
import pybedtools as pbt
self.annot_beds = dict()
for k in list(annot_beds.keys()):
if type(annot_beds[k]) == str:
self.annot_beds[k] = pbt.BedTool(annot_beds[k])
else:
self.annot_beds[k] = annot_beds[k]
def load_saved_bts(self):
"""If the AnnotatedInteractions object was saved to a pickle and
reloaded, this method remakes the BedTool objects."""
if self._bt1_path:
self.bt1 = pbt.BedTool(self._bt1_path)
if self._bt2_path:
self.bt2 = pbt.BedTool(self._bt2_path)
if self._bt_loop_path:
self.bt_loop = pbt.BedTool(self._bt_loop_path)
if self._bt_loop_inner_path:
self.bt_loop_inner = pbt.BedTool(self._bt_loop_inner_path)
def save(
self,
path,
name,
):
"""
Save AnnotatedInteractions object and bed files. The object is stored in
a pickle and the bed files are saved as separate bed files. The object
can be reloaded by reading the pickle with the pickle module and the BedTool
objects can be recreated using .load_saved_bts().
Parameters
----------
path : str
Path to save files to. Path should include a basename for the files.
For instance, path='~/abc' will create files like ~/abc.pickle,
~/abc_anchor1.bed, etc.
name : str
Descriptive name used for bed file trackline.
"""
t = 'track type=bed name="{}_anchor1"'.format(name)
self.bt1.saveas(path + '_anchor1.bed', trackline=t)
self._bt1_path = path + '_anchor1.bed'
t = 'track type=bed name="{}_anchor2"'.format(name)
self.bt2.saveas(path + '_anchor2.bed', trackline=t)
self._bt2_path = path + '_anchor2.bed'
t = 'track type=bed name="{}_loop"'.format(name)
self.bt_loop.saveas(path + '_loop.bed', trackline=t)
self._bt_loop_path = path + '_loop.bed'
t = 'track type=bed name="{}_loop_inner"'.format(name)
self.bt_loop_inner.saveas(path + '_loop_inner.bed', trackline=t)
self._bt_loop_inner_path = path + '_loop_inner.bed'
import pickle
pickle.dump(self, open(path + '.pickle', 'wb'))
def annotate_bed(
self,
bt,
name,
col_name,
complete=None,
df_col=None,
):
"""
Annotate the input bed file using one of the annotation beds.
Parameters
----------
bt : pybedtools.BedTool
BedTool for either one of the anchors, the loops,
or the loop inners.
name : str
The key for the annoation bed file in annot_beds.
col_name : str
Used to name the columns that will be made.
complete : bool
If True, this method will check whether the features in the
annotation bed are completely contained by the features in the input
bed.
df_col : str
If the name for bt isn't the index of self.df, this specifies
which column of self.df contains the names for bt. For instance,
if bt is the anchor1 BedTool, then df_col='anchor1'.
"""
import numpy as np
import pandas as pd
has_name_col = len(self.annot_beds[name][0].fields) > 3
print('one')
if complete:
res = bt.intersect(self.annot_beds[name], sorted=True, wo=True, F=1)
else:
res = bt.intersect(self.annot_beds[name], sorted=True, wo=True)
print('two')
try:
df = res.to_dataframe(names=list(range(len(res[0].fields))))
ind = df[3].values
if df_col is None:
self.df[col_name] = False
self.df.ix[set(ind), col_name] = True
else:
tdf = pd.DataFrame(True, index=ind, columns=[col_name])
self.df = self.df.merge(tdf, left_on=df_col, right_index=True,
how='outer')
self.df[col_name] = self.df[col_name].fillna(False)
#self.df.ix[self.df[col_name].isnull(), col_name] = False
print('a')
if has_name_col:
vals = df[7].values
else:
vals = list(df[4].astype(str) + ':' +
df[5].astype(str) + '-' +
df[6].astype(str))
print('b')
df.index = vals
gb = df.groupby(3)
t = pd.Series(gb.groups)
print('c')
t = pd.DataFrame(t.apply(lambda x: set(x)))
print('d')
t.columns = ['{}_features'.format(col_name)]
self.df = self.df.merge(t, left_on=df_col, right_index=True,
how='outer')
print('e')
except IndexError:
pass
def annotate_interactions(self):
import numpy as np
self.df['anchor1'] = (self.df.chrom1.astype(str) + ':' +
self.df.start1.astype(str) + '-' +
self.df.end1.astype(str))
self.df['anchor2'] = (self.df.chrom2.astype(str) + ':' +
self.df.start2.astype(str) + '-' +
self.df.end2.astype(str))
self.df['intra'] = True
self.df.ix[self.df.chrom1 != self.df.chrom2, 'intra'] = False
ind = self.df[self.df.intra].index
self.df['loop'] = np.nan
self.df.ix[ind, 'loop'] = (
self.df.ix[ind, 'chrom1'] + ':' +
self.df.ix[ind, ['start1', 'start2']].min(axis=1).astype(str) +
'-' + self.df.ix[ind, ['end1', 'end2']].max(axis=1).astype(str))
self.df['loop_length'] = (self.df[['end1', 'end2']].max(axis=1) -
self.df[['start1', 'start2']].min(axis=1))
ind = ind[(self.df.ix[ind, ['start1', 'start2']].max(axis=1) >
self.df.ix[ind, ['end1', 'end2']].min(axis=1))]
self.df['loop_inner'] = np.nan
self.df.ix[ind, 'loop_inner'] = (
self.df.ix[ind, 'chrom1'] + ':' +
self.df.ix[ind, ['end1', 'end2']].min(axis=1).astype(str) + '-' +
self.df.ix[ind, ['start1', 'start2']].max(axis=1).astype(str))
self.df['loop_inner_length'] = (
self.df[['start1', 'start2']].max(axis=1) -
self.df[['end1', 'end2']].min(axis=1))
def bts_from_df(self):
import pybedtools as pbt
s = '\n'.join(list(set(
self.df.chrom1.astype(str) + '\t' + self.df.start1.astype(str) +
'\t' + self.df.end1.astype(str) + '\t' + self.df.chrom1.astype(str)
+ ':' + self.df.start1.astype(str) + '-' +
self.df.end1.astype(str)))) + '\n'
self.bt1 = pbt.BedTool(s, from_string=True).sort()
s = '\n'.join(list(set(
self.df.chrom2.astype(str) + '\t' + self.df.start2.astype(str) +
'\t' + self.df.end2.astype(str) + '\t' + self.df.chrom2.astype(str)
+ ':' + self.df.start2.astype(str) + '-' +
self.df.end2.astype(str)))) + '\n'
self.bt2 = pbt.BedTool(s, from_string=True).sort()
ind = self.df[self.df.intra].index
s = '\n'.join(
self.df.ix[ind, 'chrom1'].astype(str) + '\t' +
self.df.ix[ind, ['start1', 'start2']].min(axis=1).astype(str) +
'\t' + self.df.ix[ind, ['end1', 'end2']].max(axis=1).astype(str) +
'\t' + self.df.ix[ind, 'name']) + '\n'
self.bt_loop = pbt.BedTool(s, from_string=True).sort()
ind = ind[(self.df.ix[ind, ['start1', 'start2']].max(axis=1) >
self.df.ix[ind, ['end1', 'end2']].min(axis=1))]
s = '\n'.join(
self.df.ix[ind, 'chrom1'].astype(str) + '\t' +
self.df.ix[ind, ['end1', 'end2']].min(axis=1).astype(str) + '\t' +
self.df.ix[ind, ['start1', 'start2']].max(axis=1).astype(str) +
'\t' + self.df.ix[ind, 'name']) + '\n'
self.bt_loop_inner = pbt.BedTool(s, from_string=True).sort()
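# Construction sketch added for illustration (not part of the original module). The bed
# file paths and coordinates below are hypothetical; the interactions table only needs
# chrom1/start1/end1/chrom2/start2/end2, and each annotation bed adds anchor-, loop- and
# loop-inner-level boolean columns plus a *_features column of overlapping feature names.
def _demo_annotated_interactions():
    interactions = pd.DataFrame({
        'chrom1': ['chr1', 'chr1'], 'start1': [100, 5000], 'end1': [600, 5500],
        'chrom2': ['chr1', 'chr2'], 'start2': [2000, 300], 'end2': [2500, 800],
    })
    annot_beds = {'gene': 'genes.bed', 'promoter': 'promoters.bed'}  # hypothetical paths
    return AnnotatedInteractions(interactions, annot_beds, completely_contains=['gene'])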
def beds_to_boolean(beds, ref=None, beds_sorted=False, ref_sorted=False,
**kwargs):
"""
Compare a list of bed files or BedTool objects to a reference bed file and
create a boolean matrix where each row is an interval and each column is a 1
if that file has an interval that overlaps the row interval and a 0
otherwise. If no reference bed is provided, the provided bed files will be
merged into a single bed and compared to that.
Parameters
----------
beds : list
List of paths to bed files or BedTool objects.
ref : str or BedTool
Reference bed file to compare against. If no reference bed is provided,
the provided bed files will be merged into a single bed and compared to
that.
beds_sorted : boolean
Whether the bed files in beds are already sorted. If False, all bed
files in beds will be sorted.
ref_sorted : boolean
Whether the reference bed file is sorted. If False, ref will be sorted.
names : list of strings
Names to use for columns of output files. Overrides define_sample_name
if provided.
define_sample_name : function that takes string as input
Function mapping filename to sample name (or basename). For instance,
you may have the basename in the path and use a regex to extract it.
The basenames will be used as the column names. If this is not provided,
the columns will be named as the input files.
Returns
-------
out : pandas.DataFrame
Boolean data frame indicating whether each bed file has an interval
that overlaps each interval in the reference bed file.
"""
beds = copy.deepcopy(beds)
fns = []
for i,v in enumerate(beds):
if type(v) == str:
fns.append(v)
beds[i] = pbt.BedTool(v)
else:
fns.append(v.fn)
if not beds_sorted:
beds[i] = beds[i].sort()
names = _sample_names(fns, kwargs)
if ref:
if type(ref) == str:
ref = pbt.BedTool(ref)
if not ref_sorted:
ref = ref.sort()
else:
ref = combine(beds)
ind = []
for r in ref:
ind.append('{}:{}-{}'.format(r.chrom, r.start, r.stop))
bdf = pd.DataFrame(0, index=ind, columns=names)
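# Usage sketch added for illustration (the rest of the function body is truncated in this
# dump, and the file names are hypothetical). With no reference bed, the peak files are
# merged into one reference and the result is a 0/1 frame indexed by its intervals:
#
#     out = beds_to_boolean(['sampleA_peaks.bed', 'sampleB_peaks.bed'])
#     # out.columns -> one column per input bed (or the provided names)
#     # out.loc['chr1:100-600'] -> 1 where that file overlaps the interval, else 0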
import json
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from iteration_utilities import flatten
from define_collection_wave import folder
from helpers import create_folder, cleanhtml
path_mcdonalds = create_folder('1_McDonalds', folder)
def parse_json(dat):
fileName = path_mcdonalds + '/item_' + str(item_id) + '.json'
with open(fileName, 'w') as f:
json.dump(dat, f)
item_type = dat.get('item').get('item_type')
nutrients = dat.get('item').get('nutrient_facts').get('nutrient')
nutrient_dic = {}
for i in range(len(nutrients)):
nutrientName = nutrients[i]['nutrient_name_id']
nutrientQuantity = nutrients[i]['value']
nutrientHundred_g_per_product = nutrients[i]['hundred_g_per_product']
nutrient_uom = nutrients[i]['uom']
nutrient_adultDV = nutrients[i]['adult_dv']
nutrient_childDV = nutrients[i]['child_dv']
nutrient_dic.update({nutrientName + '_Quantity': nutrientQuantity,
nutrientName + '_100_g_per_product': nutrientHundred_g_per_product,
nutrientName + '_uom': nutrient_uom, nutrientName + '_adultDV': nutrient_adultDV,
nutrientName + '_childDV': nutrient_childDV})
dat_dict = {
'Product_Name': dat.get('item').get('item_name'),
'Product_Title': dat.get('item').get('item_meta_title'),
'Product_Description': dat.get('item').get('description'),
'Product_Ingredients': cleanhtml(dat.get('item').get('item_ingredient_statement')),
'Product_Category': dat.get('item').get('default_category').get('category').get('name') if dat.get('item').get(
'default_category') else 'NA',
'item_type': item_type
}
dat_dict.update(nutrient_dic)
return dat_dict
def parse_combo(dat):
# combo meals --> get individual nutrient information and add them together
fileName = path_mcdonalds + '/item_' + str(item_id) + '.json'
with open(fileName, 'w') as f:
json.dump(dat, f)
item_type = dat.get('item').get('item_type')
components = dat.get('item').get('components').get('component')
components_list = []
for component in components:
component_id = component.get('id')
url_component = f'https://www.mcdonalds.com/wws/json/getItemDetails.htm?country=UK&language=en&showLiveData=true&item={component_id}'
dat_component = requests.get(url_component).json()
parse_component = parse_json(dat_component)
if float(parse_component.get('energy_kcal_100_g_per_product'))*100 !=0:
parse_component.update({'servingweight': float(parse_component.get('energy_kcal_Quantity'))/float(parse_component.get('energy_kcal_100_g_per_product'))*100})
else:
parse_component.update({'servingweight': None})
components_list.append(parse_component)
# add the quantities (absolute energy) and % DV
nutrients = {}
for var in [key for key in components_list[0].keys() if 'Quantity' in key or 'DV' in key]:
try:
nutrients[var] = sum([float(component.get(var)) for component in components_list])
except:
nutrients[var] = None
# weighted density
for var in [key for key in components_list[0].keys() if '100_g_per_product'in key]:
try:
nutrients[var] = np.average([float(component.get(var)) for component in components_list], weights=[float(component.get('servingweight')) for component in components_list])
except:
pass
# keep the same unit of measure
for var in [key for key in components_list[0].keys() if 'uom'in key]:
nutrients[var] = components_list[0].get(var)
dat_dict = {
'Product_Name': dat.get('item').get('item_name'),
'Product_Title': dat.get('item').get('item_meta_title'),
'Product_Description': ', '.join([component.get('Product_Name') for component in components_list]),
'Product_Ingredients': cleanhtml(dat.get('item').get('item_ingredient_statement')),
'Product_Category': dat.get('item').get('default_category').get('category').get('name') if dat.get('item').get(
'default_category') else 'NA',
'item_type': item_type
}
dat_dict.update(nutrients)
return(dat_dict)
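# Worked example added for illustration (the numbers are made up). For a combo of two
# components, absolute nutrient quantities are summed while per-100g densities are
# averaged, weighted by each component's estimated serving weight (kcal / kcal-per-100g * 100):
def _demo_combo_weighting():
    kcal = [250.0, 150.0]          # energy_kcal_Quantity of each component
    kcal_per_100g = [500.0, 50.0]  # energy_kcal_100_g_per_product of each component
    serving_weight = [q / d * 100 for q, d in zip(kcal, kcal_per_100g)]  # [50.0, 300.0] grams
    total_kcal = sum(kcal)                                               # 400.0 kcal
    weighted_density = np.average(kcal_per_100g, weights=serving_weight)
    # (500*50 + 50*300) / 350 ~= 114.3 kcal per 100 g for the combined meal
    return total_kcal, weighted_density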
mcdonalds_url = 'https://www.mcdonalds.com/gb/en-gb/menu.html'
r = requests.get(mcdonalds_url)
soup = BeautifulSoup(r.text, 'html.parser')
# categories
categories = soup.find_all('a',{"class":"link key-arrow-move"})
category_links = ['https://www.mcdonalds.com' + category.get('href') for category in categories]
category_links = set(category_links)
item_ids = []
for category_link in category_links:
#print(category_link)
r = requests.get(category_link)
soup = BeautifulSoup(r.text,'html.parser')
items = soup.find_all('a', {"class": 'categories-item-link'}) #the new path
# old path
if len(items) == 0:
items = soup.find_all('a', {"class": 'mcd-category-page__item-link'})
ids = [item['data-at'].split(':')[3] for item in items[:-1]]
#print(len(ids))
item_ids.append(ids)
item_ids = list(flatten(item_ids))
item_ids = [item_id.replace('a','') for item_id in item_ids]
item_ids = set(item_ids)
mcdonalds_list = []
additional_items = []
for item_id in item_ids:
url = f'https://www.mcdonalds.com/wws/json/getItemDetails.htm?country=UK&language=en&showLiveData=true&item={item_id}'
dat = requests.get(url).json()
if dat.get('item').get('relation_types'):
related_items = dat.get('item').get('relation_types').get('relation_type')[0].get('related_items').get(
'related_item')
related_itemids = [related_item['id'] for related_item in related_items if related_item['id'] != int(item_id)]
additional_items.extend(related_itemids)
try:
dict_temp = parse_json(dat)
except:
# likely a combo meal (e.g., happy meal with different components)
dict_temp = parse_combo(dat)
dict_temp.update({'item_id': item_id})
mcdonalds_list.append(dict_temp)
for item_id in additional_items:
url = f'https://www.mcdonalds.com/wws/json/getItemDetails.htm?country=UK&language=en&showLiveData=true&item={item_id}'
dat = requests.get(url).json()
dict_temp = parse_json(dat)
dict_temp.update({'item_id': item_id})
mcdonalds_list.append(dict_temp)
mcdonalds = pd.DataFrame(mcdonalds_list)
# -*- coding: utf-8 -*-
"""Copy of Mohit Code.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1eurrCuSALja61q0dT-KPRHK1o-47hWIN
"""
from __future__ import print_function
import tensorflow.keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import datetime
import os
import numpy as np
import sklearn
from sklearn.metrics import f1_score, accuracy_score, classification_report
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import pickle, gzip
import sys
# import sys,shutil,datetime,pickle,codecs,tempfile, gzip
# import numpy as np
# import scipy as sp
import pandas as pd
# import matplotlib as mpl
# import cv2
# import sklearn
# import skimage
# import tensorflow as tf
# import keras
from sklearn.cluster import KMeans, MiniBatchKMeans
def scale(X,testing=False,mode='standard',a=None,b=None):
#X=np.nan_to_num
X=np.array(X)
if mode=='scale':
if(not testing):
mx,mn=X.max(axis=0),X.min(axis=0)
else:
mx,mn=b,a
mx=np.where(mx==mn,mx+1,mx)
X=(X-mn)/(mx-mn)
if(testing):return X
return X,mn,mx
elif mode=='standard':
if(not testing):
mean,std=X.mean(axis=0),X.std(axis=0)
else:
mean,std=a,b
std=np.where(std==0,1,std)
X=(X-mean)/std
if(testing):return X
return X,mean,std
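# Worked example added for illustration (values are made up). 'standard' mode returns the
# z-scored array plus the column means/stds, which can be reused on test data by passing
# testing=True together with a=mean and b=std:
def _demo_scale():
    X_train = np.array([[0.0, 10.0], [2.0, 20.0], [4.0, 30.0]])
    X_scaled, mean, std = scale(X_train, mode='standard')
    # mean == [2., 20.]; each column of X_scaled now has mean 0 and unit std
    X_test = scale(np.array([[2.0, 25.0]]), testing=True, mode='standard', a=mean, b=std)
    return X_scaled, X_test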
def preprocess(X,model_name,mode='standard',doScale=True,testing=False):
if(doScale):
if(not testing):
X,a,b=scale(X,testing,mode=mode)
if not os.path.isdir(model_name+"_MODEL"):
os.makedirs(model_name+"_MODEL")
np.save('{0}_MODEL/A'.format(model_name),a)
np.save('{0}_MODEL/B'.format(model_name),b)
return X
else:
a=np.load('{0}_MODEL/A.npy'.format(model_name)).tolist()
b=np.load('{0}_MODEL/B.npy'.format(model_name)).tolist()
X=scale(X,testing,'standard',a,b)
return X
def unpickle(file,is_bytes=True):
with open(file, 'rb') as fp:
dict = pickle.load(fp, encoding='bytes')
return dict
def load_mnist(path,kind):
"""Load MNIST data from `path`."""
labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)
with gzip.open(labels_path, 'rb') as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
with gzip.open(images_path, 'rb') as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)
images=pd.DataFrame(images)
images = np.array( [np.array(x).reshape((28,28,1)) for i,x in images.iterrows()] )
return images, labels
def load_cifar(data_path = 'data/CIFAR-10'):
print('path',data_path)
X,Y,target_names = None, None, None
for file_name in os.listdir(data_path):
if 'data_batch' in file_name:
temp = unpickle( os.path.join(data_path,file_name) )
X = pd.concat( (pd.DataFrame(temp[b'data'])
#!/usr/bin/env python3
import unittest
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from datafold.dynfold.transform import (
TSCApplyLambdas,
TSCFeaturePreprocess,
TSCFiniteDifference,
TSCIdentity,
TSCPolynomialFeatures,
TSCPrincipalComponent,
TSCRadialBasis,
TSCTakensEmbedding,
TSCTransformerMixin,
)
from datafold.pcfold.kernels import *
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
def _all_tsc_transformers():
# only finds the ones that are importated (DMAP e.g. is not here)
print(TSCTransformerMixin.__subclasses__())
class TestTSCTransform(unittest.TestCase):
def _setUp_simple_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
self.simple_df = pd.DataFrame(np.random.rand(9, 2), index=idx, columns=col)
def _setUp_takens_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
# Requires non-random values
self.takens_df_short = pd.DataFrame(
np.arange(18).reshape([9, 2]), index=idx, columns=col
)
n_samples_timeseries = 100
idx = pd.MultiIndex.from_product(
[np.array([0, 1]), np.arange(n_samples_timeseries)]
)
self.takens_df_long = pd.DataFrame(
np.random.rand(n_samples_timeseries * 2, 2), index=idx, columns=col
)
def setUp(self) -> None:
self._setUp_simple_df()
self._setUp_takens_df()
def test_is_valid_sklearn_estimator(self):
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.estimator_checks import check_estimator
TEST_ESTIMATORS = (
TSCIdentity(),
TSCPrincipalComponent(),
TSCFeaturePreprocess(sklearn_transformer=MinMaxScaler()),
TSCFeaturePreprocess(sklearn_transformer=StandardScaler()),
TSCPolynomialFeatures(),
)
for test_estimator in TEST_ESTIMATORS:
for estimator, check in check_estimator(test_estimator, generate_only=True):
try:
check(estimator)
except Exception as e:
print(estimator)
print(check)
raise e
def test_identity0(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity()
pdtest.assert_frame_equal(_id.fit_transform(tsc), tsc)
pdtest.assert_frame_equal(_id.inverse_transform(tsc), tsc)
def test_identity1(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity(include_const=True)
tsc_plus_const = tsc.copy(deep=True)
tsc_plus_const["const"] = 1
pdtest.assert_frame_equal(_id.fit_transform(tsc.copy()), tsc_plus_const)
pdtest.assert_frame_equal(_id.inverse_transform(tsc_plus_const), tsc)
def test_identity2(self):
data = np.random.rand(5, 5)
data_wo_const = TSCIdentity(include_const=False).fit_transform(data)
data_plus_const = TSCIdentity(include_const=True).fit_transform(data)
nptest.assert_equal(data, data_wo_const)
nptest.assert_equal(data_plus_const, np.column_stack([data, np.ones(5)]))
def test_identity3(self):
data = TSCDataFrame(self.simple_df)
data_wo_const = TSCIdentity(
include_const=False, rename_features=True
).fit_transform(data)
data_with_const = TSCIdentity(
include_const=True, rename_features=True
).fit_transform(data)
data = data.add_suffix("_id")
pdtest.assert_index_equal(data.columns, data_wo_const.columns)
data["const"] = 1
pdtest.assert_index_equal(data.columns, data_with_const.columns)
def test_scale_min_max(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("min-max")
scaled_tsc = scale.fit_transform(tsc_df)
# sanity check:
nptest.assert_allclose(scaled_tsc.min().to_numpy(), np.zeros(2), atol=1e-16)
nptest.assert_allclose(scaled_tsc.max().to_numpy(), np.ones(2), atol=1e-16)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_scale_standard(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("standard")
scaled_tsc = scale.fit_transform(tsc_df)
nptest.assert_array_equal(
scaled_tsc.to_numpy(),
StandardScaler(with_mean=True, with_std=True).fit_transform(
tsc_df.to_numpy()
),
)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_sklearn_scaler(self):
tsc_df = TSCDataFrame(self.simple_df)
from sklearn.preprocessing import (
MaxAbsScaler,
PowerTransformer,
QuantileTransformer,
RobustScaler,
)
# each tuple has the class and a dictionary with the init-options
scaler = [
(MaxAbsScaler, dict()),
(PowerTransformer, dict(method="yeo-johnson")),
(PowerTransformer, dict(method="box-cox")),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="uniform"),
),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="normal"),
),
(RobustScaler, dict()),
]
for cls, kwargs in scaler:
scale = TSCFeaturePreprocess(sklearn_transformer=cls(**kwargs))
tsc_transformed = scale.fit_transform(tsc_df)
# Check the underlying array equals:
nptest.assert_array_equal(
cls(**kwargs).fit_transform(tsc_df.to_numpy()),
tsc_transformed.to_numpy(),
)
# check inverse transform is equal the original TSCDataFrame:
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(tsc_transformed))
def test_polynomial_feature_transform01(self):
from sklearn.preprocessing import PolynomialFeatures
tsc = TSCDataFrame(self.simple_df)
for degree in [2, 3, 4]:
for include_bias in [True, False]:
actual = TSCPolynomialFeatures(
degree=degree, include_bias=include_bias, include_first_order=True
).fit_transform(tsc)
expected = PolynomialFeatures(
degree=degree, include_bias=include_bias
).fit_transform(tsc.to_numpy())
nptest.assert_array_equal(actual.to_numpy(), expected)
def test_polynomial_feature_transform02(self):
tsc = TSCDataFrame(self.simple_df)
for include_first_order in [True, False]:
poly = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=include_first_order
).fit(tsc)
actual = poly.transform(tsc)
expected = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_frame_equal(actual, expected)
def test_polynomial_feature_transform03(self):
tsc = TSCDataFrame(self.simple_df)
actual = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["1", "A^2", "A B", "B^2"], name="feature"),
)
actual = TSCPolynomialFeatures(
degree=2, include_bias=False, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["A^2", "A B", "B^2"], name="feature"),
)
def test_apply_lambda_transform01(self):
# use lambda identity function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform02(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc.apply(np.square, axis=0, raw=True)
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform03(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x, np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
identity = tsc
identity.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
squared = tsc.apply(np.square, axis=0, raw=True)
squared.columns = pd.Index(
["A_lambda1", "B_lambda1"], name=TSCDataFrame.tsc_feature_col_name
)
expected = pd.concat([identity, squared], axis=1)
import sys
import numpy as np
from datetime import date, timedelta
from pandas import to_datetime
import Temp_linear as tmpl
import cross_validate_temp as cv
#DEFINE CONSTANTS-------------------------------------------------------------
MASTER_DIR = r'/home/hawaii_climate_products_container/preliminary/'
WORKING_MASTER_DIR = MASTER_DIR + r'air_temp/working_data/'
DEP_MASTER_DIR = MASTER_DIR + r'air_temp/daily/dependencies/'
RUN_MASTER_DIR = MASTER_DIR + r'air_temp/data_outputs/'
PRED_DIR = DEP_MASTER_DIR + r'predictors/'
RAW_DATA_DIR = RUN_MASTER_DIR + r'tables/station_data/daily/raw/statewide/' #Location of station and predictor data for model fit
CV_OUTPUT_DIR = RUN_MASTER_DIR + r'tables/loocv/daily/county/'
ICODE_LIST = ['BI','KA','MN','OA']
PARAM_LIST = ['dem_250']
#END CONSTANTS----------------------------------------------------------------
if __name__=="__main__":
if len(sys.argv) > 1:
input_date = sys.argv[1]
dt = to_datetime(input_date)
"""
Author: <NAME> - @VFermat
"""
import pandas as pd
from pandas.tseries.offsets import MonthEnd
class HXLFactor(object):
def calculate_factors(self, prices, dividends, assets, roe, marketcap):
# Lining up dates to end of month
prices.columns = prices.columns + MonthEnd(0)
dividends.columns = dividends.columns + MonthEnd(0)
assets.columns = assets.columns + MonthEnd(0)
roe.columns = roe.columns + MonthEnd(0)
marketcap.columns = marketcap.columns + MonthEnd(0)
dividends, assets, roe = self._padronize_columns(prices.columns,
dividends,
assets,
roe)
self.securities = {
'assets': assets,
'ROE': roe,
'price': prices,
'marketcap': marketcap,
'dividends': dividends
}
# Gathering info
self.securities = self._get_IA_info(self.securities)
self.securities = self._get_return(self.securities)
self.securities = self._get_benchmarks(self.securities)
@staticmethod
def _get_benchmarks(securities):
pass
@staticmethod
def _get_return(securities):
"""
Calculates the return for each security over time and related information.
Parameters
----------
securities : Dict like
A dictionary containing the information on stocks.
Return
----------
n_securities : Dict
Updated dict containing the return for each security over time.
"""
n_securities = securities.copy()
n_securities['lprice'] = n_securities['price'].shift(1, axis=1)
n_securities['pdifference'] = n_securities['price'] - n_securities['lprice']
n_securities['gain'] = n_securities['dividends'] + n_securities['pdifference']
n_securities['return'] = n_securities['gain']/n_securities['lprice']
# Creates a return field which is shifted one month back. Will be used
# when calculating the factors
n_securities['lreturn'] = n_securities['return'].shift(-1, axis=1)
return n_securities
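# Worked example added for illustration (the numbers are made up). With dates as columns,
# a price move from 100 to 105 plus a 2.0 dividend gives (2.0 + 105 - 100) / 100 = 0.07,
# i.e. a 7% total return for that month; 'lreturn' is the same series shifted one month
# back so each date is aligned with the following month's realised return.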
@staticmethod
def _get_IA_info(securities):
"""
Calculates the Investment over Assets ratio and related information
Parameters
----------
securities : Dict like
A dict containing the information on stocks.
Return
----------
n_securities : Dict
Updated dict containing Investment over Assets ratio and related information.
"""
n_securities = securities.copy()
# Calculates 1-year-lagged-assets
n_securities['lassets'] = n_securities['assets'].shift(12, axis=1)
# Calculates Investment
n_securities['investment'] = n_securities['assets'] - n_securities['lassets']
# Calculates Investment over Assets ratio
n_securities['I/A'] = n_securities['investment']/n_securities['lassets']
return n_securities
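# Worked example added for illustration (the numbers are made up). Assets of 120 today
# against 100 twelve months earlier give investment = 120 - 100 = 20 and
# I/A = 20 / 100 = 0.20.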
@staticmethod
def _padronize_columns(pattern, dividends, assets, ROE):
"""
Standardizes columns for data that is not released monthly, so that all frames share
the same monthly date index and can be manipulated together without alignment issues.
Parameters
----------
pattern : Array like
Array containing the pattern for the columns
dividends : DataFrame like
Dataframe containing information on dividends
assets : DataFrame like
Dataframe containing information on assets
ROE : DataFrame like
Dataframe containing information on ROE
Return
----------
ndividends : Dataframe like
Updated Dataframe containing information on dividends
nassets : Dataframe like
Updated Dataframe containing information on assets
n_roe : Dataframe like
Updated Dataframe containing information on ROE
"""
ndividends = pd.DataFrame(index=dividends.index)
nassets = pd.DataFrame(index=assets.index)
n_roe = pd.DataFrame(index=ROE.index)
"""Tests for the sdv.constraints.base module."""
import warnings
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from copulas.multivariate.gaussian import GaussianMultivariate
from copulas.univariate import GaussianUnivariate
from rdt.hyper_transformer import HyperTransformer
from sdv.constraints.base import Constraint, _get_qualified_name, get_subclasses, import_object
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import ColumnFormula, UniqueCombinations
def test__get_qualified_name_class():
"""Test the ``_get_qualified_name`` function, if a class is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a class.
Input:
- A class.
Output:
- The class qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(Constraint)
# Assert
expected_name = 'sdv.constraints.base.Constraint'
assert fully_qualified_name == expected_name
def test__get_qualified_name_function():
"""Test the ``_get_qualified_name`` function, if a function is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a function.
Input:
- A function.
Output:
- The function qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(_get_qualified_name)
# Assert
expected_name = 'sdv.constraints.base._get_qualified_name'
assert fully_qualified_name == expected_name
def test_get_subclasses():
"""Test the ``get_subclasses`` function.
The ``get_subclasses`` function is expected to:
- Recursively find subclasses for the class object passed.
Setup:
- Create three classes, Parent, Child and GrandChild,
which inherit of each other hierarchically.
Input:
- The Parent class.
Output:
- Dict of the subclasses of the class: ``Child`` and ``GrandChild`` classes.
"""
# Setup
class Parent:
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
# Run
subclasses = get_subclasses(Parent)
# Assert
expected_subclasses = {
'Child': Child,
'GrandChild': GrandChild
}
assert subclasses == expected_subclasses
def test_import_object_class():
"""Test the ``import_object`` function, when importing a class.
The ``import_object`` function is expected to:
- Import a class from its qualified name.
Input:
- Qualified name of the class.
Output:
- The imported class.
"""
# Run
obj = import_object('sdv.constraints.base.Constraint')
# Assert
assert obj is Constraint
def test_import_object_function():
"""Test the ``import_object`` function, when importing a function.
The ``import_object`` function is expected to:
- Import a function from its qualifed name.
Input:
- Qualified name of the function.
Output:
- The imported function.
"""
# Run
imported = import_object('sdv.constraints.base.import_object')
# Assert
assert imported is import_object
class TestConstraint():
def test__identity(self):
"""Test ```Constraint._identity`` method.
``_identity`` method should return whatever it is passed.
Input:
- anything
Output:
- Input
"""
# Run
instance = Constraint('all')
output = instance._identity('input')
# Asserts
assert output == 'input'
def test___init___transform(self):
"""Test ```Constraint.__init__`` method when 'transform' is passed.
If 'transform' is given, the ``__init__`` method should replace the ``is_valid`` method
with an identity and leave ``transform`` and ``reverse_transform`` untouched.
Input:
- transform
Side effects:
- is_valid == identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='transform')
# Asserts
assert instance.filter_valid == instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___reject_sampling(self):
"""Test ``Constraint.__init__`` method when 'reject_sampling' is passed.
If 'reject_sampling' is given, the ``__init__`` method should replace the ``transform``
and ``reverse_transform`` methods with an identity and leave ``is_valid`` untouched.
Input:
- reject_sampling
Side effects:
- is_valid != identity
- transform == identity
- reverse_transform == identity
"""
# Run
instance = Constraint(handling_strategy='reject_sampling')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform == instance._identity
assert instance.reverse_transform == instance._identity
def test___init___all(self):
"""Test ``Constraint.__init__`` method when 'all' is passed.
If 'all' is given, the ``__init__`` method should leave ``transform``,
``reverse_transform`` and ``is_valid`` untouched.
Input:
- all
Side effects:
- is_valid != identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='all')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___not_known(self):
"""Test ``Constraint.__init__`` method when an unknown ``handling_strategy`` is passed.
If an unknown ``handling_strategy`` is given, a ValueError is raised.
Input:
- not_known
Side effects:
- ValueError
"""
# Run
with pytest.raises(ValueError):
Constraint(handling_strategy='not_known')
def test_fit(self):
"""Test the ``Constraint.fit`` method.
The base ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._fit = Mock()
# Run
instance.fit(table_data)
# Assert
instance._fit.assert_called_once_with(table_data)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
def test_fit_gaussian_multivariate_correct_distribution(self, gm_mock):
"""Test the ``GaussianMultivariate`` from the ``Constraint.fit`` method.
The ``GaussianMultivariate`` is expected to be called with default distribution
set as ``GaussianUnivariate``.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.assert_called_once_with(distribution=GaussianUnivariate)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
@patch('sdv.constraints.base.HyperTransformer', spec_set=HyperTransformer)
def test_fit_trains_column_model(self, ht_mock, gm_mock):
"""Test the ``Constraint.fit`` method trains the column model.
When ``fit_columns_model`` is True and there are multiple ``constraint_columns``,
the ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
- Create ``_hyper_transformer``.
- Create ``_column_model`` and train it.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.return_value.fit.assert_called_once()
calls = ht_mock.return_value.fit_transform.mock_calls
args = calls[0][1]
assert len(calls) == 1
pd.testing.assert_frame_equal(args[0], table_data)
def test_transform(self):
"""Test the ``Constraint.transform`` method.
It is an identity method for completion, to be optionally
overwritten by subclasses.
The ``Constraint.transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.transform('input')
# Assert
assert output == 'input'
def test_transform_calls__transform(self):
"""Test that the ``Constraint.transform`` method calls ``_transform``.
The ``Constraint.transform`` method is expected to:
- Return value returned by ``_transform``.
Input:
- Anything
Output:
- Result of ``_transform(input)``
"""
# Setup
constraint_mock = Mock()
constraint_mock.fit_columns_model = False
constraint_mock._transform.return_value = 'the_transformed_data'
constraint_mock._validate_columns.return_value = pd.DataFrame()
# Run
output = Constraint.transform(constraint_mock, 'input')
# Assert
assert output == 'the_transformed_data'
def test_transform_model_disabled_any_columns_missing(self):
"""Test the ``Constraint.transform`` method with invalid data.
If ``table_data`` is missing any columns and ``fit_columns_model``
is False, it should raise a ``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c']))
def test_transform_model_enabled_all_columns_missing(self):
"""Test the ``Constraint.transform`` method with missing columns.
If ``table_data`` is missing all of the ``constraint_columns`` and
``fit_columns_model`` is True, it should raise a
``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame())
def test_transform_model_enabled_some_columns_missing(self):
"""Test that the ``Constraint.transform`` method uses column model.
If ``table_data`` is missing some of the ``constraint_columns``,
the ``_column_model`` should be used to sample the rest and the
data should be transformed.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
conditions = [
pd.DataFrame([[5, 1, 2]], columns=['a', 'b', 'c']),
pd.DataFrame([[6, 3, 4]], columns=['a', 'b', 'c'])
]
transformed_conditions = [
pd.DataFrame([[1]], columns=['b']),
pd.DataFrame([[3]], columns=['b'])
]
instance._columns_model.sample.return_value = pd.DataFrame([
[1, 2, 3]
], columns=['b', 'c', 'a'])
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform.side_effect = conditions
# Run
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
transformed_data = instance.transform(data)
# Assert
expected_transformed_data = pd.DataFrame([[1, 2, 3]], columns=['b', 'c', 'a'])
expected_result = pd.DataFrame([
[5, 1, 2],
[6, 3, 4]
], columns=['a', 'b', 'c'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 1})
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 3})
reverse_transform_calls = instance._hyper_transformer.reverse_transform.mock_calls
pd.testing.assert_frame_equal(reverse_transform_calls[0][1][0], expected_transformed_data)
pd.testing.assert_frame_equal(reverse_transform_calls[1][1][0], expected_transformed_data)
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling(self):
"""Test the ``Constraint.transform`` method's reject sampling.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows.
Setup:
- The ``_columns_model`` returns some valid_rows the first time,
and then the rest with the next call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])]
instance._columns_model.sample.side_effect = [
pd.DataFrame([
[1, 2],
[1, 3]
], columns=['a', 'b']),
pd.DataFrame([
[1, 4],
[1, 5],
[1, 6],
[1, 7]
], columns=['a', 'b']),
]
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform = lambda x: x
# Run
data = pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
transformed_data = instance.transform(data)
# Assert
expected_result = pd.DataFrame([
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6]
], columns=['a', 'b'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=5, conditions={'b': 1})
assert model_calls[1][2]['num_rows'] > 3
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling_error(self):
"""Test that the ``Constraint.transform`` method raises an error appropriately.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If it doesn't
get any valid rows in 100 tries, a ``ValueError`` is raised.
Setup:
- The ``_columns_model`` is fixed to always return an empty ``DataFrame``.
Input:
- Table with some missing columns.
Side Effect:
- ``ValueError`` raised.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = pd.DataFrame([[1]], columns=['b'])
instance._columns_model.sample.return_value = pd.DataFrame()
instance._hyper_transformer.transform.return_value = transformed_conditions
instance._hyper_transformer.reverse_transform.return_value = pd.DataFrame()
# Run / Assert
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
with pytest.raises(ValueError):
instance.transform(data)
def test_transform_model_enabled_reject_sampling_duplicates_valid_rows(self):
"""Test the ``Constraint.transform`` method's reject sampling fall back.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If after 100
tries, some valid rows are created but not enough, then the valid rows
are duplicated to meet the ``num_rows`` requirement.
Setup:
- The ``_columns_model`` returns some valid rows the first time, and then
an empty ``DataFrame`` for every other call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [
|
pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
|
pandas.DataFrame
|
import pandas as pd
from datetime import date, timedelta
filepath = "people.xlsx"
# Read the worksheet named "Sheet1" from the workbook
people = pd.read_excel(filepath, sheet_name="Sheet1")
print(people)
print("=====1=====")
# header=2 means reading starts from the 3rd row, which is equivalent to skipping the 2nd row
people1 = pd.read_excel(filepath, header=2, sheet_name="Sheet1")
print(people1)
print("=====2=====")
# skiprows skips the first few rows; usecols specifies which columns to use
people3 = pd.read_excel(filepath, sheet_name="Sheet1", skiprows=4, usecols="B:C")
print(people3)
print("=====3=====")
# Use the "id" column as the index; dtype sets the data type of specific columns
people2 =
|
pd.read_excel(filepath, sheet_name="Sheet1", index_col="id", dtype={"name": str, "data": str})
|
pandas.read_excel
|
# -*- coding: utf-8 -*-
import numpy as np
import cantera as ct
import pandas as pd
import re
import warnings
import copy
###################################
# 3b. output data analysis
###################################
def branching_ratios(df, solution, compound, production = False):
"""
This method looks at the consumption pathways of `compound` over
all time points in the data set.
It outputs a pandas.DataFrame which contains columns of pertinent reactions
and values of the branching ratio of each reaction, which is defined as
$BR_{i} = \frac{ROC_i}{\sum_{j=0}^{N} ROC_j}$
where $i$ is the reaction in question, $ROC$ is the rate of consumption of
the desired species, $N$ is the number of reactions, and $BR$ is the branching ratio.
df = dataframe of run data
solution = cantera solution object
compound = species string which you want to identify
production = if True, shows the reactions forming species X
This method only works on forward reactions
"""
reaction_dataframe = weight_reaction_dataframe_by_stoich_coefficients(df,solution,compound)
if not production:
#only keep consumption
consumption_terms = reaction_dataframe[reaction_dataframe < 0]
df = consumption_terms.dropna(axis='columns', how='all')
else:
production_terms = reaction_dataframe[reaction_dataframe > 0]
df = production_terms.dropna(axis='columns', how='all')
total = df.sum('columns')
branching_ratios = df.div(total,'index')
branching_ratios = branching_ratios.fillna(0)
#sort from most important
importance_index = branching_ratios.sum('index').sort_values(ascending=False)
branching_ratios = branching_ratios.reindex(importance_index.index,axis='columns')
return branching_ratios
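# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Assuming `df` is the run DataFrame and `solution` the cantera Solution object
# described in the docstring above, and that 'OH' is a species present in the
# mechanism, the result could be inspected like this:
def _example_branching_ratio_report(df, solution, species='OH'):
    # columns are already sorted most-important-first, so the leading entries of
    # the final row are the dominant consumption pathways at the last time point
    ratios = branching_ratios(df, solution, species)
    return ratios.iloc[-1].head()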
def consumption_pathways(solution,df,species, time = 'all'):
"""
returns the total rate of production for a particular species at the specified
time(s). Positive values indicate production, negative values indicate consumption.
If multiple times are given or the keyword 'all' is used, the output is a DataFrame
indexed by the various times. If only one time is supplied, the output is a
Series.
solution = cantera solution object
df = pandas dataframe of reactions
species = string of species
time = number describing the time points to determine consumption (or list of numbers)
"""
if time=='all':
time = list(df.index)
if isinstance(time,list):
# recursively run consumption_pathways
consumption_values = []
for t in time:
consumption_values.append(consumption_pathways(solution=solution,
df=df,
species=species,
time= t))
consumption_values = pd.DataFrame(consumption_values, index=time)
# sort by total sum of flux
sorted_index = consumption_values.sum('index').sort_values().keys()
return consumption_values[sorted_index]
# the time is not a list, return a pd.Series
try:
reactions_weighted = find_reactions(solution, df,species).loc[time,:]
except KeyError:
reactions_weighted = find_reactions(solution, df,species).loc[return_nearest_time_index(time,df.index, index=False),:]
# weight by stoichiometric_coefficients
stoich_coeffs = [obtain_stoichiometry_of_species(solution, species, reaction) for reaction in reactions_weighted.index]
stoich_coeff_dict = pd.Series(dict(zip(reactions_weighted.index,stoich_coeffs)))
# pandas was having some bug, so manually rewrote the line below
#reactions_weighted *= stoich_coeff_dict
for index in stoich_coeff_dict.index:
reactions_weighted[index] *= stoich_coeff_dict[index]
return reactions_weighted.sort_values()
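# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Hypothetical call: the returned Series is sorted by flux, with negative entries
# marking consumption pathways and positive entries marking production pathways.
# `df`, `solution`, the species name and the time value are placeholders.
def _example_consumption_at_time(df, solution, species='OH', time=1e-3):
    fluxes = consumption_pathways(solution, df, species, time=time)
    # the most negative values are the strongest consumption channels at `time`
    return fluxes.head()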
def quasi_steady_state(df, species):
"""
This method outputs the key parameter, $\frac{|ROP-ROC|}{ROP}$, in quasi steady state
approximation.
df = pd.DataFrame containing get_rop_and_roc_series
species = string of species to use
returns a pd.Series of the QSS approximation: $\frac{|ROP-ROC|}{ROP}$
"""
return (df['production',species] - df['consumption',species]).abs() / df['production',species]
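# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal self-contained check of the QSS parameter on synthetic data. The
# two-level column layout ('production'/'consumption', species) mirrors what
# quasi_steady_state expects; the numbers below are made up for illustration only.
def _example_quasi_steady_state():
    demo = pd.DataFrame({('production', 'OH'): [2.0, 4.0, 8.0],
                         ('consumption', 'OH'): [1.9, 3.9, 7.9]})
    # |ROP - ROC| / ROP; values much smaller than 1 suggest 'OH' is near steady state
    return quasi_steady_state(demo, 'OH')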
def compare_species_profile_at_one_time(desired_time, df1,df2,
minimum_return_value=1e-13,
time_string = 'time (s)'):
"""
compares the species profile between two models closest to the desired time
returns a pandas.Series object with the relative species concentrations
given by `compare_2_data_sets`
"""
time_index_1 = return_nearest_time_index(desired_time,df1[time_string])
time_index_2 = return_nearest_time_index(desired_time,df2[time_string])
time_slice_1 = find_species(df1).loc[time_index_1]
time_slice_2 = find_species(df2).loc[time_index_2]
return _compare_2_data_sets(time_slice_1,time_slice_2,minimum_return_value)
def _compare_2_data_sets(model1, model2, minimum_return_value = 1000,diff_returned=0.0):
"""given two pd.Series of data, returns a pd.Series with the relative
differences between the two sets. This requires one of the values to be
above the `minimum_return_cutoff` and the difference to be above `diff_returned`
The difference is returned as $\frac{model1 - model2}{\min(model1,model2)}$.
Where the minimum merges the two datasets using the minimum value at each index.
"""
# align both series on their shared index and drop missing values
model1 = copy.deepcopy(model1)[model2.index].dropna()
model2 = copy.deepcopy(model2)[model1.index].dropna()
minimum_value =
|
pd.DataFrame({'model1':model1,'model2':model2})
|
pandas.DataFrame
|
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# All of the points have been extracted; generate ten subspaces around 10 of them
# Check that each subspace is a Space and lies within s; extract a point set with 32 and verify the generated counts are correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# Create a space, then pick a point in it and a distance to generate a sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
|
Timestamp('2040-12-29')
|
pandas.Timestamp
|
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
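# --- Illustrative sketch (editor's addition, not used by the fixtures below) ---
# A hypothetical call showing the intended output shape: one row per
# (at_date, knowledge_date) pair, with estimates pivoted per sid, forward-filled,
# and missing sids added as all-NaN columns. The dates and values are placeholders.
def _example_expected_df():
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp('2015-01-05'),
        sids=[0, 1],
        tuples=[(0, 100., pd.Timestamp('2015-01-05')),
                (0, 110., pd.Timestamp('2015-01-07'))],
        end_date=pd.Timestamp('2015-01-08'),
    )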
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as NextEstimate, but using a Blaze-backed loader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as PreviousEstimate, but using a Blaze-backed loader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
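# For reference, the product above expands to pairs such as
# (Timestamp('2015-01-09', tz='utc'), 1), (Timestamp('2015-01-09', tz='utc'), 2), ...
# so each critical date is exercised for both 1 and 2 announcements out.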
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
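# Illustrative note: with data sids [0, 10, 20] the expression above yields
# sids 0-19 plus 20, so sids such as 1-9 and 11-19 exist in the asset finder
# but carry no estimate data.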
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
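# Note (inferred from the calls in make_expected_timelines below):
# create_expected_df_for_factor_compute is partially applied with the window
# start date and the sid list; each later call passes a list of
# (sid, expected value, date the value applies from) tuples together with
# the end date that the expected snapshot corresponds to.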
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-09')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*1/4, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-12')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-13', '2015-01-14')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextWithSplitAdjustedWindows(NextWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp('2015-02-10')
test_start_date =
|
pd.Timestamp('2015-01-06', tz='utc')
|
pandas.Timestamp
|
# Author: <NAME> <<EMAIL>>
# License: MIT License
# Last Updated: 2020-02-02
'''
Module for loading NASA Li-ion Battery Data Set.
Dataset provided by the Prognostics CoE at NASA Ames.
This data can be found at:
- https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/
Functions:
- `_datevec2datetime()`: convert MATLAB datevecs to DateTime Objects
- `_get_metadata()`: get `types`, `start_times`, and `ambient_temps` for all cycles
- `_get_metadata_at()`: get `type`, `start_time`, and `ambient_temp` for a specific cycle
- `_cycle2df()`: convert raw cycle data to pandas DataFrame
- `read_battery()`: read `.mat` file for a given battery number and output pandas DataFrame
Private functions are not meant to be called outside of this Module.
How To Use This Module
======================
```
import pandas as pd
from read_nasa import read_battery
data = read_battery(5)
```
'''
# load .mat files
from scipy.io import loadmat
# convert MATLAB datevec format to datetime
from datetime import datetime
# store final output in pandas dataframe
from pandas import DataFrame, concat
# Fields measured for each operation
DATA_FIELDS = {
'charge': [
'Voltage_measured', 'Current_measured', 'Temperature_measured',
'Current_charge', 'Voltage_charge', 'Time'
],
'discharge': [
'Voltage_measured', 'Current_measured', 'Temperature_measured',
'Current_charge', 'Voltage_charge', 'Time', 'Capacity'
],
'impedance': [
'Sense_current', 'Battery_current', 'Current_ratio',
'Battery_impedance', 'Rectified_impedance', 'Re', 'Rct'
]
}
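# For reference: 'discharge' cycles carry the same measured signals as
# 'charge' plus a 'Capacity' value, while 'impedance' cycles use a separate
# set of fields.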
def _datevec2datetime(vec):
'''Convert MATLAB datevecs to Python DateTime Objects
MATLAB datevec example:
`[2008. , 5. , 22. , 21. , 48. , 39.015]`
Parameters:
- `vec`: list-like object in MATLAB datevec format
'''
return datetime(
year=int(vec[0]),
month=int(vec[1]),
day=int(vec[2]),
hour=int(vec[3]),
minute=int(vec[4]),
second=int(vec[5]),
microsecond=int((vec[5] - int(vec[5])) * 1e6)  # fractional seconds -> microseconds
)
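# Illustrative example (not part of the original module): the datevec shown
# in the docstring converts as
#   _datevec2datetime([2008., 5., 22., 21., 48., 39.015])
# giving a datetime for 2008-05-22 21:48:39 with the fractional 0.015 s
# carried into the microsecond field.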
def _get_metadata(cycles):
'''Get types, start_times, and ambient_temps for all cycles
Parameters:
- `cycles`: nested array-like structure in given format
'''
meta = dict()
# data stored in nested arrays...
meta['types'] = [arr[0] for arr in cycles['type'][0]]
# data stored in nested arrays...
# times in matlab datevec format
meta['start_times'] = [_datevec2datetime(arr[0]) for arr in cycles['time'][0]]
# data stored in nested arrays...
meta['ambient_temps'] = [arr[0][0] for arr in cycles['ambient_temperature'][0]]
return meta
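# Note on expected shapes (based on how scipy.io.loadmat nests MATLAB
# structs): meta['types'] is a list such as ['charge', 'discharge',
# 'impedance', ...] with one entry per cycle, aligned index-for-index with
# meta['start_times'] and meta['ambient_temps'].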
def _get_metadata_at(meta, i):
'''Get type, start_time, and ambient_temp for a specific cycle
Parameters:
- `meta`: metadata dictionary from `_get_metadata`
- `i`: cycle index, i.e. the ith cycle
'''
return (
meta['types'][i],
meta['start_times'][i],
meta['ambient_temps'][i]
)
def _cycle2df(cycle_data, meta, i):
'''Convert raw cycle data to pandas dataframe
Parameters:
- `cycle_data`: raw cycle data - nested array containing data for all fields
relevant for the cycle, according to the cycle type.
- `meta`: metadata from `_get_metadata`
- `i`: cycle index, i.e. the ith cycle
'''
dtype, start_time, ambient_temp = _get_metadata_at(meta, i)
fields = DATA_FIELDS[dtype]
# create data dict, looks like { field:array_like_data_for_field, ... }
cycle_dict = {field:cycle_data[0][0][j][0] for j,field in enumerate(fields)}
# If the data provided is just a single constant element ...
for col, data in cycle_dict.items():
if len(data) == 1:
cycle_dict[col] = data[0]
# Create DataFrame
df =
|
DataFrame(cycle_dict)
|
pandas.DataFrame
|
"""
Test cases for the wiutils.extractors.get_scientific_name function.
"""
import numpy as np
import pandas as pd
import pytest
from wiutils.extractors import get_scientific_name
@pytest.fixture(scope="module")
def images():
return pd.DataFrame(
{
"genus": ["Dasyprocta", np.nan, "No CV Result", "Unknown", "Odocoileus"],
"species": ["fuliginosa", np.nan, "No CV Result", "Unknown", np.nan],
}
)
def test_discard_genus(images):
result = get_scientific_name(images, keep_genus=False)
expected =
|
pd.Series(["Dasyprocta fuliginosa", np.nan, np.nan, np.nan, np.nan])
|
pandas.Series
|
import sqlite3
import datetime
import pytablewriter
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from scipy import stats
scaler = StandardScaler()
conn = sqlite3.connect('data.db')
c = conn.cursor()
def company_names():
companies = {}
cursor = c.execute('''SELECT * FROM companies''')
for row in cursor:
stock = row[0]
name = row[1]
if stock not in companies:
companies[stock] = pd.Series([stock, name], index=['Stock','Name'])
return pd.DataFrame(companies).T
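# Note (added): pd.DataFrame(companies) yields one column per ticker with index
# ['Stock', 'Name']; the transpose gives one row per ticker with those two columns.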
def create_sp_arrays():
df = {}
data = {}
companies = company_names()
cursor = c.execute('''SELECT * FROM stocks''')
for row in cursor:
stock = row[0]
price_date = datetime.datetime.strptime(row[1], '%Y-%m-%d').date()  # %m parses the month
opn = float(row[2])
high = float(row[3])
low = float(row[4])
close = float(row[5])
change = close - opn
volume = int(row[6])
shares = volume / opn
if stock not in data:
data[stock] = []
if price_date not in df:
df[price_date] =
|
pd.Series([stock, opn, close, high, low, volume, shares], index=['Stock','Open','Close','High','Low','Volume','Shares'])
|
pandas.Series
|
# coding=utf-8
# Author: <NAME>
# Date: June 17, 2020
#
# Description: Calculates entropy based on network PCA
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from cycler import cycler
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
from utils import ensurePathExists
from scipy import stats
import argparse
def in_ranges(x, bins):
return [((x >= lower) & (x <= upper)) for lower, upper in bins]
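# Illustrative example (added; the bins are hypothetical): because the bins
# overlap, a value can fall into more than one interval.
#   in_ranges(45, [(0, 90), (30, 120), (100, 180)]) -> [True, True, False]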
def compute_entropy(df_dec,
radius_window=1.0,
radius_overlap=0.1,
angle_window=30,
angle_overlap=15,
min_points=10,
n_cut_points=3,
components=9):
""" """
df_dec = df_dec.copy()
#
angle_items = int(angle_window / angle_overlap)
a = np.arange(-180, (181), angle_overlap)
angle_bins = [(i, j) for i, j in zip(a[0:-angle_items], a[angle_items:])]
n_bins = len(angle_bins)
max_entropy = stats.entropy((np.ones(shape=n_bins) / n_bins), base=2)
list_df_ent = []
#
for dim in range(1, (components + 1)):
print('Computing projection: {dim1:d} vs {dim2:d}'.format(dim1=dim, dim2=(dim + 1)))
#
cx = str(dim) + 'c'
cy = str(dim + 1) + 'c'
dist_label = '{cx:s}-{cy:s}-dist'.format(cx=cx, cy=cy)
angle_label = '{cx:s}-{cy:s}-angle'.format(cx=cx, cy=cy)
df_dec[dist_label] = np.hypot(df_dec[cx], df_dec[cy])
df_dec[angle_label] = np.degrees(np.arctan2(df_dec[cy], df_dec[cx]))
#
df_dec.sort_values(dist_label, ascending=True, inplace=True)
radius_max = df_dec[dist_label].max()
#
radius_items = int(radius_window / radius_overlap)
#
b = np.arange(0, (radius_max + radius_overlap), radius_overlap)
radius_intervals = [(s, e) for s, e in zip(b[0:-radius_items], b[radius_items:])]
# Loop radius intervals
r = []
for radius_start, radius_end in radius_intervals:
df_dian_tmp = df_dec.loc[(df_dec[dist_label] >= radius_start) & (df_dec[dist_label] <= radius_end), :]
dfc = df_dian_tmp[angle_label].apply(lambda x: pd.Series(in_ranges(x, angle_bins), angle_bins))
if len(dfc) > min_points:
dfp = (dfc.sum(axis=0) / dfc.sum().sum()).rename('prob').to_frame()
dfp['log2'] = dfp['prob'].apply(np.log2)
#
entropy = stats.entropy(dfp['prob'], base=2)
else:
entropy = np.nan
entropy_norm = entropy / max_entropy
r.append((dim, radius_start, radius_end, entropy, entropy_norm))
#
df_ent_tmp = pd.DataFrame(r, columns=['dim', 'radius-start', 'radius-end', 'entropy', 'entropy-norm'])
# Interpolation
df_ent_tmp['entropy-smooth'] = df_ent_tmp['entropy-norm'].interpolate(method='linear', limit_direction='both')
# Rank
df_ent_tmp['radius-rank'] = df_ent_tmp['radius-start'].rank(method='min')
df_ent_tmp['entropy-rank'] = df_ent_tmp['entropy-norm'].rank(method='min')
# Rank Sum
df_ent_tmp['rank-sum'] = ((df_ent_tmp['radius-rank']) + (df_ent_tmp['entropy-rank']))
# Define cut points
cut_points = []
# Index % Sort
df_cp = df_ent_tmp.sort_values('rank-sum').loc[(df_ent_tmp['radius-start'] > 1.0), :]
possible_rank = 1
for possible_id, row in df_cp.iterrows():
possible_value = row['radius-start']
if not any([True if abs(possible_value - existing_value) <= 1.0 else False for existing_id, existing_value, existing_rank in cut_points]):
cut_points.append((possible_id, possible_value, possible_rank))
possible_rank += 1
if len(cut_points) >= n_cut_points:
break
#
dict_cut_points = {idx: rank for idx, value, rank in cut_points}
df_ent_tmp['cut-rank'] = df_ent_tmp.index.map(dict_cut_points)
#
# Add to list
list_df_ent.append(df_ent_tmp)
#
df_ent = pd.concat(list_df_ent, axis='index')
#
return df_ent, df_dec
if __name__ == '__main__':
#
# Args
#
parser = argparse.ArgumentParser()
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=celltypes, help="Cell type. Must be either 'spermatocyte' or 'enterocyte'. Defaults to spermatocyte")
parser.add_argument("--network", default='thr', type=str, help="Network to use. Defaults to 'thr'.")
parser.add_argument("--threshold", default=0.5, type=float, help="Threshold value. Defaults to 0.5.")
parser.add_argument("--layer", default='DM', type=str, choices=['DM', 'MM', 'HS'], help="Network layer to compute SVD. Defaults to 'DM'.")
parser.add_argument("--components", default=9, type=int, help="Number of singular values (components) to calculate. Defaults to 9.")
#
parser.add_argument("--radius_window", default=1.0, type=float, help="Window size for the radius dimension.")
parser.add_argument("--radius_overlap", default=0.1, type=float, help="Window overlap size for the radius dimension")
parser.add_argument("--angle_window", default=30, type=int, help="Window size for the angle dimension (in degrees)")
parser.add_argument("--angle_overlap", default=15, type=int, help="Window overlap size for the angle dimension (in degrees)")
#
args = parser.parse_args()
#
celltype = args.celltype # spermatocyte or enterocyte
network = args.network
threshold = args.threshold
threshold_str = str(threshold).replace('.', 'p')
layer = args.layer
components = args.components
#
radius_window = args.radius_window
radius_overlap = args.radius_overlap
angle_window = args.angle_window
angle_overlap = args.angle_overlap
#
#
#
print('Calculating PCA entropy for {celltype:s}-{network:s}-{threshold:s}-{layer:s}'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer))
rPCAFile = 'results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-dim.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
wDiAnFile = 'results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-dian.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
wEntrFile = 'results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-entropy.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
#
df_pca =
|
pd.read_csv(rPCAFile, index_col=0, encoding='utf-8')
|
pandas.read_csv
|
import tkinter as Tk
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
from tkinter import filedialog
try:
import joypy
except ModuleNotFoundError:
print("Joypy not installed....")
print("Installing it now.....")
import pip
if hasattr(pip, 'main'):
pip.main(['install', 'joypy'])
else:
pip._internal.main(['install', 'joypy'])
root = Tk.Tk()
root.title("AA Propensity")
root.geometry("400x200")
def dipeptide_encoding(seq, n):
"""
Returns n-Gram Motif frequency
https://www.biorxiv.org/content/10.1101/170407v1.full.pdf
"""
aa_list = list(seq)
return {''.join(aa_list): n for aa_list, n in Counter(zip(*[aa_list[i:] for i in range(n)])).items() if
not aa_list[0][-1] == (',')}
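# Illustrative example (added; the sequence is hypothetical): overlapping
# n-gram counts for a short peptide string.
#   dipeptide_encoding("ACDA", 2) -> {'AC': 1, 'CD': 1, 'DA': 1}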
def get_cmap():
cmap = plt.cm.jet # define the colormap
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
# force the first color entry to be grey
cmaplist = cmaplist[1:]
# create the new map
cmap = mpl.colors.LinearSegmentedColormap.from_list(
'Custom cmap', cmaplist, cmap.N)
return cmap
def get_filename():
root.filename = filedialog.askopenfilename(title="Select a CSV file")#, filetypes = (("CSV files", "*.csv")))
return root.filename
def graph():
fname = get_filename()
data = pd.read_csv(fname)
to_drop = [i for i, s in enumerate(data.Sequence) if ' ' in s]
data = data.drop(to_drop, axis=0)
seq_vec = data.Sequence.apply(lambda x: dipeptide_encoding(x, 1)).to_list()
df =
|
pd.DataFrame(seq_vec)
|
pandas.DataFrame
|
import os
import json
import io
import logging
import pandas as pd
import xml.dom
import xml.dom.minidom
from shutil import copy2
from enum import Enum
from datetime import datetime
from glob import glob
from collections import defaultdict
from collections.abc import Mapping
from operator import itemgetter
from copy import deepcopy
from label_studio_converter.utils import (
parse_config, create_tokens_and_tags, download, get_image_size, get_image_size_and_channels, ensure_dir,
get_polygon_area, get_polygon_bounding_box
)
from label_studio_converter import brush
from label_studio_converter.audio import convert_to_asr_json_manifest
logger = logging.getLogger(__name__)
class FormatNotSupportedError(NotImplementedError):
pass
class Format(Enum):
JSON = 1
JSON_MIN = 2
CSV = 3
TSV = 4
CONLL2003 = 5
COCO = 6
VOC = 7
BRUSH_TO_NUMPY = 8
BRUSH_TO_PNG = 9
ASR_MANIFEST = 10
def __str__(self):
return self.name
@classmethod
def from_string(cls, s):
try:
return Format[s]
except KeyError:
raise ValueError()
class Converter(object):
_FORMAT_INFO = {
Format.JSON: {
'title': 'JSON',
'description': "List of items in raw JSON format stored in one JSON file. Use to export both the data and the annotations for a dataset. It's Label Studio Common Format",
'link': 'https://labelstud.io/guide/export.html#JSON'
},
Format.JSON_MIN: {
'title': 'JSON-MIN',
'description': 'List of items where only "from_name", "to_name" values from the raw JSON format are exported. Use to export only the annotations for a dataset.',
'link': 'https://labelstud.io/guide/export.html#JSON-MIN',
},
Format.CSV: {
'title': 'CSV',
'description': 'Results are stored as comma-separated values with the column names specified by the values of the "from_name" and "to_name" fields.',
'link': 'https://labelstud.io/guide/export.html#CSV'
},
Format.TSV: {
'title': 'TSV',
'description': 'Results are stored in tab-separated tabular file with column names specified by "from_name" "to_name" values',
'link': 'https://labelstud.io/guide/export.html#TSV'
},
Format.CONLL2003: {
'title': 'CONLL2003',
'description': 'Popular format used for the CoNLL-2003 named entity recognition challenge.',
'link': 'https://labelstud.io/guide/export.html#CONLL2003',
'tags': ['sequence labeling', 'text tagging', 'named entity recognition']
},
Format.COCO: {
'title': 'COCO',
'description': 'Popular machine learning format used by the COCO dataset for object detection and image segmentation tasks with polygons and rectangles.',
'link': 'https://labelstud.io/guide/export.html#COCO',
'tags': ['image segmentation', 'object detection']
},
Format.VOC: {
'title': 'Pascal VOC XML',
'description': 'Popular XML format used for object detection and polygon image segmentation tasks.',
'link': 'https://labelstud.io/guide/export.html#Pascal-VOC-XML',
'tags': ['image segmentation', 'object detection']
},
Format.BRUSH_TO_NUMPY: {
'title': 'Brush labels to NumPy',
'description': 'Export your brush labels as NumPy 2d arrays. Each label outputs as one image.',
'link': 'https://labelstud.io/guide/export.html#Brush-labels-to-NumPy-amp-PNG',
'tags': ['image segmentation']
},
Format.BRUSH_TO_PNG: {
'title': 'Brush labels to PNG',
'description': 'Export your brush labels as PNG images. Each label outputs as one image.',
'link': 'https://labelstud.io/guide/export.html#Brush-labels-to-NumPy-amp-PNG',
'tags': ['image segmentation']
},
Format.ASR_MANIFEST: {
'title': 'ASR Manifest',
'description': 'Export audio transcription labels for automatic speech recognition as the JSON manifest format expected by NVIDIA NeMo models.',
'link': 'https://labelstud.io/guide/export.html#ASR-MANIFEST',
'tags': ['speech recognition']
}
}
def all_formats(self):
return self._FORMAT_INFO
def __init__(self, config, project_dir, output_tags=None, upload_dir=None):
self.project_dir = project_dir
self.upload_dir = upload_dir
if isinstance(config, dict):
self._schema = config
elif isinstance(config, str):
if os.path.isfile(config):
with io.open(config) as f:
config_string = f.read()
else:
config_string = config
self._schema = parse_config(config_string)
self._data_keys, self._output_tags = self._get_data_keys_and_output_tags(output_tags)
self._supported_formats = self._get_supported_formats()
def convert(self, input_data, output_data, format, is_dir=True, **kwargs):
if isinstance(format, str):
format = Format.from_string(format)
if format == Format.JSON:
self.convert_to_json(input_data, output_data, is_dir=is_dir)
elif format == Format.JSON_MIN:
self.convert_to_json_min(input_data, output_data, is_dir=is_dir)
elif format == Format.CSV:
header = kwargs.get('csv_header', True)
sep = kwargs.get('csv_separator', ',')
self.convert_to_csv(input_data, output_data, sep=sep, header=header, is_dir=is_dir)
elif format == Format.TSV:
header = kwargs.get('csv_header', True)
sep = kwargs.get('csv_separator', '\t')
self.convert_to_csv(input_data, output_data, sep=sep, header=header, is_dir=is_dir)
elif format == Format.CONLL2003:
self.convert_to_conll2003(input_data, output_data, is_dir=is_dir)
elif format == Format.COCO:
image_dir = kwargs.get('image_dir')
self.convert_to_coco(input_data, output_data, output_image_dir=image_dir, is_dir=is_dir)
elif format == Format.VOC:
image_dir = kwargs.get('image_dir')
self.convert_to_voc(input_data, output_data, output_image_dir=image_dir, is_dir=is_dir)
elif format == Format.BRUSH_TO_NUMPY:
items = self.iter_from_dir(input_data) if is_dir else self.iter_from_json_file(input_data)
brush.convert_task_dir(items, output_data, out_format='numpy')
elif format == Format.BRUSH_TO_PNG:
items = self.iter_from_dir(input_data) if is_dir else self.iter_from_json_file(input_data)
brush.convert_task_dir(items, output_data, out_format='png')
elif format == Format.ASR_MANIFEST:
items = self.iter_from_dir(input_data) if is_dir else self.iter_from_json_file(input_data)
convert_to_asr_json_manifest(
items, output_data, data_key=self._data_keys[0], project_dir=self.project_dir,
upload_dir=self.upload_dir)
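# Illustrative usage sketch (added comment; the paths and chosen format are hypothetical):
#   converter = Converter(config='label_config.xml', project_dir='/path/to/project')
#   converter.convert('/path/to/completions', '/path/to/export', format='CSV', is_dir=True)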
def _get_data_keys_and_output_tags(self, output_tags=None):
data_keys = set()
output_tag_names = []
if output_tags is not None:
for tag in output_tags:
if tag not in self._schema:
logger.warning(
'Specified tag "{tag}" not found in config schema: '
'available options are {schema_keys}'.format(
tag=tag, schema_keys=str(list(self._schema.keys()))))
for name, info in self._schema.items():
if output_tags is not None and name not in output_tags:
continue
data_keys |= set(map(itemgetter('value'), info['inputs']))
output_tag_names.append(name)
return list(data_keys), output_tag_names
def _get_supported_formats(self):
if len(self._data_keys) > 1:
return [Format.JSON.name, Format.JSON_MIN.name, Format.CSV.name, Format.TSV.name]
output_tag_types = set()
input_tag_types = set()
for info in self._schema.values():
output_tag_types.add(info['type'])
for input_tag in info['inputs']:
input_tag_types.add(input_tag['type'])
all_formats = [f.name for f in Format]
if not ('Text' in input_tag_types and 'Labels' in output_tag_types):
all_formats.remove(Format.CONLL2003.name)
if not ('Image' in input_tag_types and 'RectangleLabels' in output_tag_types):
all_formats.remove(Format.VOC.name)
if not ('Image' in input_tag_types and ('RectangleLabels' in output_tag_types or
'PolygonLabels' in output_tag_types)):
all_formats.remove(Format.COCO.name)
if not ('Image' in input_tag_types and ('BrushLabels' in output_tag_types or 'brushlabels' in output_tag_types)):
all_formats.remove(Format.BRUSH_TO_NUMPY.name)
all_formats.remove(Format.BRUSH_TO_PNG.name)
if not (('Audio' in input_tag_types or 'AudioPlus' in input_tag_types) and 'TextArea' in output_tag_types):
all_formats.remove(Format.ASR_MANIFEST.name)
return all_formats
@property
def supported_formats(self):
return self._supported_formats
def iter_from_dir(self, input_dir):
if not os.path.exists(input_dir):
raise FileNotFoundError('{input_dir} doesn\'t exist'.format(input_dir=input_dir))
for json_file in glob(os.path.join(input_dir, '*.json')):
for item in self.iter_from_json_file(json_file):
if item:
yield item
def iter_from_json_file(self, json_file):
""" Extract annotation results from json file
param json_file: path to task list or dict with annotations
"""
with io.open(json_file, encoding='utf8') as f:
data = json.load(f)
# one task
if isinstance(data, Mapping):
for item in self.annotation_result_from_task(data):
yield item
# many tasks
elif isinstance(data, list):
for task in data:
for item in self.annotation_result_from_task(task):
if item is not None:
yield item
def annotation_result_from_task(self, task):
has_annotations = 'completions' in task or 'annotations' in task
if not has_annotations:
raise KeyError('Each task dict item should contain "annotations" or "completions" [deprecated], '
'where the value is a list of dicts')
# get last not skipped completion and make result from it
annotations = task['annotations'] if 'annotations' in task else task['completions']
# skip cancelled annotations
cancelled = lambda x: not (x.get('skipped', False) or x.get('was_cancelled', False))
annotations = list(filter(cancelled, annotations))
if not annotations:
return None
# sort by creation time
annotations = sorted(annotations, key=lambda x: x.get('created_at', 0), reverse=True)
for annotation in annotations:
inputs = task['data']
result = annotation['result']
outputs = defaultdict(list)
# get results only as output
for r in result:
if 'from_name' in r and r['from_name'] in self._output_tags:
v = deepcopy(r['value'])
v['type'] = self._schema[r['from_name']]['type']
if 'original_width' in r:
v['original_width'] = r['original_width']
if 'original_height' in r:
v['original_height'] = r['original_height']
outputs[r['from_name']].append(v)
yield {
'id': task['id'],
'input': inputs,
'output': outputs,
'completed_by': annotation.get('completed_by', {}),
'annotation_id': annotation.get('id')
}
def _check_format(self, fmt):
pass
def _prettify(self, v):
out = []
tag_type = None
for i in v:
j = deepcopy(i)
tag_type = j.pop('type')
if tag_type == 'Choices' and len(j['choices']) == 1:
out.append(j['choices'][0])
else:
out.append(j)
return out[0] if tag_type == 'Choices' and len(out) == 1 else out
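# Illustrative example (added comment; the value below is hypothetical):
# a single-choice result is flattened to its bare label,
#   self._prettify([{'type': 'Choices', 'choices': ['cat']}]) -> 'cat'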
def convert_to_json(self, input_data, output_dir, is_dir=True):
self._check_format(Format.JSON)
ensure_dir(output_dir)
output_file = os.path.join(output_dir, 'result.json')
records = []
if is_dir:
for json_file in glob(os.path.join(input_data, '*.json')):
with io.open(json_file, encoding='utf8') as f:
records.append(json.load(f))
with io.open(output_file, mode='w', encoding='utf8') as fout:
json.dump(records, fout, indent=2, ensure_ascii=False)
else:
copy2(input_data, output_file)
def convert_to_json_min(self, input_data, output_dir, is_dir=True):
self._check_format(Format.JSON_MIN)
ensure_dir(output_dir)
output_file = os.path.join(output_dir, 'result.json')
records = []
item_iterator = self.iter_from_dir if is_dir else self.iter_from_json_file
for item in item_iterator(input_data):
record = deepcopy(item['input'])
if item.get('id') is not None:
record['id'] = item['id']
for name, value in item['output'].items():
record[name] = self._prettify(value)
record['annotator'] = item['completed_by'].get('email')
record['annotation_id'] = item['annotation_id']
records.append(record)
with io.open(output_file, mode='w', encoding='utf8') as fout:
json.dump(records, fout, indent=2, ensure_ascii=False)
def convert_to_csv(self, input_data, output_dir, is_dir=True, **kwargs):
self._check_format(Format.CSV)
ensure_dir(output_dir)
output_file = os.path.join(output_dir, 'result.csv')
records = []
item_iterator = self.iter_from_dir if is_dir else self.iter_from_json_file
for item in item_iterator(input_data):
record = deepcopy(item['input'])
if item.get('id') is not None:
record['id'] = item['id']
for name, value in item['output'].items():
pretty_value = self._prettify(value)
record[name] = pretty_value if isinstance(pretty_value, str) else json.dumps(pretty_value)
record['annotator'] = item['completed_by'].get('email')
record['annotation_id'] = item['annotation_id']
records.append(record)
|
pd.DataFrame.from_records(records)
|
pandas.DataFrame.from_records
|
import faiss
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tqdm.auto import tqdm
import pandas as pd
import scipy
import pickle
import json
import os
import numpy as np
from collections import Counter
import re
from datasets import (
Dataset,
load_from_disk,
concatenate_datasets,
)
from konlpy.tag import Mecab
import numpy as np
from tqdm import tqdm, trange
import argparse
import random
import torch
import os
import torch.nn.functional as F
from transformers import BertModel, BertPreTrainedModel, AdamW, TrainingArguments, get_linear_schedule_with_warmup, AutoTokenizer
import pickle
from rank_bm25 import BM25Okapi, BM25Plus, BM25L, BM25
import time
from contextlib import contextmanager
import math
from multiprocessing import Pool, cpu_count
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f'[{name}] done in {time.time() - t0:.3f} s')
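# Illustrative usage (added comment; the label and body are hypothetical):
#   with timer('bm25 search'):
#       scores = retriever.get_scores(tokenized_query)
#   # prints "[bm25 search] done in ... s" when the block exits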
ban_words=("이따금","아마","절대로","무조건","한때","대략","오직",
"오로지","감히","최소","아예","반드시","꼭","때때로","이미"
,"종종","졸곧","약간","기꺼이", "비록","꾸준히","일부러","어쩔", "문득", "어쨌든", "순전히", "필수")
mecab = Mecab()
class ES(BM25):
def __init__(self, corpus, tokenizer=None, k1=1.2, b=0.75, delta=0):
# Algorithm specific parameters
self.k1 = k1
self.b = b
self.delta = delta
super().__init__(corpus, tokenizer)
def _calc_idf(self, nd):
for word, freq in nd.items():
idf = math.log(1 + ((self.corpus_size + 0.5 - freq) / (freq+0.5)))
self.idf[word] = idf
def get_scores(self, query):
score = np.zeros(self.corpus_size)
doc_len = np.array(self.doc_len)
for q in query:
q_freq = np.array([(doc.get(q) or 0) for doc in self.doc_freqs])
score += (self.idf.get(q) or 0) * (self.delta + (q_freq) /
(self.k1 * (1 - self.b + self.b * doc_len / self.avgdl) + q_freq))
return score
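# Note (added): ES scores a document d for query term q as
#   idf(q) * (delta + tf / (k1 * (1 - b + b * |d| / avgdl) + tf))
# with idf(q) = log(1 + (N + 0.5 - df(q)) / (df(q) + 0.5)), where N is the
# corpus size and df(q) is the per-term count accumulated by the rank_bm25
# base class (the number of documents containing q).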
def remove_q(query):
stop = "|".join(
"어느 무엇인가요 무엇 누가 누구인가요 누구인가 누구 어디에서 어디에 어디서 어디인가요 어디를 어디 언제 어떤 어떠한 몇 얼마 얼마나 뭐 어떻게 무슨 \?".split(
" "
)
)
rm = re.sub(stop, "", query).strip()
return rm
class BertEncoder(BertPreTrainedModel):
def __init__(self, config):
super(BertEncoder, self).__init__(config)
self.bert = BertModel(config)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
outputs = self.bert(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)
pooled_output = outputs[1]
return pooled_output
'''
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
model_checkpoint="bert-base-multilingual-cased"
# load pre-trained model on cuda (if available)
p_encoder = BertEncoder.from_pretrained(model_checkpoint).cuda()
q_encoder = BertEncoder.from_pretrained(model_checkpoint).cuda()
p_encoder.load_state_dict(torch.load("./p_encoder_fin.pth"))
q_encoder.load_state_dict(torch.load("./q_encoder_fin.pth"))
'''
def to_cuda(batch):
return tuple(t.cuda() for t in batch)
class DenseRetrieval:
def __init__(self, tokenize_fn, data_path="./data/", context_path="wikipedia_documents.json"):
self.data_path = data_path
self.wiki_embs = None
with open(os.path.join(data_path, context_path), "r") as f:
wiki = json.load(f)
self.contexts = list(dict.fromkeys([v['text'] for v in wiki.values()]))
pickle_name = f"./data/dense_embedding.bin"
if os.path.isfile(pickle_name):
with open(pickle_name,"rb") as file:
self.wiki_embs=pickle.load(file)
print("Pre")
else:
with torch.no_grad():
self.wiki_embs = []
for text in self.contexts:
p = tokenizer(text, padding="max_length", truncation=True, return_tensors='pt').to('cuda')
wiki_emb = p_encoder(**p).to('cpu').numpy()
self.wiki_embs.append(wiki_emb)
self.wiki_embs = torch.Tensor(self.wiki_embs).squeeze() # (num_passage, emb_dim)
with open(pickle_name,"wb") as file:
pickle.dump(self.wiki_embs,file)
self.f=open("./track.txt","w")
def retrieve(self, query_or_dataset, topk=1):
if isinstance(query_or_dataset, str):
doc_scores, doc_indices = self.get_relevant_doc(query_or_dataset, k=topk)
return doc_scores, doc_indices
elif isinstance(query_or_dataset, Dataset):
# make retrieved result as dataframe
total = []
super_count=0
doc_scores, doc_indices = self.get_relevant_doc_bulk(query_or_dataset['question'], k=5)
for idx, example in enumerate(tqdm(query_or_dataset, desc="Dense retrieval: ")):
tmp = {
"question": example["question"],
"id": example['id'],
#"context_id": doc_indices[idx][0], # retrieved id
"context": " ".join(self.contexts[doc_indices[idx][i]] for i in range(5)) # retrieved doument
}
if 'context' in example.keys() and 'answers' in example.keys():
tmp["original_context"] = example['context'] # original document
tmp["answers"] = example['answers'] # original answer
total.append(tmp)
cqas =
|
pd.DataFrame(total)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 20 14:09:31 2016
@author: bmanubay
"""
import cirpy
import numpy as np
import pandas as pd
from sklearn.externals.joblib import Memory
mem = Memory(cachedir="/home/bmanubay/.thermoml/")
@mem.cache
def resolve_cached(x, rtype):
return cirpy.resolve(x, rtype)
# Define list of all alkane SMILES strings that appear in all of our data
SMILES_alk = ['C', 'CC', 'CCC', 'CCCC', 'CCCCC', 'CCCCCC', 'CCCCCCC', 'CCCCCCCC', 'CCCCCCCCC', 'CCCCCCCCCC', 'CC(C)C', 'CCC(C)C', 'CCCC(C)C', 'C1CCCCC1', 'CC1CCCCC1', 'CCCCCC(C)C', 'CC(C)C(C)C', 'CCC(C)(C)C', 'CCC(C)CC', 'CCCC(C)C', 'CC(C)CC(C)(C)C', 'C1CCCC1', 'C1CCCCCCC1', 'CCC1CCCCC1', 'CC1CCC(C)C(C)C1', 'CCCCC1CCCCC1', 'CC1CCCC1', 'CCCCCCC(C)C', 'CCCCCCCC(C)C', 'CCCCC(C)CCC', 'CCC(C)CCC(C)CC', 'CCC(C)CC(C)CC', 'CCCCCC(C)C', 'C1CCCCCC1', 'CC(C)C(C)C', 'CCC(C)(C)C']
S = pd.DataFrame({'SMILES': SMILES_alk}, columns = ['SMILES'])
# Binary mixtures
aa1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/actcoeff_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/dens_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/dielec_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa4 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/eme_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa5 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/emcp_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa6 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/emv_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa7 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/sos_bin.csv", sep=';', index_col= 'Unnamed: 0')
# Binary Mixtures with alkane-alkane mixtures removed
cc1c = pd.concat([aa1["x1"], aa1["x2"], aa1['SMILES1'], aa1['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc1c.SMILES1.isin(S.SMILES) & cc1c.SMILES2.isin(S.SMILES))
cc1c = cc1c[ind]
cc1c = cc1c.drop(['x1','x2'], axis=1)
count1c = pd.Series(cc1c.squeeze().values.ravel()).value_counts()
count1c = count1c.reset_index()
count1c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc1 = pd.concat([count1c["SMILES"], count1c["Count"]], axis=1, keys=["SMILES", "Count"])
cc2c = pd.concat([aa2["x1"], aa2["x2"], aa2['SMILES1'], aa2['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc2c.SMILES1.isin(S.SMILES) & cc2c.SMILES2.isin(S.SMILES))
cc2c = cc2c[ind]
cc2c = cc2c.drop(['x1','x2'], axis=1)
count2c = pd.Series(cc2c.squeeze().values.ravel()).value_counts()
count2c = count2c.reset_index()
count2c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc2 = pd.concat([count2c["SMILES"], count2c["Count"]], axis=1, keys=["SMILES", "Count"])
cc3c = pd.concat([aa3["x1"], aa3["x2"], aa3['SMILES1'], aa3['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc3c.SMILES1.isin(S.SMILES) & cc3c.SMILES2.isin(S.SMILES))
cc3c = cc3c[ind]
cc3c = cc3c.drop(['x1','x2'], axis=1)
count3c = pd.Series(cc3c.squeeze().values.ravel()).value_counts()
count3c = count3c.reset_index()
count3c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc3 = pd.concat([count3c["SMILES"], count3c["Count"]], axis=1, keys=["SMILES", "Count"])
cc4c = pd.concat([aa4["x1"], aa4["x2"], aa4['SMILES1'], aa4['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc4c.SMILES1.isin(S.SMILES) & cc4c.SMILES2.isin(S.SMILES))
cc4c = cc4c[ind]
cc4c = cc4c.drop(['x1','x2'], axis=1)
count4c = pd.Series(cc4c.squeeze().values.ravel()).value_counts()
count4c = count4c.reset_index()
count4c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc4 = pd.concat([count4c["SMILES"], count4c["Count"]], axis=1, keys=["SMILES", "Count"])
cc5c = pd.concat([aa5["x1"], aa5["x2"], aa5['SMILES1'], aa5['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc5c.SMILES1.isin(S.SMILES) & cc5c.SMILES2.isin(S.SMILES))
cc5c = cc5c[ind]
cc5c = cc5c.drop(['x1','x2'], axis=1)
count5c = pd.Series(cc5c.squeeze().values.ravel()).value_counts()
count5c = count5c.reset_index()
count5c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc5 = pd.concat([count5c["SMILES"], count5c["Count"]], axis=1, keys=["SMILES", "Count"])
cc6c = pd.concat([aa6["x1"], aa6["x2"], aa6['SMILES1'], aa6['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc6c.SMILES1.isin(S.SMILES) & cc6c.SMILES2.isin(S.SMILES))
cc6c = cc6c[ind]
cc6c = cc6c.drop(['x1','x2'], axis=1)
count6c = pd.Series(cc6c.squeeze().values.ravel()).value_counts()
count6c = count6c.reset_index()
count6c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc6 = pd.concat([count6c["SMILES"], count6c["Count"]], axis=1, keys=["SMILES", "Count"])
cc7c = pd.concat([aa7["x1"], aa7["x2"], aa7['SMILES1'], aa7['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc7c.SMILES1.isin(S.SMILES) & cc7c.SMILES2.isin(S.SMILES))
cc7c = cc7c[ind]
cc7c = cc7c.drop(['x1','x2'], axis=1)
count7c = pd.Series(cc7c.squeeze().values.ravel()).value_counts()
count7c = count7c.reset_index()
count7c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc7 = pd.concat([count7c["SMILES"], count7c["Count"]], axis=1, keys=["SMILES", "Count"])
# All data counts
c1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_cpmol.csv", sep=';', usecols=['SMILES', 'Count'])
c2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_dens.csv", sep=';', usecols=['SMILES', 'Count'])
c3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_dielec.csv", sep=';', usecols=['SMILES', 'Count'])
c4 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_hmol.csv", sep=';', usecols=['SMILES', 'Count'])
c5 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_sos.csv", sep=';', usecols=['SMILES', 'Count'])
cc1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_actcoeff.csv", sep=';', usecols=['SMILES', 'Count'])
cc2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_dens.csv", sep=';', usecols=['SMILES', 'Count'])
cc3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_dielec.csv", sep=';', usecols=['SMILES', 'Count'])
cc4 =
|
pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_eme.csv", sep=';', usecols=['SMILES', 'Count'])
|
pandas.read_csv
|
"""
Preprocess dataset files
"""
import json
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import yaml
from yacs.config import CfgNode as CN
import numpy as np
import ast
from collections import Counter
from torchtext import vocab
import pickle
from munch import Munch
np.random.seed(5)
class AnetCSV:
def __init__(self, cfg, comm=None):
self.cfg = cfg
if comm is not None:
assert isinstance(comm, (dict, Munch))
self.comm = Munch(comm)
else:
self.comm = Munch()
inp_anet_dict_fpath = cfg.ds.anet_ent_split_file
self.inp_dict_file = Path(inp_anet_dict_fpath)
# Create directory to keep the generated csvs
self.out_csv_dir = self.inp_dict_file.parent / 'csv_dir'
self.out_csv_dir.mkdir(exist_ok=True)
# Structure of anet_dict:
# anet = Dict,
# keys: 1. word to lemma, 2. index to word,
# 3. word to detection 4. video information
# We only need the video information
self.vid_dict_list = json.load(open(inp_anet_dict_fpath))['videos']
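# Note (added; structure partially assumed): only the 'videos' entry of the
# split file is used here, and it is expected to be a list of per-video dicts
# that pandas can turn into one row per video in create_csvs below.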
def create_csvs(self):
"""
Create the Train/Val split videos
"""
self.vid_info_df =
|
pd.DataFrame(self.vid_dict_list)
|
pandas.DataFrame
|
import csv
from math import nan
import pandas as pd
from pandas.core import arrays
from Build_RFC import createClassifier
from Build_RFC import classify
from datetime import date
import numpy as np
from pandas.core.dtypes.missing import notna
from CCD_Main import save_raster
from osgeo import gdal
from parameters import defaults as dfs
import os
from pathlib import Path
import glob
import math
import multiprocessing
from functools import partial
from CCD_Main import csvParameters
def toDF(seq):
pixel=pd.DataFrame({
'RMSE blue':[seq["blue"]["rmse"]],
'RMSE green':[seq['green']['rmse']],
'RMSE red':[seq['red']['rmse']],
'RMSE nir':[seq['nir']['rmse']],
'RMSE ndvi':[seq['ndvi']['rmse']],
'Coef1 blue':[seq['blue']["coefficients"][0]],
'Coef2 blue':[seq['blue']["coefficients"][1]],
'Coef3 blue':[seq['blue']["coefficients"][2]],
'Coef1 green':[seq['green']["coefficients"][0]],
'Coef2 green':[seq['green']["coefficients"][1]],
'Coef3 green':[seq['green']["coefficients"][2]],
'Coef1 red':[seq['red']["coefficients"][0]],
'Coef2 red':[seq['red']["coefficients"][1]],
'Coef3 red':[seq['red']["coefficients"][2]],
'Coef1 nir':[seq['nir']["coefficients"][0]],
'Coef2 nir':[seq['nir']["coefficients"][1]],
'Coef3 nir':[seq['nir']["coefficients"][2]],
'Coef1 ndvi':[seq['ndvi']["coefficients"][0]],
'Coef2 ndvi':[seq['ndvi']["coefficients"][1]],
'Coef3 ndvi':[seq['ndvi']["coefficients"][2]],
})
return pixel
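# Note (added): toDF flattens one CCD segment dict into a single-row DataFrame
# of per-band RMSEs and the first three model coefficients, presumably the
# feature layout the classifier returned by createClassifier() expects.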
CCD_csvFile=dfs['CCD_output_CSVfile']
sample_size=dfs["resampleResolution"]
nth=dfs['nth']
name=dfs["className"]
path=Path(CCD_csvFile)
dir=path.parent.absolute()
output=str(dir)+"/Classifications"
parent_dir=Path(dir).parent.absolute()
#parent_dir='/Users/arthur.platel/Desktop/Fusion_Images/deforestationV2/PF-SR'
allFiles = glob.glob(os.path.join(parent_dir, '*.tif'))
if not os.path.isdir(output):
os.mkdir(output)
clsf=createClassifier()
files=[]
for k in range(len(allFiles)):
if k%nth==0:
files.append(allFiles[k])
image0=gdal.Open(files[0])
image=gdal.Warp('/vsimem/in_memory_output.tif',image0,xRes=sample_size,yRes=sample_size,sampleAlg='average')
geo=image.GetGeoTransform()
proj=image.GetProjection()
shape=np.shape(image.ReadAsArray())
y_size=shape[2]
ras_data=geo,proj,output,shape,y_size,sample_size
outarray1=np.zeros((shape[1],shape[2]))
outarray2=np.zeros((shape[1],shape[2]))
rasters=[outarray1,outarray2]
# with open(CCD_csvFile) as csv_file2:
# csv_reader = csv.reader(csv_file2, delimiter=',')
# r=0
# for row in csv_reader:
# print("classifiying pixel",r)
# r+=1
# if len(row)<=1:
# df1=toDF(eval(row[0]))
# df2=toDF(eval(row[0]))
# class1=classify(df1,clsf)
# class2=classify(df2,clsf)
# px1=eval(eval(row[0])["pixel"])
# rasters[0][px1[0]][px1[1]]=class1
# rasters[1][px1[0]][px1[1]]=class2
# else:
# df1=toDF(eval(row[0]))
# df2=toDF(eval(row[1]))
# class1=classify(df1,clsf)
# class2=classify(df2,clsf)
# px1=eval(eval(row[0])["pixel"])
# px2=eval(eval(row[1])["pixel"])
# rasters[0][px1[0]][px1[1]]=class1
# rasters[1][px2[0]][px2[1]]=class2
# if r%100==0:
# save_raster(rasters,name,ras_data)
# save_raster(rasters,name,ras_data)
def rowTuples(num,ras_data,size):
geo,proj,output,shape,y_size,sample_size=ras_data
rows=shape[1]
div=rows//num
remain=shape[1]%num
tuples=[(num*k,(num*(k+1)))for k in range(div)]
divR=remain//size
for k in range(divR):
tuples.append(((div*num),(div*num)+remain))
return tuples
with open(CCD_csvFile) as csv_file:
read=pd.read_csv(csv_file, delimiter=',',header=None,names=list(range(5))).dropna(axis='columns',how='all')
for k in range(len(read)):
for l in range(0,1):
if pd.isnull(read[l][k])==False:
print(eval(read[l][k])['pixel'])
df1=toDF(eval(read[l][k]))
class1=classify(df1,clsf)
pix=eval(eval(read[l][k])['pixel'])
print(pix[0])
outarray1[pix[0],pix[1]]=class1
for g in range(1,2):
if pd.isnull(read[g][k])==False:
pix=eval(eval(read[l][k])['pixel'])
df2=toDF(eval(read[g][k]))
class2=classify(df2,clsf)
outarray2[pix[0],pix[1]]=class2
else:
pix=eval(eval(read[g-1][k])['pixel'])
outarray2[pix[0],pix[1]]=outarray1[pix[0],pix[1]]
if k%100==0:
save_raster(rasters,name,ras_data)
save_raster(rasters,name,ras_data)
def read_csv(filename):
'converts a filename to a pandas dataframe'
return pd.read_csv(filename, delimiter=',',header=None,names=list(range(5))).dropna(axis='columns',how='all')
def classif(rows,CCD_csvFile,ras_data):
geo,proj,output,shape,y_size,sample_size=ras_data
outarray1=np.zeros((rows[1]-rows[0],shape[2]))
outarray2=np.zeros((rows[1]-rows[0],shape[2]))
rasters=[outarray1,outarray2]
#outarray2=np.zeros((len(range(rows)),shape[2]))
print(rows[1]-rows[0])
value1=shape[2]*rows[0]
value2=shape[2]*rows[1]
with open(CCD_csvFile) as csv_file:
read=read_csv(csv_file)
for k in range(value1,value2):
for l in range(0,1):
if
|
pd.isnull(read[l][k])
|
pandas.isnull
|
import robin_stocks as r
import pandas as pd
from datetime import datetime
import numpy as np
pd.set_option('display.max_columns', None)
r.login('rh_username','rh_password')
my_stocks = r.build_holdings()
df =
|
pd.DataFrame(my_stocks)
|
pandas.DataFrame
|
# coding: utf-8
# In[1]:
import calour as ca
from calour.training import plot_scatter
from calour.training import RepeatedSortedStratifiedKFold
# In[2]:
# Import Libraries
import os
import math
import numpy as np
import pandas as pd
import biom
import pickle
import time
import argparse
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
from sklearn.model_selection import ParameterGrid
from skbio import DistanceMatrix
from scipy.sparse import *
import scipy
from math import sqrt
# In[3]:
# Import Regression Methods
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.svm import SVR
from sklearn.svm import LinearSVR
from sklearn.cross_decomposition import PLSRegression
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
# In[4]:
import warnings
warnings.filterwarnings('ignore')
# # ## Toggle Dataset, balances
# * '82-soil', #0
# 'PMI_16s', #1
# 'malnutrition', #2
# 'cider', #3
# 'oral_male', #4
# 'oral_female', #5
# 'skin_hand_female', #6
# 'skin_hand_male', #7
# 'skin_head_female', #8
# 'skin_head_male', #9
# 'gut_AGP_female', #10
# 'gut_AGP_male', #11
# 'gut_cantonese_female', #12
# 'gut_cantonese_male' #13
#
# --balances stores balances=True, --no-balances stores balances=false
# In[5]:
parser = argparse.ArgumentParser()
parser.add_argument("dataset",
help="the dataset you wish to benchmark with",
type=int, choices=range(0, 7))
parser.add_argument('--balances', dest='balances', action='store_true')
parser.add_argument('--no_balances', dest='balances', action='store_false')
parser.add_argument('--others', dest='others', action='store_true')
parser.add_argument('--rf', dest='rf', action='store_true')
parser.add_argument('--gb', dest='gb', action='store_true')
parser.add_argument('--et', dest='et', action='store_true')
parser.add_argument('--mlp', dest='mlp', action='store_true')
parser.add_argument('--xgb', dest='xgb', action='store_true')
parser.add_argument('--lsvr', dest='lsvr', action='store_true')
parser.add_argument('--pls', dest='pls', action='store_true')
parser.set_defaults(balances=False)
parser.set_defaults(others=False)
parser.set_defaults(rf=False)
parser.set_defaults(gb=False)
parser.set_defaults(et=False)
parser.set_defaults(mlp=False)
parser.set_defaults(xgb=False)
args = parser.parse_args()
dataset = args.dataset
balances = args.balances
# Give a name for the output data file, directory prefixes
dir_prefixes = ['82-soil', #0
'PMI_16s', #1
'malnutrition', #2
'cider', #3
'oral_male', #4
'oral_female', #5
'skin_hand_female', #6
'skin_hand_male', #7
'skin_head_female', #8
'skin_head_male', #9
'gut_AGP_female', #10
'gut_AGP_male', #11
'gut_cantonese_female', #12
'gut_cantonese_male' #13
]
dir_prefix = dir_prefixes[dataset]
if not os.path.isdir(dir_prefix):
os.mkdir(dir_prefix, mode=0o755)
if(balances):
biom_fp = ['82-soil/balances.qza',
'PMI_16s/balances.qza',
'malnutrition/balances.qza',
'cider/balances.qza',
'AGP/balances.qza'
]
else:
biom_fp = ['82-soil/rarefied_20000_filtered_samples_features_frequency_table.biom',
'PMI_16s/PMI_100nt_deblur1-1-0_rarefied8000.biom',
'malnutrition/rarefied_8500_filtered_samples_features_frequency_table.biom',
'cider/cider_150nt_rarefied14500.biom',
"age_prediction/oral_4014/oral_4014__qiita_host_sex_female__.biom",
"age_prediction/oral_4014/oral_4014__qiita_host_sex_male__.biom",
"age_prediction/skin_4168/skin_4168__body_site_hand_qiita_host_sex_female__.biom",
"age_prediction/skin_4168/skin_4168__body_site_hand_qiita_host_sex_male__.biom",
"age_prediction/skin_4168/skin_4168__body_site_head_qiita_host_sex_female__.biom",
"age_prediction/skin_4168/skin_4168__body_site_head_qiita_host_sex_male__.biom",
"age_prediction/gut_4575/gut_4575_rare__cohort_AGP_sex_female__.biom",
"age_prediction/gut_4575/gut_4575_rare__cohort_AGP_sex_male__.biom",
"age_prediction/gut_4575/gut_4575_rare__cohort_cantonese_sex_female__.biom",
"age_prediction/gut_4575/gut_4575_rare__cohort_cantonese_sex_male__.biom"
]
metadata_fp = ['82-soil/20994_analysis_mapping_v3.tsv',
'PMI_16s/21159_analysis_mapping.txt',
'malnutrition/merged_metadata_v3.txt',
'cider/21291_analysis_mapping.txt',
"age_prediction/oral_4014/oral_4014_map__qiita_host_sex_female__.txt",
"age_prediction/oral_4014/oral_4014_map__qiita_host_sex_male__.txt",
"age_prediction/skin_4168/skin_4168_map__body_site_hand_qiita_host_sex_female__.txt",
"age_prediction/skin_4168/skin_4168_map__body_site_hand_qiita_host_sex_male__.txt",
"age_prediction/skin_4168/skin_4168_map__body_site_head_qiita_host_sex_female__.txt",
"age_prediction/skin_4168/skin_4168_map__body_site_head_qiita_host_sex_male__.txt",
"age_prediction/gut_4575/gut_4575_rare_map__cohort_AGP_sex_female__.txt",
"age_prediction/gut_4575/gut_4575_rare_map__cohort_AGP_sex_male__.txt",
"age_prediction/gut_4575/gut_4575_rare_map__cohort_cantonese_sex_female__.txt",
"age_prediction/gut_4575/gut_4575_rare_map__cohort_cantonese_sex_male__.txt"
]
distmatrix_fp = ['82-soil/beta-q2/',
'PMI_16s/beta-q2/',
'malnutrition/beta-q2/',
'cider/beta-q2/'
]
# In[8]:
if(balances):
feature_datatype = 'qiime2'
exp = ca.read_amplicon(biom_fp[dataset], metadata_fp[dataset],
data_file_type='qiime2',
min_reads=None, normalize=None)
else: #BIOM table input
exp = ca.read_amplicon(biom_fp[dataset], metadata_fp[dataset],
min_reads=None, normalize=None)
#if (dataset!=3): exp = exp.filter_abundance(10)
# ## Modify parameter options by shape of data
# Create logarithmic scales for ranges of parameter options where valid inputs can be 1<->n_features or n_samples
# In[11]:
def get_logscale(end, num):
scale = np.geomspace(start=1, stop=end-1, num=num)
scale = list(np.around(scale, decimals=0).astype(int))
return scale
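# Illustrative example (added; treat the exact integers as approximate, since
# the geometric spacing is rounded):
#   get_logscale(100, 5) -> [1, 3, 10, 31, 99]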
# In[12]:
n_samples = exp.shape[0]
n_features = exp.shape[1]
# In[13]:
#Logarithmic scales based on n_samples
s_logscale = get_logscale(n_samples, 11)
s_logscale7 = get_logscale(n_samples, 8)
s_logscale.pop()
s_logscale7.pop()
# Why .pop()? n_samples is less than total n_samples due to how we split data into folds, so the last item will never be used. e.g.
# ```
# ValueError: Expected n_neighbors <= n_samples, but n_samples = 123, n_neighbors = 152
# ```
# In[14]:
#Logarithmic scales based on n_features
f_logscale = get_logscale(n_features, 10)
f_logscale7 = get_logscale(n_features, 7)
# __NOTE__ Trimmed the parameter space of time-intensive regressors (ensemble methods, neural net) with many parameters. Original parameter set is "commented" out using triple quotes.
# min_samples_leaf must be at least 1 or in (0, 0.5], got 0.6100000000000001
# In[15]:
# CPU cores
#Use all cores for parallelization, as runtime is determined separately
cpu = -1
########## Indicates where sample or feature log scales are used
# KNeighbors: use precomputed weights and different X for KNN
KNN_grids = {'n_neighbors': s_logscale, ##########
'weights': ['uniform', 'distance'],
'algorithm': ['brute'],
'n_jobs': [cpu],
} #20
# KNeighbors for use with Distance Matrices
KNNDistance_grids = {'n_neighbors': s_logscale, ##########
'weights':['uniform','distance'],
'algorithm': ['brute'],
'n_jobs': [cpu],
'p':[2],
'metric':['precomputed'],
} #20
# DecisionTree
DT_grids = {'criterion': ['mse'],
'splitter': ['best','random'],
'max_depth': s_logscale + [None], ##########
'max_features': ['auto', 'sqrt', 'log2'],
'random_state':[2018]
} #66
# RandomForest
RF_grids = {'n_estimators': [1000],
'criterion': ['mse'],
'max_features': f_logscale + ['auto', 'sqrt', 'log2'], ##########
'max_depth': s_logscale + [None], ##########
'n_jobs': [cpu],
'random_state': [2018],
'bootstrap':[True,False],
'min_samples_split': list(np.arange(0.01, 1, 0.2)),
'min_samples_leaf': list(np.arange(0.01, .5, 0.1)) + [1],
} #8580
# ExtraTrees
ET_grids = {'n_estimators': [50, 500, 1000, 5000],
'criterion': ['mse'],
'max_features': f_logscale7 + ['auto', 'sqrt', 'log2'], ##########
'max_depth': s_logscale7 + [None], ##########
'n_jobs': [cpu],
'random_state': [2018],
'bootstrap':[True,False],
'min_samples_split': list(np.arange(0.01, 1, 0.2)),
'min_samples_leaf': list(np.arange(0.01, .5, 0.1)) + [1],
} #19200
# GradientBoosting
GB_grids = {'loss' : ['ls', 'lad', 'huber', 'quantile'],
'alpha' : [1e-3, 1e-2, 1e-1, 0.5,0.9],
'learning_rate': [3e-1, 2e-1, 1e-1, 5e-2],
'n_estimators': [1000,5000],
'criterion': ['mse'],
'max_features': f_logscale7 + ['auto', 'sqrt', 'log2'], ##########
'max_depth': s_logscale7 + [None], ##########
'random_state': [2018]
} #12800
# Ridge
Ridge_grids = {'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3,
1e-2, 1, 5, 10, 20],
'fit_intercept': [True],
'normalize': [True, False],
'solver': ['auto', 'svd', 'cholesky', 'lsqr',
'sparse_cg', 'sag', 'saga'],
'random_state': [2018]
} #140
# Lasso
Lasso_grids = {'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3,
1e-2, 1, 5, 10, 20],
'fit_intercept': [True],
'normalize': [True, False],
'random_state': [2018],
'selection': ['random', 'cyclic']
} #40
# ElasticNet
EN_grids = {'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3,
1e-2, 1, 5, 10, 20],
'l1_ratio': list(np.arange(0.0, 1.1, 0.1)),
'fit_intercept': [True],
'random_state': [2018],
'selection': ['random', 'cyclic']
} #200
# Linear SVR
LinearSVR_grids = {'C': [1e-4, 1e-3, 1e-2, 1e-1, 1e1,
1e2, 1e3, 1e4, 1e5, 1e6, 1e7],
'epsilon':[1e-2, 1e-1, 1e0, 1],
'loss': ['squared_epsilon_insensitive', 'epsilon_insensitive'],
'random_state': [2018],
'gamma':['auto', 100, 10, 1, 1e-4, 1e-2, 1e-3,
1e-4, 1e-5, 1e-6],
'coef0':[0, 1, 10, 100]
} #3520
# RBF SVR
RSVR_grids = {'C': [1e-4, 1e-3, 1e-2, 1e-1, 1e1,
1e2, 1e3, 1e4, 1e5, 1e6, 1e7],
'epsilon':[1e-2, 1e-1, 1e0, 1],
'kernel':['rbf'],
'gamma':['auto', 100, 10, 1, 1e-4, 1e-2, 1e-3,
1e-4, 1e-5, 1e-6],
'coef0':[0, 1, 10, 100]
} #1760
# Sigmoid SVR
SSVR_grids = {'C': [1e-4, 1e-3, 1e-2, 1e-1, 1e1,
1e2, 1e3, 1e4, 1e5, 1e6, 1e7],
'epsilon':[1e-2, 1e-1, 1e0, 1],
'kernel':['sigmoid'],
'gamma':['auto', 100, 10, 1, 1e-4, 1e-2, 1e-3,
1e-4, 1e-5, 1e-6],
'coef0':[0, 1, 10, 100]
}
#'epsilon':[1e-2, 1e-1, 1e0, 1e1, 1e2]
# Epsilon >10 causes divide by zero error,
# C<=0 causes ValueError: b'C <= 0'
#1760
# PLS Regression
PLS_grids = {'n_components': list(np.arange(1,20)),
'scale': [True, False],
'max_iter': [500],
'tol': [1e-08, 1e-06, 1e-04, 1e-02, 1e-00],
'copy': [True, False]
} #400
# XGBoost
XGB_grids = {'max_depth': s_logscale + [None], ##########
'learning_rate': [3e-1, 2e-1, 1e-1, 5e-2],
'n_estimators': [1000,5000],
'objective': ['reg:linear'],
'booster': ['gbtree', 'gblinear'],
'n_jobs': [cpu],
'gamma': [0, 0.2, 0.5, 1, 3],
'reg_alpha': [1e-3, 1e-1, 1],
'reg_lambda': [1e-3, 1e-1, 1],
'scale_pos_weight': [1],
'base_score': [0.5],
'random_state': [2018],
'silent': [1] #no running messages will be printed
} #9900
# Multi-layer Perceptron
MLP_grids = {'hidden_layer_sizes': [(100,),(200,),(100,50),(50,50),(25,25,25)],
'activation': ['identity', 'logistic', 'tanh', 'relu'],
'solver': ['lbfgs', 'sgd', 'adam'],
'alpha': [1e-3, 1e-1, 1, 10, 100],
'batch_size': ['auto'],
'max_iter': [50,100,200,400],
'learning_rate': ['constant'],
'random_state': [2018,14,2362,3456,24,968,90],
} #7,680
# In[16]:
reg_names = ["KNeighbors",
"DecisionTree",
"RandomForest",
"ExtraTrees",
"GradientBoosting",
"Ridge", "Lasso", "ElasticNet",
"LinearSVR", "RadialSVR", "SigmoidSVR",
"PLSRegressor",
"XGBRegressor",
"MLPRegressor",
]
dm_names = [
"sokalsneath",
"correlation",
"dice",
"cosine",
"chebyshev",
"jaccard",
"rogerstanimoto",
"yule",
"hamming",
"euclidean",
"sokalmichener",
"canberra",
"matching",
"braycurtis",
"aitchison",
"russellrao",
"kulsinski",
"sqeuclidean",
"cityblock",
"weighted_unifrac",
"unweighted_unifrac",
"weighted_normalized_unifrac",
"generalized_unifrac"
]
ensemble_names = ["RandomForest",
"ExtraTrees",
"GradientBoosting",
"XGBRegressor"]
#names = ["KNeighbors", "Ridge", "Lasso", "ElasticNet"]
names = reg_names + dm_names #### reg_names + dm_names
dm_set = set(dm_names) # for easy look-up
# Each regressor and their grid, preserving order given above
regressors = [
KNeighborsRegressor,
DecisionTreeRegressor,
RandomForestRegressor,
ExtraTreesRegressor,
GradientBoostingRegressor,
Ridge, Lasso, ElasticNet,
LinearSVR, SVR, SVR, PLSRegression,
XGBRegressor,
MLPRegressor
]
##regressors = [KNeighborsRegressor,Ridge, Lasso, ElasticNet] ########
regressors += [KNeighborsRegressor] * len(dm_names) ### += to =
all_param_grids = [
KNN_grids,
DT_grids,
RF_grids,
ET_grids,
GB_grids,
Ridge_grids,
Lasso_grids,
EN_grids,
LinearSVR_grids,
RSVR_grids,
SSVR_grids,
PLS_grids,
XGB_grids,
MLP_grids,
]
all_param_grids += [KNNDistance_grids] * len(dm_names)
regFromName = dict(zip(names, regressors))
gridFromName = dict(zip(names, all_param_grids))
# ## Main benchmarking loop
# In[18]:
target = None
#Specify column to predict
if (dataset==0): #82-soil
target = 'ph'
if (dataset==1):
target = 'days_since_placement'
if (dataset==2):
target = 'age'
if (dataset==3):
target = 'fermentation_day'
if (dataset>=4 and dataset<10):
target = 'qiita_host_age'
if (dataset>=10):
target = 'age'
# In[19]:
# ENSURE METADATA TARGET IS TYPE INT
exp.sample_metadata[target] = pd.to_numeric(exp.sample_metadata[target])
# In[ ]:
for reg_idx, (reg, name, grid) in enumerate(zip(regressors, names, all_param_grids)):
is_distmatrix = name in dm_set #Boolean switch for distance-matrix specific code blocks
if is_distmatrix: ##### Use specific X and y for distance matrix benchmarking, not amplicon experiment object
md = exp.sample_metadata
dm = DistanceMatrix.read(distmatrix_fp[dataset]+name+'.txt')
md = md.filter(dm.ids,axis='index')
dm = dm.filter(md.index, strict=True)
X_dist = dm.data
y_dist = md[target]
if (name=="PLSRegressor"):
md = exp.sample_metadata
X_dist = exp.data.toarray()
y_dist = md[target]
# Make directory for this regressor if it does not yet exist
dir_name = dir_prefix +'/' +dir_prefix + '-' + name
if not os.path.isdir(dir_name):
os.mkdir(dir_name, mode=0o755)
paramsList = list(ParameterGrid(grid))
# For each set of parameters, get scores for model across 10 folds
for param_idx, param in enumerate(paramsList):
# If the benchmark data for this param set doesn't exist, benchmark it
if not (os.path.isfile(dir_name+'/'+str(param_idx).zfill(5)+'_predictions.pkl') #####changed from if not
or os.path.isfile(dir_name+'/'+str(param_idx).zfill(5)+'_fold_rmse.pkl')):
if is_distmatrix or (name=="PLSRegressor"): #If benchmarking distance matrix:
# new splits generator for each set of parameterzs
if (dataset==2): #Use GroupKFold with Malnutrition dataset (2)
splits = GroupKFold(n_splits = 16).split(X = X_dist, y = y_dist, groups = md['Child_ID'])
else:
splits = RepeatedSortedStratifiedKFold(5, 3, random_state=2018).split(X_dist, y_dist)
### Start Timing
start = time.process_time()
df = pd.DataFrame(columns = ['CV', 'SAMPLE', 'Y_PRED', 'Y_TRUE'])
cv_idx = 0
CV = []
Y_PRED = []
Y_TRUE = []
Y_IDS = []
for train_index, test_index in splits: #y_classes
if is_distmatrix:
X_train, X_test = X_dist[train_index], X_dist[list(test_index),:][:,list(train_index)]
else:
X_train, X_test = X_dist[train_index], X_dist[test_index]
y_train, y_test = y_dist[train_index], y_dist[test_index]
y_train = np.asarray(y_train, dtype='int')
y_test_ids = y_dist.index[test_index] ####
#print(y_test_ids)
#print(X_train, X_train.shape)
#print(y_train, y_train.shape)
if is_distmatrix:
m = KNeighborsRegressor(**param)
else:
m = reg(**param)
m.fit(X_train, y_train)
y_pred = m.predict(X_test)
CV.extend([cv_idx] * len(y_pred))
Y_PRED.extend(y_pred)
Y_TRUE.extend(y_test)
Y_IDS.extend(y_test_ids)
cv_idx += 1
df['CV'] = CV
df['Y_TRUE'] = Y_TRUE
df['Y_PRED'] = Y_PRED
df['SAMPLE'] = Y_IDS
end = time.process_time()
### End Timing
else: #All others; Not benchmarking distance matrix
if (dataset==2): #Use GroupKFold with Malnutrition dataset (2)
it = exp.regress(target, reg(),
cv = GroupKFold(n_splits = 16).split(X = exp.data,
y = exp.sample_metadata['Age_days'],
groups = exp.sample_metadata['Child_ID']),
params=[param])
else:
it = exp.regress(target, reg(),
cv = RepeatedSortedStratifiedKFold(5, 3, random_state=2018),
params=[param])
### Start Timing
start = time.process_time()
df = next(it)
end = time.process_time()
### End Timing
# Predictions-level dataframe, saved by param_idx
df.to_pickle(dir_name+'/'+str(param_idx).zfill(5)+'_predictions.pkl')
# Calculate RMSE for each fold in this set
fold_rmse = pd.DataFrame()
fold_rmse['RMSE'] = df.groupby('CV').apply(lambda x: np.sqrt(mean_squared_error(x['Y_PRED'].values, x['Y_TRUE'].values)))
fold_rmse['PARAM'] = [param] * fold_rmse.shape[0]
# Store runtimes for this param set
param_runtime = end-start
fold_rmse['RUNTIME'] = [param_runtime] * fold_rmse.shape[0]
fold_rmse.to_pickle(dir_name+'/'+str(param_idx).zfill(5)+'_fold_rmse.pkl')
print(param_idx)
print(param)
# ## NULL MODELS
# * Needs one null model per dataset
# * Randomly permute y_true 100 times, and compare each permutation to y_true (RMSE)
# * Series, len=102, [mean, median, RMSE_00, ... RMSE99]
# * Save to pkl for use in large box/violin plot. Plot mean+median as points, RMSE as box/violin
# In[22]:
y_true = exp.sample_metadata[target].values
data = []
index = []
for i in range(0,100):
index.append('RMSE_'+str(i))
y_perm = np.random.permutation(y_true)
data.append(sqrt(mean_squared_error(y_perm, y_true)))
data = [np.mean(data), np.median(data)] + data
index = ['MEAN', "MEDIAN"] + index
null_model = pd.Series(data, index=index)
|
## usage
# at a level above emmer/
# python3 -m emmer.test.test_revisit_threshold
from ..main.basic.read import RawDataImport, RetrospectDataImport, GetFiles
from ..main.advanced.iteration import MinusOneVNE, InfoRichCalling, reproducibility_summary
from ..bake import BakeCommonArgs
from ..posthoc.stats.revisit_thresholds import RevisitThresholdArgs, FindMinFromLM, evaluateInputTuple, floatRange, RevisitThreshold
from ..troubleshoot.err.error import *
#from ..troubleshoot.warn.warning import *
from itertools import compress
import numpy.testing
import argparse
import unittest
import pandas
import numpy
import time
import sys
import os
class TestRevisitThresholdArgs(unittest.TestCase):
def test_getArgsV(self):
print('\ntest_RevisitThreshold.getArgsN:')
print(' case 1: missing args.n setting')
sys.argv[1:] = ['-m', 'RevisitThreshold']
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
with self.assertRaises(ErrorCode13):
revisit_threshold_args = RevisitThresholdArgs(args = processed_args.args, current_wd = '', suppress = True, silence = False)
revisit_threshold_args.getArgsE()
print('===========================================================')
def test_getArgsI(self):
print('\ntest_RevisitThreshold.getArgsI:')
print(' case 1: missing args.i setting')
sys.argv[1:] = ['-m', 'RevisitThreshold']
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
with self.assertRaises(ErrorCode14):
revisit_threshold_args = RevisitThresholdArgs(args = processed_args.args, current_wd = '', suppress = True, silence = False)
revisit_threshold_args.getArgsI()
print('===========================================================')
def test_getArgsUTL(self):
print('\ntest_RevisitThreshold.getArgsUTL:')
print(' case 1: have no args.l, args.u, and args.t setting')
sys.argv[1:] = ['-m', 'RevisitThreshold']
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
with self.assertRaises(ErrorCode18):
revisit_threshold_args = RevisitThresholdArgs(args = processed_args.args, current_wd = '', suppress = True, silence = False)
revisit_threshold_args.getArgsUTL()
print('===========================================================')
def test_getRevisitThresholdArgs(self): # TODO
pass
class TestEvaluateInputTuple(unittest.TestCase):
def test_passing(self):
print('\ntest_EvaluateInputTuple.passing:')
print(' case 1: passing test condition')
        print('         1.1: a reasonable input that != 0,0,0')
        print('              -t: "2,1,0.5"')
args_t = '2,1,0.5'
my_result = evaluateInputTuple(args_t)
expected_result = (2,1,0.5)
self.assertEqual(my_result, expected_result)
print(' ---------------------------------------------------')
print(' 1.2: an input that == 0,0,0')
print(' -t: "0,0,0"')
args_t = '0,0,0'
my_result = evaluateInputTuple(args_t)
expected_result = (0,0,0)
self.assertEqual(my_result, expected_result)
print('===========================================================')
def test_error17(self):
print('\ntest_EvaluateInputTuple.error17:')
print(' -t: "2,2,2,1"')
args_t = '2,2,2,1'
with self.assertRaises(ErrorCode17):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
def test_error20(self):
print('\ntest_EvaluateInputTuple.error20:')
print(' Case 1: first element less than 0')
print(' -t: "-2,2,1"')
args_t = '-2,2,1'
with self.assertRaises(ErrorCode20):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
## Case 1: confirm the function generate a empty array
print(' ---------------------------------------------------')
print(' Case 2: second element equals to 0')
print(' -t: "2,0,1"')
args_t = '2,0,1'
with self.assertRaises(ErrorCode20):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
def test_error19(self):
print('\ntest_EvaluateInputTuple.error19:')
print(' -t: "1,2,1"')
args_t = '1,2,1'
with self.assertRaises(ErrorCode19):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
def test_error24(self):
print('\ntest_EvaluateInputTuple.error24:')
print(' -t: "3,2,0"')
args_t = '3,2,0'
with self.assertRaises(ErrorCode24):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
def test_error22(self):
print('\ntest_EvaluateInputTuple.error22:')
print(' -t: "3,2,2"')
args_t = '3,2,2'
with self.assertRaises(ErrorCode22):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
def test_error25(self):
print('\ntest_EvaluateInputTuple.error25:')
print(' -t: "3,2,7"')
args_t = '3,2,7'
with self.assertRaises(ErrorCode25):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
def test_error26(self):
print('\ntest_EvaluateInputTuple.error26:')
print(' -t: "2,2,7"')
args_t = '2,2,7'
with self.assertRaises(ErrorCode26):
output = evaluateInputTuple(args_t, suppress = True, second_chance = False)
print('===========================================================')
class TestFloatRange(unittest.TestCase):
def test_floatRange(self):
print('\ntest_floatRange:')
print(' input_tuple = (3, 1, 0.5)')
input_tuple = (3, 1, 0.5)
my_result = floatRange(input_tuple)
expected_result = [1, 1.5, 2, 2.5, 3]
self.assertEqual(my_result, expected_result)
print('===========================================================')
class TestFindMinFromLM(unittest.TestCase):
def test_FindMinFromLM(self):
print('\ntest_FindMinFromLM:')
data = {'x':[1, 2, 3, 4, 5, 6], 'y':[2.2, 4.2, 6.2, 8.2, 10.2, 12.2]}
        input_df = pandas.DataFrame(data, index=['sample1', 'sample2', 'sample3', 'sample4', 'sample5', 'sample6'])
|
import datetime
import requests
import pandas as pd
from typing import Union, Dict
from collections import namedtuple
from yfinance_ez.base import TickerBase
from yfinance_ez.constants import (
TimePeriods, QUARTERLY, YEARLY, HistoryColumns, USER_AGENT_HEADERS)
class Ticker(TickerBase):
def __repr__(self):
return f'yfinance_ez.Ticker object <{self.ticker}>'
def _download_options(self, date=None) -> Dict:
url = f'{self._base_url}/v7/finance/options/{self.ticker}'
if date:
url += f'?date={date}'
r = requests.get(url=url, proxies=self._proxy, headers=USER_AGENT_HEADERS).json()
if r['optionChain']['result']:
for exp in r['optionChain']['result'][0]['expirationDates']:
self._expirations[datetime.datetime.fromtimestamp(
exp).strftime('%Y-%m-%d')] = exp
return r['optionChain']['result'][0]['options'][0]
return {}
def _options2df(self, opt: Dict, tz=None) -> pd.DataFrame:
data = pd.DataFrame(opt).reindex(columns=[
'contractSymbol',
'lastTradeDate',
'strike',
'lastPrice',
'bid',
'ask',
'change',
'percentChange',
'volume',
'openInterest',
'impliedVolatility',
'inTheMoney',
'contractSize',
'currency'])
        data['lastTradeDate'] = pd.to_datetime(data['lastTradeDate'], unit='s')
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 13:48:34 2019
@author: vrrodovalho
"""
import os
import sys
import re
import pathlib
import pandas as pd
import numpy as np
import KEGG as kegg
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text
from tabulate import tabulate
'''
'''
def update_annotation(df, column, replace_dict):
'''
Updates a dataframe column based on a dictionary.
Parameters
----------
df : DataFrame
DataFrame that will be modified.
column : str
Name of the column that will be modified.
replace_dict : dict
Dictionary whose keys will be replaced by its values in the selected
column in df.
Returns
-------
df : DataFrame
The updated DataFrame.
'''
df = df.copy()
df[column] = df[column].replace(replace_dict, regex=True)
return df
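# Example usage of update_annotation (illustrative only; the column name and
# the obsolete-to-current mapping below are assumptions, not taken from the data):
# annotated_df = update_annotation(annotated_df, column='KEGG_Pathway',
#                                  replace_dict={'ko01130': 'ko01110'})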
def export_gmt(df, cat_col='KEGG_Pathway', cat_sep=',', genes_col='gi',
description_map={}, replace_dict={}, cat_fill_na='?',
ref_org='', drop_unknowns=True, filter_by_size=False,
size_limit=(2,150), forbidden_set_prefixes=['map'],
output_dir='', file_name=''):
'''
Converts a df mapping gene -> categories to a df mapping category -> genes
And creates a GMT file for using with gProfiler
Parameters
----------
df : DataFrame
        Input DataFrame mapping genes to their functional categories.
cat_col : str, optional
Name of the column with the categories annotation (KEGG, COG...).
The default is 'KEGG_Pathway'.
cat_sep : str, optional
The delimiter that separates multiple categories in a row.
The default is ','.
genes_col : str, optional
Name of the column with the genes. The default is 'gi'.
description_map : dict, optional
A dictionary that gives a description to each category.
That could be COG letters as keys and their meaning as values.
The default is {}.
replace_dict : dict, optional
A dictionary to replace row values, useful to take out obsolete
annotation. The default is {}.
cat_fill_na : str, optional
A value to fill missing values. The default is '?'.
ref_org : str, optional
A kegg organism code to be used as a reference for orthologues
filtering. The default is ''.
drop_unknowns : bool, optional
        Whether to drop the functional category defined as unknown (i.e. the
        rows that previously had NaN values). The default is True.
filter_by_size : bool, optional
Whether to filter functional categories by min/max size.
The default is False.
size_limit : tuple, optional
A tuple containing 2 integers, a min and a max size for the sets of
functional categories. The default is (2,150).
forbidden_set_prefixes : list, optional
If some gene sets are forbidden, they can be identified in a prefix
list to be removed from the dataset. The default is ['map'].
output_dir : str, optional
Output directory. The default is ''.
file_name : str, optional
Output file name. The default is ''.
Returns
-------
sub_df : DataFrame
A DataFrame close to the GMT file produced.
'''
# simplify df
sub_df = df.loc[:, [genes_col, cat_col]].copy()
# make needed replacements
if replace_dict:
sub_df = update_annotation(sub_df, column=cat_col,
replace_dict=replace_dict)
# fill na as specified
sub_df[cat_col].fillna(cat_fill_na, inplace=True)
# devide rows with multiple annotation based on delimiter
if cat_sep == '':
sub_df = (sub_df.set_index([genes_col])
.stack()
.apply(lambda x: pd.Series(list(x)))
.stack()
.unstack(-2)
.reset_index(-1, drop=True)
.reset_index()
)
else:
sub_df = (sub_df.set_index([genes_col])
.stack()
.str.split(cat_sep, expand=True)
.stack()
.unstack(-2)
.reset_index(-1, drop=True)
.reset_index()
)
sub_df = ( sub_df.groupby(by=cat_col)[genes_col]
.apply(set)
.reset_index()
)
# # filter by set size, to eliminate sets too short or too long
if filter_by_size:
if size_limit:
min_size = min(size_limit)
max_size = max(size_limit)
sub_df['size'] = sub_df[genes_col].apply(len)
sub_df = sub_df.sort_values(by=['size'], ascending=False)
sub_df = sub_df.loc[ ( ( sub_df['size'] > min_size ) & \
( sub_df['size'] < max_size ) ),
[cat_col, genes_col]]
else:
str1 = "If filter_by_size is True, size_limit should be defined "
str2 = "as a tuple containing 2 int: a min and a max limit."
print(str1 + str2)
sub_df = sub_df.set_index(cat_col)
# take out unknown category (privously na)
if drop_unknowns:
sub_df = sub_df.drop([cat_fill_na])
# take out forbidden gene sets
if forbidden_set_prefixes:
for i in forbidden_set_prefixes:
sub_df = sub_df[~sub_df.index.str.startswith(i)]
# Use a KEGG reference organism to drop unrelated pathways
if ref_org:
allowed_ids = search_allowed_pathways_ids(ref_org)
sub_df = sub_df[sub_df.index.isin(allowed_ids)]
# change 1-column set to several columns and name them accordingly
f = lambda x: 'element_{}'.format(x + 1)
sub_df = pd.DataFrame(sub_df[genes_col].values.tolist(),
sub_df.index, dtype=object
).rename(columns=f)
sub_df = sub_df.reset_index()
# Add description to gene sets, if available
if description_map:
description_map[cat_fill_na] = 'Unknown'
sub_df['description'] = sub_df[cat_col].map(description_map)
else:
sub_df['description'] = np.nan
# reorder description column, to be in GMT style
cols = list(sub_df.columns)
cols.remove('description')
cols.insert(1, 'description')
sub_df = sub_df.loc[:,cols]
# # save and return
output_file = output_dir / file_name
sub_df.to_csv(output_file, header=False, index=False, sep='\t')
return sub_df
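# Example call of export_gmt (illustrative; the input DataFrame, column names
# and output file name below are assumptions):
# gmt_df = export_gmt(df=proteomics_core, cat_col='KEGG_Pathway', cat_sep=',',
#                     genes_col='gi', filter_by_size=True, size_limit=(2, 150),
#                     output_dir=output_dir, file_name='kegg_gene_sets.gmt')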
def generate_gprofiler_list(df, id_column='', category_filter={},
ordered_by='', output_dir='', file_name=''):
'''
Returns a list of genes to use in GProfiler.
Parameters
----------
df : DataFrame
The initial DataFrame.
id_column : str
The name of the column that contains gene IDs.
category_filter : dict, optional
A dictionary in which keys are column names (str) and values are
allowed rows in that column (str). The default is {}.
ordered_by : str, optional
The name of the column that will be used to sort gene list.
It could be a expression measure. The default is ''.
output_dir : str
Output directory.
file_name : str
Output file name.
Returns
-------
string : str
A list of genes to be used in GProfiler.
'''
df = df.copy()
# Filter gene list by column values (such as a category)
if category_filter:
for col in category_filter:
value = category_filter[col]
df = df.loc[df[col] == value, :]
# Sort gene list by column values (such as expression)
if ordered_by:
df[ordered_by] = df[ordered_by].abs()
df = df.sort_values(by=ordered_by, ascending=False)
min_value = df.iloc[0, df.columns.get_loc(ordered_by)]
max_value = df.iloc[-1, df.columns.get_loc(ordered_by)]
string = "Ordered in {}, from {} to {}. ".format(ordered_by,
min_value, max_value)
print(string)
# Make final string and files
proteins = df.loc[:, id_column].astype(str).to_list()
string = '\n'.join(proteins)
output_file = output_dir / file_name
with open(output_file, 'w') as output:
output.write(string)
return string
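# Example call of generate_gprofiler_list (illustrative; id column and output
# file name are assumptions):
# gene_list = generate_gprofiler_list(proteomics_core, id_column='gi',
#                                     ordered_by='', output_dir=output_dir,
#                                     file_name='core_gene_list.txt')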
def merge_enrichment_sources(source_files={'name': 'dataframe'},
max_p_val=0.05, v_limit=6, replace_values={},
output_dir='', file_name=''):
'''
Merge enrichment results from different sources (KEGG, COG) into the
same dataframe, corresponding to the same set of proteins.
Parameters
----------
source_files : dict
A dictionary where the keys are string identifiers (KEGG, COG) and the
values are the dataframes corresponding to the enrichment results
corresponding to those strings.
max_p_val : float, optional
The p-value threshold of significance. The default is 0.05.
v_limit : float, optional
Vertical superior limit of log(p-value). Values exceeding that
threshold are capped to it. The default is 6.
replace_values : dict, optional
A dictionary where the keys are column names and the values are
replacement dictionaries, containing key-value pairs for replacing
values in that column. The default is {}.
output_dir : str, optional
Output directory. The default is ''.
file_name : str, optional
Output file name. The default is ''.
Returns
-------
df : DataFrame
A merged DataFrame.
'''
df = pd.DataFrame()
for item_name in source_files:
item = source_files[item_name]
item['source'] = item_name
df = pd.concat([df, item])
df['log_p_value'] = np.log10(df['adjusted_p_value']) * -1
df['sig'] = np.where(df['adjusted_p_value'] <= max_p_val, 'sig.', 'not sig.')
df = df.sort_values(by=['source', 'log_p_value'], ascending=False)
df['log_p_value_capped'] = np.where(df['log_p_value'] >= v_limit,
v_limit, df['log_p_value'])
if replace_values:
for col in replace_values:
replace_dict = replace_values[col]
df[col] = df[col].replace(replace_dict, regex=True)
# save file
df.to_excel(output_dir/file_name, index=False)
return df
def plot_enrichment(df, data = {'x':'source', 'y':'log_p_value_capped',
'label_col':'term_id', 'label_desc_col':'term_name'},
v_limit=6, max_p_val=0.05,
significancy={'column':'sig','true':'sig.','false':'not sig.'},
jitter_val=0.3, s=4, reg_categories= {'column': 'sig',
'true':'up', 'false':'down', 'true_color':'blue',
'false_color':'red'}, title='Functional enrichment',
save_fig=True,output_dir='',file_name='',file_format='tif',
dpi=300):
'''
Plot enrichment
Parameters
----------
df : DataFrame
A dataframe containing the data to be plotted. Ideally generated by
merge_enrichment_sources function.
data : dict, optional
A dictionary specifying column names in df for x, y and label values.
The default is {'x':'source', 'y':'log_p_value_capped',
'label_col':'term_id', 'label_desc_col':'term_name'}.
max_p_val : float, optional
The p-value threshold of significance. The default is 0.05.
v_limit : float, optional
Vertical superior limit of log(p-value). Values exceeding that
threshold are capped to it. The default is 6.
significancy : dict, optional
A dictionary specifying which is the significancy column and what
values should be considered True and False.
The default is {'column':'sig','true':'sig.','false':'not sig.'}.
jitter_val : float, optional
Parameter for stripplot. Affects the points distribution.
The default is 0.3.
s : float, optional
The size of the points in the graph. The default is 4.
reg_categories : dict, optional
A dictionary specifying regulation categories (up-regulated,
down-regulated), the column, their values and colors.
The default is {'column':'sig', 'true':'up', 'false':'down',
'true_color':'blue', 'false_color':'red'}.
title : str, optional
A title string to be plotted in the graph.
The default is 'Functional enrichment'.
save_fig : bool, optional
Wether to save figure or not. The default is True.
output_dir : str, optional
Output directory. The default is ''.
file_name : str, optional
Output file name. The default is ''.
file_format : str, optional
File format. The default is 'tif'.
dpi : int, optional
Resolution. The default is 300.
Returns
-------
dict
A dictionary containing the final DataFrame and a legend string.
'''
df = df.copy()
fig = plt.figure()
ax = plt.axes()
sub_df_sig = df.loc[ df[significancy['column']] == significancy['true'] ]
sub_df_not = df.loc[ df[significancy['column']] == significancy['false'] ]
x = data['x']
y = data['y']
commons = {'ax':ax,'x':x,'y':y,'size':s,'marker':'s','jitter':jitter_val}
# plot not significtives
sns.stripplot(data=sub_df_not, linewidth=0.1, alpha=0.5, color='grey',
**commons)
# plot significatives
if reg_categories:
palette = {reg_categories['true']:reg_categories['true_color'],
reg_categories['false']:reg_categories['false_color']}
sns.stripplot(data=sub_df_sig,linewidth=0.5,alpha=1.0,palette=palette,
hue=reg_categories['column'],dodge=True, **commons)
else:
sns.stripplot(data=sub_df_sig,linewidth=0.5,alpha=1.0,color='blue',
**commons)
# title?
if title != '':
plt.title(title, loc='center')
# plot lines
ax.set(ylim=(-0.2, v_limit+1))
log_max_p_val = np.log10(max_p_val) * -1
plt.axhline(y=log_max_p_val , color='grey', linewidth=0.5, linestyle='--')
plt.axhline(y=v_limit , color='grey', linewidth=0.5, linestyle='--')
# plot labels
plt.xlabel('', fontsize=12, fontname="sans-serif")
plt.ylabel('Statistical significance [-log10(P-value)]', fontsize=12,
fontname="sans-serif")
# create a df with x-y coordinates only for significatives
df_graph = pd.DataFrame({'x' : [], y : []})
for i in range(len(ax.collections)):
coll = ax.collections[i]
x_values, y_values = np.array(coll.get_offsets()).T
# look for significative y
annotate = False
for i in y_values:
if i >= log_max_p_val:
annotate = True
break
# if found significative y, add to df that will be used to annotate
if annotate:
sub_df = pd.DataFrame({'x':x_values, y:y_values})
df_graph = pd.concat([df_graph, sub_df])
# transfer id col to df_graph in order to have unique identifiers
# and avoid label confusion
unique_id = data['label_col']
unique_id_desc = data['label_desc_col']
df_graph[unique_id] = sub_df_sig[unique_id]
# anottate significative y
merged = sub_df_sig.merge(df_graph, on=[y, unique_id], how='left')
sig_x = merged['x']
sig_y = merged[y]
labels = merged[unique_id]
coordinates = []
for xi, yi, label in zip(sig_x, sig_y, labels):
element = ax.annotate(label, xy=(xi,yi), xytext=(3,3), size=8,
ha="center", va="top", textcoords="offset points")
coordinates.append(element)
# ajust labels to avoid superposition
adjust_text(coordinates, autoalign='xy',
arrowprops=dict(arrowstyle='<-, head_length=0.05, head_width=0.05',
color='black', alpha=0.6, linewidth=0.5))
plt.show()
# return a legend string and file
legend_df = sub_df_sig.loc[:,[unique_id, unique_id_desc]]
legend = tabulate(legend_df, showindex=False)
legend_file_name = '.'.join(file_name.split('.')[:-1]) + '.txt'
output_legend = output_dir / legend_file_name
with open(output_legend, 'w') as output:
output.write(legend)
# save
if save_fig:
fig.savefig(output_dir/file_name, format=file_format, dpi=dpi,
bbox_inches="tight")
return {'sub_df_sig':sub_df_sig, 'df_graph':df_graph,
'df':merged, 'legend':legend}
def search_allowed_pathways_ids(ref_org, unknown='?'):
'''
Search in KEGG all the pathways ids for an organism
Parameters
----------
ref_org : str
KEGG organism code.
Returns
-------
allowed_ids : list
List of allowed ids (with ko prefix).
'''
kegg_data = kegg.get_KEGG_data(org=ref_org, get_pathway_list=True,
get_genes_and_pathways=False, format_conversion=False,
genes_names=False)
org_pathways = kegg.parse_KEGG_pathways_description(kegg_data['pathways'])
allowed_ref_ids = list(org_pathways.keys())
allowed_ids = []
p = '[a-z]+([0-9]+)'
for ref_id in allowed_ref_ids:
general_id = re.match(p,ref_id).groups()[0]
general_id = 'ko' + general_id
allowed_ids.append(general_id)
allowed_ids.append(unknown)
return allowed_ids
def export_tables(proteomics_df=None, proteomics_id_col='', enrichment_df=None,
enrichment_id_col='', enrichment_src_col='', merge_all=False,
enrichment_desc_col='', split_ch=',', enrichment_filter={},
map_src2annot={}, output_dir='', file_name_prefix=''):
'''
    Merge proteomics annotation with functional enrichment results, filter the
    enrichment table according to specific rules, and export the resulting tables.
Parameters
----------
proteomics_df : DataFrame
A DataFrame containing proteomics annotation.
proteomics_id_col : str
The name of the column in proteomics_df where the protein ids are.
enrichment_df : DataFrame
A DataFrame containing enrichment results for proteins in proteomics_df.
enrichment_id_col : str
The name of the column where the functional category ids are specified.
enrichment_src_col : str
The name of the column where the source database is specified.
enrichment_desc_col : str
The name of the column where the description of id is specified.
split_ch : str
A character to split a string into a list of items in
enrichment_id_set_col. The default is ','.
merge_all : bool
        Whether to merge the elements of all enriched categories into a single
        DataFrame. Otherwise, they are returned in a dictionary keyed by
        category. The default is False.
enrichment_filter : dict, optional
A dictionary describing a filter for enrichment_df the format
{ col_name : [allowed_values] }. Only rows fulfilling these rules are
accepted.
map_src2annot : dict
        A dictionary mapping each enrichment source name (as found in
        enrichment_src_col) to the proteomics_df column that holds the
        corresponding annotation, e.g. {'KEGG': 'KEGG_Pathway'}.
output_dir : str,
The output directory.
file_name_prefix : str
A prefix for every output file name. The default is ''.
Returns
-------
None.
'''
prot = proteomics_df.copy()
enri = enrichment_df.copy()
# get descritions
desc = dict(zip( enri[enrichment_id_col], enri[enrichment_desc_col]))
# filter enrichment data (significative)
if enrichment_filter:
for col in enrichment_filter:
col_values = enrichment_filter[col]
enri = enri.loc[enri[col].isin(col_values) ,:]
# get dictionary of enriched categories by enrichment source
enri_items = enri.loc[:,[enrichment_src_col, enrichment_id_col]]
enri_items = ( enri_items.groupby(enrichment_src_col)[enrichment_id_col]
.apply(set).to_dict() )
# search items in proteomics_df that correspond to enriched categories
enriched_elements = {}
appended_data = []
prot = prot.fillna('?')
for src in enri_items:
where2look = map_src2annot[src]
cats = enri_items[src]
for cat in cats:
description = desc[cat]
sub_prot = prot.loc[prot[where2look].str.contains(cat) ,:]
n_prot = sub_prot.shape[0]
appended_data.append(sub_prot)
print("{} \t{} \t(n={}) \t{}".format(src, cat, n_prot,
description))
enriched_elements[cat + ' : ' + description] = sub_prot
file_name = '{}_{}_{}_{}.xlsx'.format(file_name_prefix, src, cat,
description)
sub_prot.astype(str).to_excel(output_dir / file_name, index=False)
single_df = pd.concat(appended_data)
single_df = single_df.drop_duplicates()
file_name = '{}_merged.xlsx'.format(file_name_prefix)
single_df.astype(str).to_excel(output_dir / file_name, index=False)
# merge all enriched categories elements
if merge_all:
enriched_elements = single_df
return enriched_elements
##############################################################################
# DIRECTORY SYSTEM
src_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
main_dir = os.path.dirname(src_dir)
root_dir = os.path.dirname(main_dir)
data_dir = pathlib.Path(main_dir) / 'data'
input_dir = pathlib.Path(data_dir) / 'input'
output_dir = pathlib.Path(data_dir) / 'output'
sys.path.insert(0, root_dir)
# FILE PATHS
proteomics_SEC_and_UC_file = input_dir / 'proteomics_SEC_and_UC_curated.xlsx'
proteomics_UC_file = input_dir / 'proteomics_UC.xlsx'
proteomics_core_file = input_dir / 'proteome_core.xlsx'
proteomics_accessory_file = input_dir / 'proteome_accessory.xlsx'
proteomics_single_file = input_dir / 'proteome_single.xlsx'
proteomics_not_EVs_file = input_dir / 'proteome_not_EVs.xlsx'
cogs_file = input_dir / 'COGs.xlsx'
kegg_ko_storage_file = input_dir / 'kegg_ko.data'
gprofiler_core_kegg_file = input_dir / 'gProfiler_core_kegg.csv'
gprofiler_core_cog_file = input_dir / 'gProfiler_core_cog.csv'
gprofiler_accessory_kegg_file = input_dir / 'gProfiler_accessory_kegg.csv'
gprofiler_accessory_cog_file = input_dir / 'gProfiler_accessory_cog.csv'
gprofiler_single_kegg_file = input_dir / 'gProfiler_single_kegg.csv'
gprofiler_single_cog_file = input_dir / 'gProfiler_single_cog.csv'
# READ FILES
proteomics_SEC_and_UC = pd.read_excel(proteomics_SEC_and_UC_file)
proteomics_UC = pd.read_excel(proteomics_UC_file)
proteomics_core = pd.read_excel(proteomics_core_file)
proteomics_accessory = pd.read_excel(proteomics_accessory_file)
proteomics_single = pd.read_excel(proteomics_single_file)
proteomics_not_EVs = pd.read_excel(proteomics_not_EVs_file)
cogs_map = pd.read_excel(cogs_file)
kegg_data_ko = kegg.get_all_KEGG_data(kegg_ko_storage_file, org='ko')
gprofiler_core_kegg = pd.read_csv(gprofiler_core_kegg_file)
gprofiler_core_cog = pd.read_csv(gprofiler_core_cog_file)
gprofiler_accessory_kegg = pd.read_csv(gprofiler_accessory_kegg_file)
gprofiler_accessory_cog = pd.read_csv(gprofiler_accessory_cog_file)
gprofiler_single_kegg = pd.read_csv(gprofiler_single_kegg_file)
|
"""Data Updating Utility (:mod:`bucky.util.update_data_repos`).
A utility for fetching updated data for mobility and case data from public repositories.
This module pulls from public git repositories and preprocessed the
data if necessary. For case data, unallocated or unassigned cases are
distributed as necessary.
"""
import logging
import os
import ssl
import subprocess
import urllib.request
import numpy as np
import pandas as pd
import tqdm
from .read_config import bucky_cfg
# Options for correcting territory data
TERRITORY_DATA = bucky_cfg["data_dir"] + "/population/territory_pop.csv"
ADD_AMERICAN_SAMOA = False
# CSSE UIDs for Michigan prison information
MI_PRISON_UIDS = [84070004, 84070005]
# CSSE IDs for Utah local health districts
UT_LHD_UIDS = [84070015, 84070016, 84070017, 84070018, 84070019, 84070020]
def get_timeseries_data(col_name, filename, fips_key="FIPS", is_csse=True):
"""Transforms a historical data file to a dataframe with FIPs, date, and case or death data.
Parameters
----------
col_name : str
Column name to extract from data.
filename : str
Location of filename to read.
fips_key : str, optional
Key used in file for indicating county-level field.
is_csse : bool, optional
Indicates whether the file is CSSE data. If True, certain areas
without FIPS are included.
Returns
-------
df : pandas.DataFrame
Dataframe with the historical data indexed by FIPS, date
"""
# Read file
df = pd.read_csv(filename)
# CSSE-specific correction
if is_csse:
# Michigan prisons have no FIPS, replace with their UID to be processed later
mi_data = df.loc[df["UID"].isin(MI_PRISON_UIDS)]
mi_data = mi_data.assign(FIPS=mi_data["UID"])
df.loc[mi_data.index] = mi_data.values # noqa: PD011
# Utah health districts have NAN FIPS, replace with UID
utah_local_dist_data = df.loc[df["UID"].isin(UT_LHD_UIDS)]
utah_local_dist_data = utah_local_dist_data.assign(FIPS=utah_local_dist_data["UID"])
df.loc[utah_local_dist_data.index] = utah_local_dist_data.values
# Get dates and FIPS columns only
cols = list(df.columns)
idx = cols.index("1/22/20")
# Keep columns after index
keep_cols = cols[idx:]
# Add FIPS
keep_cols.append(fips_key)
# Drop other columns
df = df[keep_cols]
# Reindex and stack
df = df.set_index(fips_key)
# Stack
df = df.stack().reset_index()
# Replace column names
df.columns = ["FIPS", "date", col_name]
return df
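# Illustrative shape of the frame returned by get_timeseries_data
# (values are hypothetical):
#       FIPS     date    Confirmed
#       1001  1/22/20            0
#       1001  1/23/20            0
#        ...      ...          ...
# i.e. one row per (FIPS, date) pair instead of one column per date.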
def distribute_unallocated_csse(confirmed_file, deaths_file, hist_df):
"""Distributes unallocated historical case and deaths data from CSSE.
JHU CSSE data contains state-level unallocated data, indicated with
"Unassigned" or "Out of" for each state. This function distributes
these unallocated cases based on the proportion of cases in each
county relative to the state.
Parameters
----------
confirmed_file : str
filename of CSSE confirmed data
deaths_file : str
filename of CSSE death data
hist_df : pandas.DataFrame
current historical DataFrame containing confirmed and death data
indexed by date and FIPS code
Returns
-------
hist_df : pandas.DataFrame
modified historical DataFrame with cases and deaths distributed
"""
hist_df = hist_df.reset_index()
if "index" in hist_df.columns:
hist_df = hist_df.drop(columns=["index"])
hist_df = hist_df.assign(state_fips=hist_df["FIPS"] // 1000)
hist_df = hist_df.set_index(["date", "FIPS"])
# Read cases and deaths files
case_df = pd.read_csv(confirmed_file)
deaths_df = pd.read_csv(deaths_file)
# Get unassigned and 'out of X'
cases_unallocated = case_df.loc[
(case_df["Combined_Key"].str.contains("Out of")) | (case_df["Combined_Key"].str.contains("Unassigned"))
]
cases_unallocated = cases_unallocated.assign(state_fips=cases_unallocated["FIPS"].astype(str).str[3:].astype(float))
deaths_unallocated = deaths_df.loc[
(deaths_df["Combined_Key"].str.contains("Out of")) | (deaths_df["Combined_Key"].str.contains("Unassigned"))
]
deaths_unallocated = deaths_unallocated.assign(
state_fips=deaths_unallocated["FIPS"].astype(str).str[3:].astype(float),
)
# Sum unassigned and 'out of X'
extra_cases = cases_unallocated.groupby("state_fips").sum()
extra_deaths = deaths_unallocated.groupby("state_fips").sum()
extra_cases = extra_cases.drop(
columns=[
"UID",
"code3",
"FIPS",
"Lat",
"Long_",
],
)
extra_deaths = extra_deaths.drop(
columns=[
"UID",
"Population",
"code3",
"FIPS",
"Lat",
"Long_",
],
)
# Reformat dates to match processed data's format
extra_cases.columns = pd.to_datetime(extra_cases.columns)
extra_deaths.columns = pd.to_datetime(extra_deaths.columns)
# Iterate over states in historical data
for state_fips in tqdm.tqdm(
extra_cases.index.array,
desc="Distributing unallocated state data",
dynamic_ncols=True,
):
# Get extra cases and deaths
state_extra_cases = extra_cases.xs(state_fips)
state_extra_deaths = extra_deaths.xs(state_fips)
# Get historical data
state_df = hist_df.loc[hist_df["state_fips"] == state_fips]
state_df = state_df.reset_index()
state_confirmed = state_df[["FIPS", "date", "cumulative_reported_cases"]]
state_confirmed = state_confirmed.pivot(index="FIPS", columns="date", values="cumulative_reported_cases")
frac_df = state_confirmed / state_confirmed.sum()
frac_df = frac_df.replace(np.nan, 0)
# Distribute cases and deaths based on this matrix
dist_cases = frac_df.mul(state_extra_cases, axis="columns").T.stack()
dist_deaths = frac_df.mul(state_extra_deaths, axis="columns").T.stack()
# Index historical data
state_df = state_df.set_index(["date", "FIPS"])
tmp = dist_deaths.to_frame(name="cumulative_deaths")
tmp["cumulative_reported_cases"] = dist_cases
state_df += tmp
hist_df.loc[state_df.index] = state_df.values
hist_df = hist_df.drop(columns=["state_fips"])
return hist_df
def distribute_data_by_population(total_df, dist_vect, data_to_dist, replace):
"""Distributes data by population across a state or territory.
Parameters
----------
total_df : pandas.DataFrame
DataFrame containing confirmed and death data indexed by date and
FIPS code
dist_vect : pandas.DataFrame
Population data for each county as proportion of total state
population, indexed by FIPS code
data_to_dist: pandas.DataFrame
Data to distribute, indexed by data
replace : bool
If true, distributed values overwrite current historical data in
DataFrame. If false, distributed values are added to current data
Returns
-------
total_df : pandas.DataFrame
Modified input dataframe with distributed data
"""
# Create temporary dataframe and merge
tmp = total_df.reset_index()
tmp = tmp.merge(dist_vect, on="FIPS")
tmp = tmp.merge(data_to_dist, on="date")
# Use population fraction to scale
if replace:
tmp = tmp.assign(cumulative_reported_cases=tmp["pop_fraction"] * tmp["cumulative_reported_cases_y"])
tmp = tmp.assign(cumulative_deaths=tmp["pop_fraction"] * tmp["cumulative_deaths_y"])
else:
tmp = tmp.assign(
cumulative_reported_cases=tmp["cumulative_reported_cases_x"]
+ tmp["pop_fraction"] * tmp["cumulative_reported_cases_y"],
)
tmp = tmp.assign(
cumulative_deaths=tmp["cumulative_deaths_x"] + tmp["pop_fraction"] * tmp["cumulative_deaths_y"],
)
# Discard merge columns
tmp = tmp[["FIPS", "date", "cumulative_reported_cases", "cumulative_deaths"]]
tmp = tmp.set_index(["FIPS", "date"])
total_df.loc[tmp.index] = tmp.values
return total_df
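# Sketch of the arithmetic performed above, for a county c on date d:
#   replace=True :  value[c, d]  = pop_fraction[c] * unallocated_value[d]
#   replace=False:  value[c, d] += pop_fraction[c] * unallocated_value[d]
# where pop_fraction[c] is the county's share of the state/territory population.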
def get_county_population_data(csse_deaths_file, county_fips):
"""Uses JHU CSSE deaths file to get county population data as as fraction of population across list of counties.
Parameters
----------
csse_deaths_file : str
filename of CSSE deaths file
county_fips: numpy.ndarray
list of FIPS to return population data for
Returns
-------
population_df: pandas.DataFrame
DataFrame with population fraction data indexed by FIPS
"""
# Use CSSE Deaths file to get population values by FIPS
df = pd.read_csv(csse_deaths_file)
population_df = df.loc[df["FIPS"].isin(county_fips)][["FIPS", "Population"]].set_index("FIPS")
population_df = population_df.assign(pop_fraction=population_df["Population"] / population_df["Population"].sum())
population_df = population_df.drop(columns=["Population"])
return population_df
def distribute_utah_data(df, csse_deaths_file):
"""Distributes Utah case data for local health departments spanning multiple counties.
Utah has 13 local health districts, six of which span multiple counties. This
function distributes those cases and deaths by population across their constituent
counties.
Parameters
----------
df : pandas.DataFrame
DataFrame containing historical data indexed by FIPS and date
csse_deaths_file : str
File location of CSSE deaths file
Returns
-------
df : pandas.DataFrame
Modified DataFrame containing corrected Utah historical data
indexed by FIPS and date
"""
local_districts = {
# Box Elder, Cache, Rich
84070015: {"name": "Bear River, Utah, US", "FIPS": [49003, 49005, 49033]},
# Juab, Millard, Piute, Sevier, Wayne, Sanpete
84070016: {"name": "Central Utah, Utah, US", "FIPS": [49023, 49027, 49031, 49041, 49055, 49039]},
# Carbon, Emery, Grand
84070017: {"name": "Southeast Utah, Utah, US", "FIPS": [49007, 49015, 49019]},
# Garfield, Iron, Kane, Washington, Beaver
84070018: {"name": "Southwest Utah, Utah, US", "FIPS": [49017, 49021, 49025, 49053, 49001]},
# Daggett, Duchesne, Uintah
84070019: {"name": "TriCounty, Utah, Utah, US", "FIPS": [49009, 49013, 49047]},
        # Weber, Morgan
84070020: {"name": "Weber-Morgan, Utah, US", "FIPS": [49057, 49029]},
}
for district_uid, local_district in local_districts.items():
# Get list of fips
fips_list = local_district["FIPS"]
# Deaths file has population data
county_pop = get_county_population_data(csse_deaths_file, fips_list)
# Get district data
district_data = df.loc[district_uid]
        # Distribute district data across its constituent counties (replace county values)
df = distribute_data_by_population(df, county_pop, district_data, True)
# Drop health districts data from dataframe
df = df.loc[~df.index.get_level_values(0).isin(UT_LHD_UIDS)]
return df
def distribute_mdoc(df, csse_deaths_file):
"""Distributes Michigan Department of Corrections data across Michigan counties by population.
Parameters
----------
df : pandas.DataFrame
Current historical DataFrame indexed by FIPS and date, which
includes MDOC and FCI data
csse_deaths_file : str
File location of CSSE deaths file (contains population data)
Returns
-------
df : pandas.DataFrame
Modified historical dataframe with Michigan prison data distributed
and added to Michigan data
"""
# Get Michigan county populations
tmp = df.reset_index()
michigan_counties = tmp.loc[tmp["FIPS"] // 1000 == 26]["FIPS"].unique()
michigan_pop = get_county_population_data(csse_deaths_file, michigan_counties)
# Get prison data
mdoc_data = df.xs(MI_PRISON_UIDS[0], level=0)
fci_data = df.xs(MI_PRISON_UIDS[1], level=0)
# Sum and distribute
mi_unallocated = mdoc_data + fci_data
# Add to Michigan data, do not replace
df = distribute_data_by_population(df, michigan_pop, mi_unallocated, False)
# Drop prison data from dataframe
df = df.loc[~df.index.get_level_values(0).isin(MI_PRISON_UIDS)]
return df
def distribute_territory_data(df, add_american_samoa):
"""Distributes territory-wide case and death data for territories.
Uses county population to distribute cases for US Virgin Islands,
Guam, and CNMI. Optionally adds a single case to the most populous
American Samoan county.
Parameters
----------
df : pandas.DataFrame
Current historical DataFrame indexed by FIPS and date, which
includes territory-wide case and death data
add_american_samoa: bool
If true, adds 1 case to American Samoa
Returns
-------
df : pandas.DataFrame
Modified historical dataframe with territory-wide data
distributed to counties
"""
# Get population data from file
age_df = pd.read_csv(TERRITORY_DATA, index_col="fips")
# use age-stratified data to get total pop per county
pop_df = pd.DataFrame(age_df.sum(axis=1)).reset_index()
pop_df = pop_df.rename(columns={"fips": "FIPS", 0: "total"})
    # Drop PR (72, CSSE now has county-level PR data) and AS (60, handled separately below)
pop_df = pop_df.loc[~(pop_df["FIPS"] // 1000).isin([72, 60])]
# Create nan dataframe for territories (easier to update than append)
tfips = pop_df["FIPS"].unique()
dates = df.index.unique(level=1).array
fips_col = []
date_col = []
for fips in tfips:
for d in dates:
fips_col.append(fips)
date_col.append(d)
tframe = pd.DataFrame.from_dict(
{
"FIPS": fips_col,
"date": date_col,
"cumulative_reported_cases": [np.nan for d in date_col],
"cumulative_deaths": [np.nan for d in date_col],
},
).set_index(["FIPS", "date"])
df = pd.concat([df, tframe])
# CSSE has state-level data for Guam, CNMI, USVI
state_level_fips = [66, 69, 78]
for state_fips in state_level_fips:
state_data = df.xs(state_fips, level=0)
state_pop = pop_df.loc[pop_df["FIPS"] // 1000 == state_fips]
state_pop = state_pop.assign(pop_fraction=state_pop["total"] / state_pop["total"].sum())
state_pop = state_pop.drop(columns=["total"])
df = distribute_data_by_population(df, state_pop, state_data, True)
# Optionally add 1 confirmed case to most populous AS county
if add_american_samoa:
as_frame = pd.DataFrame.from_dict(
{
"FIPS": [60050 for d in dates],
"date": dates,
"cumulative_reported_cases": [1.0 for d in dates],
"cumulative_deaths": [0.0 for d in dates],
},
).set_index(["FIPS", "date"])
df = pd.concat([df, as_frame])
return df
def process_csse_data():
"""Performs pre-processing on CSSE data.
CSSE data is separated into two different files: confirmed cases and
deaths. These two files are combined into one dataframe, indexed by
FIPS and date with two columns, Confirmed and Deaths. This function
distributes CSSE that is either unallocated or territory-wide instead
of county-wide. Michigan data from the state Department of Corrections
and Federal Correctional Institution is distributed to Michigan counties.
New York City data which is currently all placed in one county (New
York County) is distributed to the other NYC counties. Territory data
for Guam, CNMI, and US Virgin Islands is also distributed. This data
is written to a CSV.
"""
data_dir = bucky_cfg["data_dir"] + "/cases/COVID-19/csse_covid_19_data/csse_covid_19_time_series/"
# Get confirmed and deaths files
confirmed_file = os.path.join(data_dir, "time_series_covid19_confirmed_US.csv")
deaths_file = os.path.join(data_dir, "time_series_covid19_deaths_US.csv")
confirmed = get_timeseries_data("Confirmed", confirmed_file)
deaths = get_timeseries_data("Deaths", deaths_file)
# rename columns
confirmed = confirmed.rename(columns={"Confirmed": "cumulative_reported_cases"})
deaths = deaths.rename(columns={"Deaths": "cumulative_deaths"})
# Merge datasets
data = confirmed.merge(deaths, on=["FIPS", "date"], how="left").fillna(0)
# Remove missing FIPS
data = data[data.FIPS != 0]
# Replace FIPS with adm2
# data.rename(columns={"FIPS" : "adm2"}, inplace=True)
# print(data.columns)
data = data.set_index(["FIPS", "date"])
# Distribute territory and Michigan DOC data
data = distribute_territory_data(data, ADD_AMERICAN_SAMOA)
data = distribute_mdoc(data, deaths_file)
data = distribute_utah_data(data, deaths_file)
data = data.reset_index()
data = data.assign(date=pd.to_datetime(data["date"]))
data = data.sort_values(by="date")
data = distribute_unallocated_csse(confirmed_file, deaths_file, data)
# Rename FIPS index to adm2
data.index = data.index.rename(["date", "adm2"])
# Write to files
hist_file = bucky_cfg["data_dir"] + "/cases/csse_hist_timeseries.csv"
logging.info(f"Saving CSSE historical data as {hist_file}")
data.to_csv(hist_file)
def update_hhs_hosp_data():
"""Retrieves updated historical data from healthdata.gov and writes to CSV."""
logging.info("Downloading HHS Hospitalization data")
hosp_url = "https://healthdata.gov/api/views/g62h-syeh/rows.csv?accessType=DOWNLOAD"
filename = bucky_cfg["data_dir"] + "/cases/hhs_hosps.csv"
# Download case and death data
context = ssl._create_unverified_context() # pylint: disable=W0212 # nosec
# Create filename
with urllib.request.urlopen(hosp_url, context=context) as testfile, open( # nosec
filename,
"w",
encoding="utf-8",
) as f:
f.write(testfile.read().decode())
# Map state abbreviation to ADM1
    hhs_data = pd.read_csv(filename)
|
"""This module retrieves stock lists."""
from abc import ABC, abstractmethod
import pandas as pd
from pathlib import Path
from string import capwords
import re
from financialdatapy import request
#: Root directory of a package.
PATH = str(Path(__file__).parents[1]) + '/'
class NeedsUpdateError(Exception):
"""Raised if cik list needs to be updated to the latest."""
pass
class StockList(ABC):
"""A class representing stock list of listed companies."""
def template_method(self, update: bool) -> pd.DataFrame:
"""Get stock list data saved in local.
If it is not saved in local, retrieve the data from source.
:param update: Updates stock list to the latest.
:type update: bool
:raises: :class:`NeedsUpdateError`: If cik list needs to be updated
to the latest.
:return: Stock list.
:rtype: pandas.DataFrame
"""
try:
if update:
raise NeedsUpdateError()
usa_stock_list = pd.read_csv(PATH+'data/usa_stock_list.csv')
except (FileNotFoundError, NeedsUpdateError):
usa_stock_list = self.get_data()
usa_stock_list.to_csv(PATH+'data/usa_stock_list.csv', index=False)
return usa_stock_list
@abstractmethod
def get_data(self):
pass
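# Usage sketch (illustrative): concrete stock lists are used through
# template_method(), which returns the cached CSV when present and only calls
# get_data() on a cache miss or when update=True, e.g.:
# usa_stock_list = UsStockList().template_method(update=False)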
class UsStockList(StockList):
"""A class representing stock list of listed companies in USA."""
def get_data(self) -> pd.DataFrame:
"""Get a list of companies CIK(Central Index Key) from SEC.
The list also contains ticker of a company.
:return: Dataframe with CIK, company name, and ticker for its columns.
:rtype: pandas.DataFrame
"""
url = 'https://www.sec.gov/files/company_tickers_exchange.json'
res = request.Request(url)
cik_data = res.get_json()
        cik_list = pd.DataFrame(cik_data['data'], columns=cik_data['fields'])
|
import pandas as pd
#declare url variable with html address
url = 'http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2'
#load the html table into a dataframe, header=1 means that there are headers in the table
df = pd.read_html(url,header=1)[0]
#rename table columns
df.columns=['RK','PLAYER','TEAM','GP','G','A','PTS','+/-','PIM','PTS/G','SOG','PCT','GWG','G2','A2','G3','A3']
#drop rows (axis=0) that have fewer than 4 non-NaN values (thresh=4 keeps rows with at least 4 valid entries)
df=df.dropna(axis=0,thresh = 4)
df
#drop duplicates from the table (specify column names)
df.drop_duplicates(subset=['PLAYER','TEAM','GP','G','A','PTS','+/-','PIM','PTS/G','SOG','PCT','GWG','G2','A2','G3','A3'],inplace=True)
# drop RK column, axis =1 for columns, inplace=True - do the action to this table
df.drop('RK',1,inplace=True)
#drop row 11 in the dataframe, axis = 0 for rows
df.drop(11,0,inplace=True)
#Converting columns to appropriate types
df.PCT = pd.to_numeric(df.PCT, errors='coerce')
|
# -*- coding: utf-8 -*-
"""
Project : PyCoA
Date : april 2020 - march 2021
Authors : <NAME>, <NAME>, <NAME>
Copyright ©pycoa.fr
License: See joint LICENSE file
Module : coa.display
About :
-------
An interface module to easily plot pycoa data with bokeh
"""
from coa.tools import kwargs_test, extract_dates, verb, get_db_list_dict
from coa.error import *
import math
import pandas as pd
import geopandas as gpd
import numpy as np
from collections import defaultdict
import itertools
import json
import io
from io import BytesIO
import base64
from IPython import display
import copy
import locale
from bokeh.models import ColumnDataSource, TableColumn, DataTable, ColorBar, LogTicker,\
HoverTool, CrosshairTool, BasicTicker, GeoJSONDataSource, LinearColorMapper, LogColorMapper,Label, \
PrintfTickFormatter, BasicTickFormatter, NumeralTickFormatter, CustomJS, CustomJSHover, Select, \
Range1d, DatetimeTickFormatter, Legend, LegendItem, Text
from bokeh.models.widgets import Tabs, Panel
from bokeh.plotting import figure
from bokeh.layouts import row, column, gridplot
from bokeh.palettes import Category10, Category20, Viridis256
from bokeh.models import Title
from bokeh.io import export_png
from bokeh import events
from bokeh.models.widgets import DateSlider
from bokeh.models import LabelSet, WMTSTileSource
from bokeh.transform import transform, cumsum
import shapely.geometry as sg
import branca.colormap
from branca.colormap import LinearColormap
from branca.element import Element, Figure
import folium
from PIL import Image
import coa.geo as coge
import matplotlib.pyplot as plt
import datetime as dt
import bisect
from functools import wraps
from IPython.core.display import display, HTML
width_height_default = [500, 380]
MAXCOUNTRIESDISPLAYED = 27
class CocoDisplay:
def __init__(self, db=None, geo = None):
verb("Init of CocoDisplay() with db=" + str(db))
self.database_name = db
self.dbld = get_db_list_dict()
self.lcolors = Category20[20]
self.scolors = Category10[5]
self.ax_type = ['linear', 'log']
self.geom = []
self.geopan = gpd.GeoDataFrame()
self.location_geometry = None
self.boundary_metropole = None
self.listfigs = []
self.options_stats = ['when','input','input_field']
self.options_charts = [ 'bins']
self.options_front = ['where','option','which','what','visu']
self.available_tiles = ['openstreet','esri','stamen']
self.available_modes = ['mouse','vline','hline']
self.uptitle, self.subtitle = ' ',' '
self.dfigure_default = {'plot_height':width_height_default[1] ,'plot_width':width_height_default[0],'title':None,'textcopyright':'default'}
self.dvisu_default = {'mode':'mouse','tile':self.available_tiles[0],'orientation':'horizontal','cursor_date':None,'maplabel':None,'guideline':False}
self.when_beg = dt.date(1, 1, 1)
self.when_end = dt.date(1, 1, 1)
self.alloptions = self.options_stats + self.options_charts + self.options_front + list(self.dfigure_default.keys()) +\
list(self.dvisu_default.keys()) + ['resumetype']
self.iso3country = self.dbld[self.database_name][0]
self.granularity = self.dbld[self.database_name][1]
self.namecountry = self.dbld[self.database_name][2]
try:
if self.granularity != 'nation':
self.geo = coge.GeoCountry(self.iso3country)
if self.granularity == 'region':
self.location_geometry = self.geo.get_region_list()[['code_region', 'name_region', 'geometry']]
self.location_geometry = self.location_geometry.rename(columns={'name_region': 'location'})
if self.iso3country == 'PRT':
tmp=self.location_geometry.rename(columns={'name_region': 'location'})
tmp = tmp.loc[tmp.code_region=='PT.99']
self.boundary_metropole =tmp['geometry'].total_bounds
if self.iso3country == 'FRA':
tmp=self.location_geometry.rename(columns={'name_region': 'location'})
tmp = tmp.loc[tmp.code_region=='999']
self.boundary_metropole =tmp['geometry'].total_bounds
elif self.granularity == 'subregion':
list_dep_metro = None
self.location_geometry = self.geo.get_subregion_list()[['code_subregion', 'name_subregion', 'geometry']]
self.location_geometry = self.location_geometry.rename(columns={'name_subregion': 'location'})
#if country == 'FRA':
# list_dep_metro = geo.get_subregions_from_region(name='Métropole')
#elif country == 'ESP':
# list_dep_metro = geo.get_subregions_from_region(name='España peninsular')
#if list_dep_metro:
# self.boundary_metropole = self.location_geometry.loc[self.location_geometry.code_subregion.isin(list_dep_metro)]['geometry'].total_bounds
else:
self.geo=coge.GeoManager('name')
geopan = gpd.GeoDataFrame()#crs="EPSG:4326")
info = coge.GeoInfo()
allcountries = self.geo.get_GeoRegion().get_countries_from_region('world')
geopan['location'] = [self.geo.to_standard(c)[0] for c in allcountries]
geopan = info.add_field(field=['geometry'],input=geopan ,geofield='location')
geopan = gpd.GeoDataFrame(geopan, geometry=geopan.geometry, crs="EPSG:4326")
geopan = geopan[geopan.location != 'Antarctica']
geopan = geopan.dropna().reset_index(drop=True)
self.location_geometry = geopan
except:
raise CoaTypeError('What data base are you looking for ?')
''' FIGURE COMMUN FOR ALL '''
def standardfig(self, **kwargs):
"""
Create a standard Bokeh figure, with pycoa.fr copyright, used in all the bokeh charts
"""
plot_width = kwargs.get('plot_width', self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height', self.dfigure_default['plot_height'])
textcopyright = kwargs.get('textcopyright', self.dfigure_default['textcopyright'])
if textcopyright == 'default':
textcopyright = '©pycoa.fr (data from: {})'.format(self.database_name)
else:
textcopyright = '©pycoa.fr ' + textcopyright
citation = Label(x=0.65 * plot_width - len(textcopyright), y=0.01 * plot_height,
x_units='screen', y_units='screen',
text_font_size='1.5vh', background_fill_color='white', background_fill_alpha=.75,
text=textcopyright)
for i in list(self.dvisu_default.keys()) + self.options_front + self.options_charts + ['textcopyright'] + self.options_stats + ['date_slider']:
if i in kwargs.keys():
kwargs.pop(i)
kwargs.pop('title')
fig = figure(**kwargs, tools=['save', 'box_zoom,reset'], toolbar_location="right")
#fig.add_layout(citation)
fig.add_layout(Title(text=self.uptitle, text_font_size="10pt"), 'above')
fig.add_layout(Title(text=self.subtitle, text_font_size="8pt", text_font_style="italic"), 'below')
return fig
def get_listfigures(self):
return self.listfigs
def set_listfigures(self,fig):
if not isinstance(fig,list):
fig = [fig]
self.listfigs = fig
''' WRAPPER COMMUN FOR ALL'''
def decowrapper(func):
'''
Main decorator it mainly deals with arg testings
'''
@wraps(func)
def wrapper(self, input = None, input_field = None, **kwargs):
"""
Parse a standard input, return :
- pandas: with location keyword (eventually force a column named 'where' to 'location')
- kwargs:
* keys = [plot_width, plot_width, title, when, title_temporal,bins, what, which]
Note that method used only the needed variables, some of them are useless
"""
if not isinstance(input, pd.DataFrame):
                raise CoaTypeError('Input must be a pandas DataFrame with the pycoa structure!')
kwargs_test(kwargs, self.alloptions, 'Bad args used in the display function.')
when = kwargs.get('when', None)
which = kwargs.get('which', input.columns[2])
if input_field and 'cur_' in input_field:
what = which
else:
# cumul is the default
what = kwargs.get('what', which)
if input_field is None:
input_field = which
if isinstance(input_field,list):
test = input_field[0]
else:
test = input_field
if input[[test,'date']].isnull().values.all():
                raise CoaKeyError('All values for ' + which + ' are NaN or empty')
option = kwargs.get('option', None)
bins = kwargs.get('bins', 10)
title = kwargs.get('title', None)
#textcopyright = kwargs.get('textcopyright', 'default')
kwargs['plot_width'] = kwargs.get('plot_width', self.dfigure_default['plot_width'])
kwargs['plot_height'] = kwargs.get('plot_height', self.dfigure_default['plot_height'])
if 'where' in input.columns:
input = input.rename(columns={'where': 'location'})
            if 'codelocation' not in input.columns and 'clustername' not in input.columns:
input['codelocation'] = input['location']
input['clustername'] = input['location']
input['rolloverdisplay'] = input['location']
input['permanentdisplay'] = input['location']
else:
if self.granularity == 'nation' :
#input['codelocation'] = input['codelocation'].apply(lambda x: str(x).replace('[', '').replace(']', '') if len(x)< 10 else x[0]+'...'+x[-1] )
input['permanentdisplay'] = input.apply(lambda x: x.clustername if self.geo.get_GeoRegion().is_region(x.clustername) else str(x.codelocation), axis = 1)
else:
if self.granularity == 'subregion' :
input = input.reset_index(drop=True)
if isinstance(input['codelocation'][0],list):
input['codelocation'] = input['codelocation'].apply(lambda x: str(x).replace("'", '')\
if len(x)<5 else '['+str(x[0]).replace("'", '')+',...,'+str(x[-1]).replace("'", '')+']')
trad={}
cluster = input.clustername.unique()
if isinstance(input.location[0],list):
cluster = [i for i in cluster]
for i in cluster:
if i == self.namecountry:
input['permanentdisplay'] = input.clustername #[self.dbld[self.database_name][2]]*len(input)
else:
if self.geo.is_region(i):
trad[i] = self.geo.is_region(i)
elif self.geo.is_subregion(i):
trad[i] = self.geo.is_subregion(i)#input.loc[input.clustername==i]['codelocation'].iloc[0]
else:
trad[i] = i
trad={k:(v[:3]+'...'+v[-3:] if len(v)>8 else v) for k,v in trad.items()}
if ',' in input.codelocation[0]:
input['permanentdisplay'] = input.clustername
else:
input['permanentdisplay'] = input.codelocation#input.clustername.map(trad)
elif self.granularity == 'region' :
if all(i == self.namecountry for i in input.clustername.unique()):
input['permanentdisplay'] = [self.namecountry]*len(input)
else:
input['permanentdisplay'] = input.codelocation
input['rolloverdisplay'] = input['location']
maplabel = kwargs.get('maplabel', None)
if maplabel and 'unsorted' in maplabel:
pass
else:
input = input.sort_values(by=input_field, ascending = False).reset_index(drop=True)
uniqloc = input.clustername.unique()
if len(uniqloc) < 5:
colors = self.scolors
else:
colors = self.lcolors
colors = itertools.cycle(colors)
dico_colors = {i: next(colors) for i in uniqloc}
input = input.copy()
if not 'colors' in input.columns:
input.loc[:,'colors'] = input['clustername'].map(dico_colors)#(pd.merge(input, country_col, on='location'))
if not isinstance(input_field, list):
input_field = [input_field]
else:
input_field = input_field
col2=which
when_beg = input[[col2,'date']].date.min()
when_end = input[[col2,'date']].date.max()
if when:
when_beg, when_end = extract_dates(when)
if when_end > input[[col2,'date']].date.max():
when_end = input[[col2,'date']].date.max()
if when_beg == dt.date(1, 1, 1):
when_beg = input[[col2,'date']].date.min()
if not isinstance(when_beg, dt.date):
raise CoaNoData("With your current cuts, there are no data to plot.")
if when_end <= when_beg:
print('Requested date below available one, take', when_beg)
when_end = when_beg
if when_beg > input[[col2,'date']].date.max() or when_end > input[[col2,'date']].date.max():
raise CoaNoData("No available data after "+str(input[[input_field[0],'date']].date.max()))
when_end_change = when_end
for i in input_field:
if input[i].isnull().all():
raise CoaTypeError("Sorry all data are NaN for " + i)
else:
when_end_change = min(when_end_change,CocoDisplay.changeto_nonull_date(input, when_end, i))
if func.__name__ not in ['pycoa_date_plot', 'pycoa_plot', 'pycoa_scrollingmenu', 'pycoa_spiral_plot','pycoa_yearly_plot']:
if len(input_field) > 1:
print(str(input_field) + ' is dim = ' + str(len(input_field)) + '. No effect with ' + func.__name__ + '! Take the first input: ' + input_field[0])
input_field = input_field[0]
if when_end_change != when_end:
when_end = when_end_change
self.when_beg = when_beg
self.when_end = when_end
input = input.loc[(input['date'] >= self.when_beg) & (input['date'] <= self.when_end)]
title_temporal = ' (' + 'between ' + when_beg.strftime('%d/%m/%Y') + ' and ' + when_end.strftime('%d/%m/%Y') + ')'
if func.__name__ not in ['pycoa_date_plot', 'pycoa_plot', 'pycoa_scrollingmenu', 'pycoa_spiral_plot','pycoa_yearly_plot']:
title_temporal = ' (' + when_end.strftime('%d/%m/%Y') + ')'
title_option=''
if option:
if 'sumallandsmooth7' in option:
option.remove('sumallandsmooth7')
option += ['sumall','smooth7']
title_option = ' (option: ' + str(option)+')'
input_field_tostring = str(input_field).replace('[', '').replace(']', '').replace('\'', '')
whichtitle = which
if 'pop' in input_field_tostring:
whichtitle = input_field_tostring.replace('weekly ','').replace('daily ','')
if 'daily' in input_field_tostring:
titlefig = whichtitle + ', ' + 'day to day difference' + title_option
elif 'weekly' in input_field_tostring:
titlefig = whichtitle + ', ' + 'week to week difference' + title_option
else:
if 'cur_' in which or 'idx_' in which:
#titlefig = which + ', ' + 'current ' + which.replace('cur_','').replace('idx_','')+ title_option
titlefig = whichtitle + ', current value' + title_option
else:
titlefig = whichtitle + ', cumulative'+ title_option
if title:
title = title
else:
title = titlefig
self.uptitle = title
textcopyright = kwargs.get('textcopyright', None)
if textcopyright:
textcopyright = '©pycoa.fr ' + textcopyright + title_temporal
kwargs.pop('textcopyright')
else:
textcopyright = '©pycoa.fr data from: {}'.format(self.database_name)+' '+title_temporal
self.subtitle = textcopyright
kwargs['title'] = title+title_temporal
return func(self, input, input_field, **kwargs)
return wrapper
@decowrapper
def pycoa_resume_data(self, input, input_field, **kwargs):
loc=list(input['clustername'].unique())
input['cases'] = input[input_field]
resumetype = kwargs.get('resumetype','spiral')
if resumetype == 'spiral':
dspiral={i:CocoDisplay.spiral(input.loc[ (input.clustername==i) &
(input.date >= self.when_beg) &
(input.date <= self.when_end)].sort_values(by='date')) for i in loc}
input['resume']=input['clustername'].map(dspiral)
elif resumetype == 'spark':
spark={i:CocoDisplay.sparkline(input.loc[ (input.clustername==i) &
(input.date >= self.when_beg) &
(input.date <= self.when_end)].sort_values(by='date')) for i in loc}
input['resume']=input['clustername'].map(spark)
else:
raise CoaError('pycoa_resume_data supports resumetype "spiral" or "spark" only.')
input = input.loc[input.date==input.date.max()].reset_index(drop=True)
def path_to_image_html(path):
return '<img src="'+ path + '" width="60" >'
input=input.drop(columns=['permanentdisplay','rolloverdisplay','colors','cases'])
input=input.apply(lambda x: x.round(2) if x.name in [input_field,'daily','weekly'] else x)
if isinstance(input['location'][0], list):
col=[i for i in list(input.columns) if i not in ['clustername','location','codelocation']]
col.insert(0,'clustername')
input = input[col]
input=input.set_index('clustername')
else:
input = input.drop(columns='clustername')
input=input.set_index('location')
return input.to_html(escape=False,formatters=dict(resume=path_to_image_html))
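# Usage sketch (illustrative only, not part of the library): assumes `cocodisplay` is a
# CocoDisplay instance and `df` is a pycoa-structured DataFrame. The HTML table returned
# above can be rendered in a Jupyter notebook like this:
# from IPython.display import HTML, display
# html_table = cocodisplay.pycoa_resume_data(df, 'daily', resumetype='spiral')
# display(HTML(html_table))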
''' DECORATORS FOR PLOT: DATE, VERSUS, SCROLLINGMENU '''
def decoplot(func):
"""
decorator for plot purpose
"""
@wraps(func)
def inner_plot(self, input = None, input_field = None, **kwargs):
mode = kwargs.get('mode', None)
if mode:
mode = mode
else:
mode = self.dvisu_default['mode']
if mode not in self.available_modes:
raise CoaTypeError('Don\'t know the mode wanted. So far:' + str(self.available_modes))
kwargs['mode'] = mode
if 'location' in input.columns:
location_ordered_byvalues = list(
input.loc[input.date == self.when_end].sort_values(by=input_field, ascending=False)['clustername'].unique())
input = input.copy() # needed to avoid warning
input.loc[:,'clustername'] = pd.Categorical(input.clustername,
categories=location_ordered_byvalues, ordered=True)
input = input.sort_values(by=['clustername', 'date']).reset_index(drop = True)
if func.__name__ != 'pycoa_scrollingmenu' :
if len(location_ordered_byvalues) >= MAXCOUNTRIESDISPLAYED:
input = input.loc[input.clustername.isin(location_ordered_byvalues[:MAXCOUNTRIESDISPLAYED])]
list_max = []
for i in input_field:
list_max.append(max(input.loc[input.clustername.isin(location_ordered_byvalues)][i]))
if len([x for x in list_max if not np.isnan(x)]) > 0:
amplitude = (np.nanmax(list_max) - np.nanmin(list_max))
if amplitude > 10 ** 4:
self.ax_type.reverse()
if func.__name__ == 'pycoa_scrollingmenu' :
if isinstance(input_field,list):
if len(input_field) > 1:
print(str(input_field) + ' is dim = ' + str(len(input_field)) + '. No effect with ' + func.__name__ + '! Take the first input: ' + input_field[0])
input_field = input_field[0]
if self.dbld[self.database_name][1] == 'nation' and self.dbld[self.database_name][0] != 'WW':
func.__name__ = 'pycoa_date_plot'
return func(self, input, input_field, **kwargs)
return inner_plot
''' PLOT VERSUS '''
@decowrapper
@decoplot
def pycoa_plot(self, input = None, input_field = None ,**kwargs):
'''
-----------------
Create a versus plot according to arguments.
See help(pycoa_plot).
Keyword arguments
-----------------
- input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element. It should be a list of dim=2. Moreover the 2 variables must be present
in the DataFrame considered.
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
if len(input_field) != 2:
raise CoaTypeError('Two variables are needed to plot a versus chart ... ')
panels = []
cases_custom = CocoDisplay.rollerJS()
if self.get_listfigures():
self.set_listfigures([])
listfigs=[]
for axis_type in self.ax_type:
standardfig = self.standardfig( x_axis_label = input_field[0], y_axis_label = input_field[1],
y_axis_type = axis_type, **kwargs )
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), ('date', '@date{%F}'),
(input_field[0], '@{casesx}' + '{custom}'),
(input_field[1], '@{casesy}' + '{custom}')],
formatters={'location': 'printf', '@{casesx}': cases_custom, '@{casesy}': cases_custom,
'@date': 'datetime'}, mode = kwargs['mode'],
point_policy="snap_to_data")) # ,PanTool())
for loc in input.clustername.unique():
pandaloc = input.loc[input.clustername == loc].sort_values(by='date', ascending=True)
pandaloc.rename(columns={input_field[0]: 'casesx', input_field[1]: 'casesy'}, inplace=True)
standardfig.line(x='casesx', y='casesy',
source=ColumnDataSource(pandaloc), legend_label=pandaloc.clustername.iloc[0],
color=pandaloc.colors.iloc[0], line_width=3, hover_line_width=4)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title=axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
listfigs.append(standardfig)
CocoDisplay.bokeh_legend(standardfig)
self.set_listfigures(listfigs)
tabs = Tabs(tabs=panels)
return tabs
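# Usage sketch (illustrative only): a versus plot needs exactly two variables, both present
# in the pycoa-structured DataFrame `df`; `cocodisplay` is an assumed CocoDisplay instance.
# from bokeh.io import show
# tabs = cocodisplay.pycoa_plot(df, ['daily', 'cumul'], title='daily vs cumulative')
# show(tabs)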
''' DATE PLOT '''
@decowrapper
@decoplot
def pycoa_date_plot(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot according to arguments. See help(pycoa_date_plot).
Keyword arguments
-----------------
- input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- guideline = False
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
panels = []
listfigs = []
cases_custom = CocoDisplay.rollerJS()
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type, x_axis_type = 'datetime',**kwargs)
i = 0
r_list=[]
maxou=-1000
lcolors = iter(self.lcolors)
line_style = ['solid', 'dashed', 'dotted', 'dotdash','dashdot']
for val in input_field:
for loc in list(input.clustername.unique()):
input_filter = input.loc[input.clustername == loc].reset_index(drop = True)
src = ColumnDataSource(input_filter)
leg = input_filter.clustername[0]
#leg = input_filter.permanentdisplay[0]
if len(input_field)>1:
leg = input_filter.permanentdisplay[0] + ', ' + val
if len(list(input.clustername.unique())) == 1:
color = next(lcolors)
else:
color = input_filter.colors[0]
r = standardfig.line(x = 'date', y = val, source = src,
color = color, line_width = 3,
legend_label = leg,
hover_line_width = 4, name = val, line_dash=line_style[i%4])
r_list.append(r)
maxou=max(maxou,np.nanmax(input_filter[val].values))
i += 1
for r in r_list:
label = r.name
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), (r.name, '@$name{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode'], renderers=[r]) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
if axis_type == 'linear':
if maxou < 1e4 :
standardfig.yaxis.formatter = BasicTickFormatter(use_scientific=False)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title = axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
standardfig.legend.label_text_font_size = '8pt'
if len(input_field) > 1 and len(input_field)*len(input.clustername.unique())>16:
standardfig.legend.visible=False
standardfig.xaxis.formatter = DatetimeTickFormatter(
days = ["%d/%m/%y"], months = ["%d/%m/%y"], years = ["%b %Y"])
CocoDisplay.bokeh_legend(standardfig)
listfigs.append(standardfig)
self.set_listfigures(listfigs)
tabs = Tabs(tabs = panels)
return tabs
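# Usage sketch (illustrative only): `cocodisplay` and `df` are assumed as above; the `when`
# string follows the dd/mm/yyyy:dd/mm/yyyy convention documented in the docstring.
# from bokeh.io import show
# tabs = cocodisplay.pycoa_date_plot(df, 'daily', guideline=True, when='01/01/2021:01/06/2021')
# show(tabs)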
''' SPIRAL PLOT '''
@decowrapper
@decoplot
def pycoa_spiral_plot(self, input = None, input_field = None, **kwargs):
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
panels = []
listfigs = []
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
borne=300
kwargs.pop('plot_width')
standardfig = self.standardfig(y_axis_type = None, x_axis_type = None,
width=kwargs['plot_height'], x_range=[-borne, borne], y_range=[-borne, borne], match_aspect=True,**kwargs)
if len(input.clustername.unique()) > 1 :
print('Can only display spiral for ONE location. I took the first one:', input.clustername[0])
input = input.loc[input.clustername == input.clustername[0]].copy()
input['date']=pd.to_datetime(input["date"])
input["dayofyear"]=input.date.dt.dayofyear
input['year']=input.date.dt.year
input['cases'] = input[input_field]
K = 2*input[input_field].max()
#drop leap days (29 February); finer handling may be needed in the future
input = input.loc[~(input['date'].dt.month.eq(2) & input['date'].dt.day.eq(29))].reset_index(drop=True)
input["dayofyear_angle"] = input["dayofyear"]*2 * np.pi/365
input["r_baseline"] = input.apply(lambda x : ((x["year"]-2020)*2 * np.pi + x["dayofyear_angle"])*K,axis=1)
size_factor = 16
input["r_cas_sup"] = input.apply(lambda x : x["r_baseline"] + 0.5*x[input_field]*size_factor,axis=1)
input["r_cas_inf"] = input.apply(lambda x : x["r_baseline"] - 0.5*x[input_field]*size_factor,axis=1)
radius = 200
def polar(theta,r,norm=radius/input["r_baseline"].max()):
x = norm*r*np.cos(theta)
y = norm*r*np.sin(theta)
return x,y
x_base,y_base=polar(input["dayofyear_angle"],input["r_baseline"])
x_cas_sup,y_cas_sup=polar(input["dayofyear_angle"],input["r_cas_sup"])
x_cas_inf,y_cas_inf=polar(input["dayofyear_angle"],input["r_cas_inf"])
xcol,ycol=[],[]
[ xcol.append([i,j]) for i,j in zip(x_cas_inf,x_cas_sup)]
[ ycol.append([i,j]) for i,j in zip(y_cas_inf,y_cas_sup)]
standardfig.patches(xcol,ycol,color='blue',fill_alpha = 0.5)
src = ColumnDataSource(data=dict(
x=x_base,
y=y_base,
date=input['date'],
cases=input['cases']
))
standardfig.line( x = 'x', y = 'y', source = src, legend_label = input.clustername[0],
line_width = 3, line_color = 'blue')
circle = standardfig.circle('x', 'y', size=2, source=src)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@cases{0,0.0}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'},
renderers=[circle],
point_policy="snap_to_data")
standardfig.add_tools(hover_tool)
outer_radius=250
[standardfig.annular_wedge(
x=0, y=0, inner_radius=0, outer_radius=outer_radius, start_angle=i*np.pi/6,\
end_angle=(i+1)*np.pi/6,fill_color=None,line_color='black',line_dash='dotted')
for i in range(12)]
label = ['January','February','March','April','May','June','July','August','September','October','November','December']
xr,yr = polar(np.linspace(0, 2 * np.pi, 13),outer_radius,1)
standardfig.text(xr[:-1], yr[:-1], label,text_font_size="9pt", text_align="center", text_baseline="middle")
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
return standardfig
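# Geometry note (sketch of the mapping used above): a sample on day d of year y sits at
# angle theta = 2*pi*d/365 with baseline radius r = K*((y-2020)*2*pi + theta), so each
# year adds one full turn; the shaded band half-width is 0.5*value*size_factor.
# Usage sketch (illustrative only), assuming `cocodisplay` and a one-location `df`:
# from bokeh.io import show
# show(cocodisplay.pycoa_spiral_plot(df, 'daily'))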
''' SCROLLINGMENU PLOT '''
@decowrapper
@decoplot
def pycoa_scrollingmenu(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot, with a scrolling menu location, according to arguments.
See help(pycoa_scrollingmenu).
Keyword arguments
-----------------
requires at least 2 distinct locations (len(location) >= 2)
- input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- guideline = False
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
mode = kwargs.get('mode',self.dvisu_default['mode'])
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
uniqloc = list(input.clustername.unique())
uniqloc.sort()
if 'location' in input.columns:
if len(uniqloc) < 2:
raise CoaTypeError('What do you want me to do? You have selected only one location. '
'There is no sense in using this method. See help.')
input = input[['date', 'clustername', input_field]]
input = input.sort_values(by='clustername', ascending = True).reset_index(drop=True)
mypivot = pd.pivot_table(input, index='date', columns='clustername', values=input_field)
column_order = uniqloc
mypivot = mypivot.reindex(column_order, axis=1)
source = ColumnDataSource(mypivot)
filter_data1 = mypivot[[uniqloc[0]]].rename(columns={uniqloc[0]: 'cases'})
src1 = ColumnDataSource(filter_data1)
filter_data2 = mypivot[[uniqloc[1]]].rename(columns={uniqloc[1]: 'cases'})
src2 = ColumnDataSource(filter_data2)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@cases{0,0.0}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'}, mode = mode,
point_policy="snap_to_data") # ,PanTool())
panels = []
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type, x_axis_type = 'datetime', **kwargs)
standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
standardfig.add_tools(hover_tool)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
def add_line(src, options, init, color):
s = Select(options = options, value = init)
r = standardfig.line(x = 'date', y = 'cases', source = src, line_width = 3, line_color = color)
li = LegendItem(label = init, renderers = [r])
s.js_on_change('value', CustomJS(args=dict(s0=source, s1=src, li=li),
code="""
var c = cb_obj.value;
var y = s0.data[c];
s1.data['cases'] = y;
li.label = {value: cb_obj.value};
s1.change.emit();
"""))
return s, li
s1, li1 = add_line(src1, uniqloc, uniqloc[0], self.scolors[0])
s2, li2 = add_line(src2, uniqloc, uniqloc[1], self.scolors[1])
standardfig.add_layout(Legend(items = [li1, li2]))
standardfig.legend.location = 'top_left'
layout = row(column(row(s1, s2), row(standardfig)))
panel = Panel(child = layout, title = axis_type)
panels.append(panel)
tabs = Tabs(tabs = panels)
label = standardfig.title
return tabs
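# Usage sketch (illustrative only): requires at least two distinct locations in `df`;
# `cocodisplay` is an assumed CocoDisplay instance.
# from bokeh.io import show
# show(cocodisplay.pycoa_scrollingmenu(df, 'cumul', mode='mouse'))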
''' YEARLY PLOT '''
@decowrapper
@decoplot
def pycoa_yearly_plot(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a yearly plot according to arguments. See help(pycoa_yearly_plot).
Keyword arguments
-----------------
- input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- guideline = False
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
if len(input.clustername.unique()) > 1 :
print('Can only display yearly plot for ONE location. I took the first one:', input.clustername[0])
input = input.loc[input.clustername == input.clustername[0]].copy()
panels = []
listfigs = []
cases_custom = CocoDisplay.rollerJS()
input['date']=pd.to_datetime(input["date"])
#drop leap days (29 February); finer handling may be needed in the future
input = input.loc[~(input['date'].dt.month.eq(2) & input['date'].dt.day.eq(29))].reset_index(drop=True)
input = input.copy()
input.loc[:,'allyears']=input['date'].apply(lambda x : x.year)
input['allyears'] = input['allyears'].astype(int)
input.loc[:,'dayofyear']= input['date'].apply(lambda x : x.dayofyear)
allyears = list(input.allyears.unique())
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
if len(input_field)>1:
raise CoaError('Only one variable can be displayed')
else:
input_field=input_field[0]
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type,**kwargs)
i = 0
r_list=[]
maxou=-1000
input['cases']=input[input_field]
line_style = ['solid', 'dashed', 'dotted', 'dotdash']
colors = itertools.cycle(self.lcolors)
for loc in list(input.clustername.unique()):
for year in allyears:
input_filter = input.loc[(input.clustername == loc) & (input['date'].dt.year.eq(year))].reset_index(drop = True)
src = ColumnDataSource(input_filter)
leg = loc + ' ' + str(year)
r = standardfig.line(x = 'dayofyear', y = input_field, source = src,
color = next(colors), line_width = 3,
legend_label = leg,
hover_line_width = 4, name = input_field)
maxou=max(maxou,np.nanmax(input_filter[input_field].values))
label = input_field
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), ('Cases', '@cases{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode']) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
if axis_type == 'linear':
if maxou < 1e4 :
standardfig.yaxis.formatter = BasicTickFormatter(use_scientific=False)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title = axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
labelspd=input.loc[(input.allyears.eq(2021)) & (input.date.dt.day.eq(1))]
standardfig.xaxis.ticker = list(labelspd['dayofyear'].astype(int))
replacelabelspd = labelspd['date'].apply(lambda x: str(x.strftime("%b")))
#label_dict = dict(zip(input.loc[input.allyears.eq(2020)]['daymonth'],input.loc[input.allyears.eq(2020)]['date'].apply(lambda x: str(x.day)+'/'+str(x.month))))
standardfig.xaxis.major_label_overrides = dict(zip(list(labelspd['dayofyear'].astype(int)),list(replacelabelspd)))
CocoDisplay.bokeh_legend(standardfig)
listfigs.append(standardfig)
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), (r.name, '@$name{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode'], renderers=[r]) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
self.set_listfigures(listfigs)
tabs = Tabs(tabs = panels)
return tabs
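# Usage sketch (illustrative only): one location, one variable; `cocodisplay` and `df`
# are assumed as above.
# from bokeh.io import show
# show(cocodisplay.pycoa_yearly_plot(df, 'daily'))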
''' DECORATORS FOR HISTO VERTICAL, HISTO HORIZONTAL, PIE & MAP'''
def decohistomap(func):
"""
Decorator function used for histogram and map
"""
@wraps(func)
def inner_hm(self, input = None, input_field = None, **kwargs):
tile = kwargs.get('tile', self.dvisu_default['tile'])
maplabel = kwargs.get('maplabel', None)
if not isinstance(maplabel,list):
maplabel=[maplabel]
#if maplabel:
# maplabel = maplabel
if 'map' in func.__name__:
kwargs['maplabel'] = maplabel
orientation = kwargs.get('orientation', self.dvisu_default['orientation'])
cursor_date = kwargs.get('cursor_date', None)
#if orientation:
# kwargs['orientation'] = orientation
#kwargs['cursor_date'] = kwargs.get('cursor_date', self.dvisu_default['cursor_date'])
if isinstance(input['location'].iloc[0],list):
input['rolloverdisplay'] = input['clustername']
input = input.explode('location')
else:
input['rolloverdisplay'] = input['location']
uniqloc = input.clustername.unique()
geopdwd = input
if maplabel and 'unsorted' in maplabel:
pass
else:
geopdwd = geopdwd.sort_values(by=input_field, ascending = False).reset_index(drop=True)
started = geopdwd.date.min()
ended = geopdwd.date.max()
if cursor_date:
date_slider = DateSlider(title = "Date: ", start = started, end = ended,
value = ended, step=24 * 60 * 60 * 1000, orientation = orientation)
#wanted_date = date_slider.value_as_datetime.date()
#if func.__name__ == 'pycoa_mapfolium' or func.__name__ == 'pycoa_map' or func.__name__ == 'innerdecomap' or func.__name__ == 'innerdecopycoageo':
if func.__name__ in ['pycoa_mapfolium','pycoa_map','pycoageo' ,'pycoa_pimpmap']:
if isinstance(input.location.to_list()[0],list):
geom = self.location_geometry
geodic={loc:geom.loc[geom.location==loc]['geometry'].values[0] for loc in geopdwd.location.unique()}
geopdwd['geometry'] = geopdwd['location'].map(geodic)
else:
geopdwd = pd.merge(geopdwd, self.location_geometry, on='location')
kwargs['tile'] = tile
if self.iso3country in ['USA']:#['FRA','USA']
geo = copy.deepcopy(self.geo)
d = geo._list_translation
if func.__name__ != 'pycoa_mapfolium':
if any(i in list(geopdwd.codelocation.unique()) for i in d.keys()) \
or any(True for i in d.keys() if ''.join(list(geopdwd.codelocation.unique())).find(i)!=-1):
geo.set_dense_geometry()
kwargs.pop('tile')
else:
geo.set_main_geometry()
d = {}
new_geo = geo.get_data()[['name_'+self.granularity,'geometry']]
new_geo = new_geo.rename(columns={'name_'+self.granularity:'location'})
new_geo = new_geo.set_index('location')['geometry'].to_dict()
geopdwd['geometry'] = geopdwd['location'].map(new_geo)
geopdwd = gpd.GeoDataFrame(geopdwd, geometry=geopdwd.geometry, crs="EPSG:4326")
if func.__name__ == 'pycoa_histo':
pos = {}
new = pd.DataFrame()
n = 0
for i in uniqloc:
perloc = geopdwd.loc[geopdwd.clustername == i]
if all(perloc != 0):
pos = perloc.index[0]
if new.empty:
new = perloc
else:
new = new.append(perloc)
n += 1
geopdwd = new.reset_index(drop=True)
if cursor_date:
date_slider = date_slider
else:
date_slider = None
kwargs['date_slider'] = date_slider
return func(self, geopdwd, input_field, **kwargs)
return inner_hm
''' VERTICAL HISTO '''
@decowrapper
@decohistomap
def pycoa_histo(self, geopdwd, input_field = None, **kwargs):
'''
-----------------
Create a 1D histogram by value according to arguments.
See help(pycoa_histo).
Keyword arguments
-----------------
- geopdwd : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
geopdwd_filter = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filter = geopdwd_filter.reset_index(drop = True)
input = geopdwd_filter.rename(columns = {'cases': input_field})
bins = kwargs.get('bins', None)
if 'location' in input.columns:
uniqloc = list(input.clustername.unique())
allval = input.loc[input.clustername.isin(uniqloc)][['clustername', input_field,'permanentdisplay']]
min_val = allval[input_field].min()
max_val = allval[input_field].max()
if bins:
bins = bins
else:
if len(uniqloc) == 1:
bins = 2
min_val = 0.
else:
bins = 11
delta = (max_val - min_val ) / bins
interval = [ min_val + i*delta for i in range(bins+1)]
contributors = { i : [] for i in range(bins+1)}
for i in range(len(allval)):
rank = bisect.bisect_left(interval, allval.iloc[i][input_field])
if rank == bins+1:
rank = bins
contributors[rank].append(allval.iloc[i]['clustername'])
colors = itertools.cycle(self.lcolors)
lcolors = [next(colors) for i in range(bins+1)]
contributors = dict(sorted(contributors.items()))
frame_histo = pd.DataFrame({
'left': [0]+interval[:-1],
'right':interval,
'middle_bin': [format((i+j)/2, ".1f") for i,j in zip([0]+interval[:-1],interval)],
'top': [len(i) for i in list(contributors.values())],
'contributors': [', '.join(i) for i in contributors.values()],
'colors': lcolors})
#tooltips = """
#<div style="width: 400px">
#<b>Middle value:</b> @middle_bin <br>
#<b>Contributors:</b> @contributors{safe} <br>
#</div>
#"""
tooltips = """
<b>Middle value:</b> @middle_bin <br>
<b>Contributors:</b> @contributors{safe} <br>
"""
hover_tool = HoverTool(tooltips = tooltips)
panels = []
bottom = 0
x_axis_type, y_axis_type, axis_type_title = 3 * ['linear']
for axis_type in ["linear", "linlog", "loglin", "loglog"]:
if axis_type == 'linlog':
y_axis_type, axis_type_title = 'log', 'logy'
if axis_type == 'loglin':
x_axis_type, y_axis_type, axis_type_title = 'log', 'linear', 'logx'
if axis_type == 'loglog':
x_axis_type, y_axis_type = 'log', 'log'
axis_type_title = 'loglog'
standardfig = self.standardfig(x_axis_type=x_axis_type, y_axis_type=y_axis_type, **kwargs)
standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
standardfig.xaxis[0].formatter = PrintfTickFormatter(format="%4.2e")
standardfig.add_tools(hover_tool)
standardfig.x_range = Range1d(1.05 * interval[0], 1.05 * interval[-1])
standardfig.y_range = Range1d(0, 1.05 * frame_histo['top'].max())
if x_axis_type == "log":
left = 0.8
if frame_histo['left'][0] <= 0:
frame_histo.at[0, 'left'] = left
else:
left = frame_histo['left'][0]
standardfig.x_range = Range1d(left, 10 * interval[-1])
if y_axis_type == "log":
bottom = 0.0001
standardfig.y_range = Range1d(0.001, 10 * frame_histo['top'].max())
standardfig.quad(source=ColumnDataSource(frame_histo), top='top', bottom=bottom, left='left', \
right='right', fill_color='colors')
panel = Panel(child=standardfig, title=axis_type_title)
panels.append(panel)
tabs = Tabs(tabs=panels)
return tabs
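# Worked example of the binning above: with min_val = 0, max_val = 110 and the default
# bins = 11, delta = 10 and the bin edges are [0, 10, 20, ..., 110]; bisect_left then
# assigns each cluster to the bin containing its value.
# Usage sketch (illustrative only), assuming `cocodisplay` and `df` as above:
# from bokeh.io import show
# show(cocodisplay.pycoa_histo(df, 'cumul', bins=11))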
''' DECORATORS FOR HISTO VERTICAL, HISTO HORIZONTAL, PIE '''
def decohistopie(func):
@wraps(func)
def inner_decohistopie(self, geopdwd, input_field, **kwargs):
"""
Decorator for
Horizontal histogram & Pie Chart
"""
geopdwd['cases'] = geopdwd[input_field]
maplabel = kwargs.get('maplabel',None)
plot_width = kwargs.get('plot_width',self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height',self.dfigure_default['plot_height'])
geopdwd_filter = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filter = geopdwd_filter.reset_index(drop = True)
geopdwd_filter['cases'] = geopdwd_filter[input_field]
cursor_date = kwargs.get('cursor_date',self.dvisu_default['cursor_date'])
date_slider = kwargs['date_slider']
my_date = geopdwd.date.unique()
dico_utc = {i: DateSlider(value=i).value for i in my_date}
geopdwd['date_utc'] = [dico_utc[i] for i in geopdwd.date]
#geopdwd = geopdwd.drop_duplicates(["date", "codelocation","clustername"])#for sumall avoid duplicate
#geopdwd_filter = geopdwd_filter.drop_duplicates(["date", "codelocation","clustername"])
geopdwd = geopdwd.drop_duplicates(["date","clustername"])#for sumall avoid duplicate
geopdwd_filter = geopdwd_filter.drop_duplicates(["date","clustername"])
locunique = geopdwd_filter.clustername.unique()#geopdwd_filtered.location.unique()
geopdwd_filter = geopdwd_filter.copy()
nmaxdisplayed = MAXCOUNTRIESDISPLAYED
if len(locunique) >= nmaxdisplayed :#and func.__name__ != 'pycoa_pie' :
if func.__name__ != 'pycoa_pie' :
geopdwd_filter = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[:nmaxdisplayed])]
else:
geopdwd_filter_first = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[:nmaxdisplayed-1])]
geopdwd_filter_other = geopdwd_filter.loc[geopdwd_filter.clustername.isin(locunique[nmaxdisplayed-1:])]
geopdwd_filter_other = geopdwd_filter_other.groupby('date').sum()
geopdwd_filter_other['location'] = 'others'
geopdwd_filter_other['clustername'] = 'others'
geopdwd_filter_other['codelocation'] = 'others'
geopdwd_filter_other['permanentdisplay'] = 'others'
geopdwd_filter_other['rolloverdisplay'] = 'others'
geopdwd_filter_other['colors'] = '#FFFFFF'
geopdwd_filter = geopdwd_filter_first
geopdwd_filter = geopdwd_filter.append(geopdwd_filter_other)
if func.__name__ == 'pycoa_horizonhisto' :
#geopdwd_filter['bottom'] = geopdwd_filter.index
geopdwd_filter['left'] = geopdwd_filter['cases']
geopdwd_filter['right'] = geopdwd_filter['cases']
geopdwd_filter['left'] = geopdwd_filter['left'].apply(lambda x: 0 if x > 0 else x)
geopdwd_filter['right'] = geopdwd_filter['right'].apply(lambda x: 0 if x < 0 else x)
n = len(geopdwd_filter.index)
d = plot_height / n
ymax = plot_height
geopdwd_filter['top'] = [ymax*(n-i)/n + d/2 for i in range(n)]
geopdwd_filter['bottom'] = [ymax*(n-i)/n - d/2 for i in range(n)]
geopdwd_filter['horihistotexty'] = geopdwd_filter['bottom'] + d/2
geopdwd_filter['horihistotextx'] = geopdwd_filter['right']
if maplabel and 'label%' in maplabel:
geopdwd_filter['right'] = geopdwd_filter['right'].apply(lambda x: 100.*x)
geopdwd_filter['horihistotextx'] = geopdwd_filter['right']
geopdwd_filter['horihistotext'] = [str(round(i))+'%' for i in geopdwd_filter['right']]
else:
geopdwd_filter['horihistotext'] = [ '{:.3g}'.format(float(i)) if float(i)>1.e4 else round(float(i),2) for i in geopdwd_filter['right'] ]
geopdwd_filter['horihistotext'] = [str(i) for i in geopdwd_filter['horihistotext']]
if func.__name__ == 'pycoa_pie' :
geopdwd_filter = self.add_columns_for_pie_chart(geopdwd_filter,input_field)
geopdwd = self.add_columns_for_pie_chart(geopdwd,input_field)
if maplabel and 'label%' in maplabel:
geopdwd_filter['textdisplayed2'] = geopdwd_filter['percentage']
geopdwd['textdisplayed2'] = geopdwd['percentage']
source = ColumnDataSource(data = geopdwd)
input_filter = geopdwd_filter
srcfiltered = ColumnDataSource(data = input_filter)
max_value = max(input_filter['cases'])
min_value = min(input_filter['cases'])
min_value_gt0 = min(input_filter[input_filter['cases'] > 0]['cases'])
panels = []
for axis_type in self.ax_type:
plot_width = kwargs['plot_width']
plot_height = kwargs['plot_height']
standardfig = self.standardfig( x_axis_type = axis_type, x_range = (1.05*min_value, 1.05 * max_value),**kwargs)
if maplabel and 'label%' in maplabel:
standardfig.x_range = Range1d(0.01, 1.2 * max_value*100)
standardfig.xaxis.axis_label = 'percentage(%)'
standardfig.xaxis.formatter = BasicTickFormatter(use_scientific=False)
else:
standardfig.xaxis[0].formatter = PrintfTickFormatter(format="%4.2e")
standardfig.x_range = Range1d(0.01, 1.2 * max_value)
if not input_filter[input_filter[input_field] < 0.].empty:
standardfig.x_range = Range1d(1.2 * min_value, 1.2 * max_value)
if axis_type == "log":
if not input_filter[input_filter[input_field] < 0.].empty:
print('Some values are negative; can\'t display log scale in this context')
else:
if func.__name__ == 'pycoa_horizonhisto' :
if maplabel and 'label%' in maplabel:
standardfig.x_range = Range1d(0.01, 50 * max_value*100)
else:
standardfig.x_range = Range1d(0.01, 50 * max_value)
srcfiltered.data['left'] = [0.01] * len(srcfiltered.data['right'])
if func.__name__ == 'pycoa_pie':
if not input_filter[input_filter[input_field] < 0.].empty:
raise CoaKeyError('Some values are negative, can\'t display a Pie chart, try histo by location')
standardfig.plot_width = plot_height
standardfig.plot_height = plot_height
if date_slider:
date_slider.width = int(0.8*plot_width)
callback = CustomJS(args = dict(source = source,
source_filter = srcfiltered,
date_slider = date_slider,
ylabel = standardfig.yaxis[0],
title = standardfig.title,
x_range = standardfig.x_range,
x_axis_type = axis_type,
figure = standardfig),
code = """
var date_slide = date_slider.value;
var dates = source.data['date_utc'];
var val = source.data['cases'];
var loc = source.data['clustername'];
//var loc = source.data['location'];
var subregion = source.data['name_subregion'];
var codeloc = source.data['codelocation'];
var colors = source.data['colors'];
var newval = [];
var newloc = [];
var newcolors = [];
var newcodeloc = [];
var newname_subregion = [];
var labeldic = {};
for (var i = 0; i < dates.length; i++){
if (dates[i] == date_slide){
newval.push(parseFloat(val[i]));
newloc.push(loc[i]);
newcodeloc.push(codeloc[i]);
newcolors.push(colors[i]);
if(typeof subregion !== 'undefined')
newname_subregion.push(subregion[i]);
}
}
var len = source_filter.data['clustername'].length;
var indices = new Array(len);
for (var i = 0; i < len; i++) indices[i] = i;
indices.sort(function (a, b) { return newval[a] > newval[b] ? -1 : newval[a] < newval[b] ? 1 : 0; });
var orderval = [];
var orderloc = [];
var ordercodeloc = [];
var ordername_subregion = [];
var ordercolors = [];
var textdisplayed = [];
for (var i = 0; i < len; i++)
{
orderval.push(newval[indices[i]]);
orderloc.push(newloc[indices[i]]);
ordercodeloc.push(newcodeloc[indices[i]]);
if(typeof subregion !== 'undefined')
ordername_subregion.push(newname_subregion[i]);
ordercolors.push(newcolors[indices[i]]);
//labeldic[len-indices[i]] = newcodeloc[indices[i]];
textdisplayed.push(newcodeloc[indices[i]].padStart(40,' '));
}
source_filter.data['cases'] = orderval;
const reducer = (accumulator, currentValue) => accumulator + currentValue;
var tot = orderval.reduce(reducer);
var top = [];
var bottom = [];
var starts = [];
var ends = [];
var middle = [];
var text_x = [];
var text_y = [];
var r = 0.7;
var bthick = 0.95;
var cumul = 0.;
var percentage = [];
var angle = [];
var text_size = [];
var left_quad = [];
var right_quad = [];
for(var i = 0; i < orderval.length; i++)
{
cumul += ((orderval[i] / tot) * 2 * Math.PI);
ends.push(cumul);
if(i==0)
starts.push(0);
else
starts.push(ends[i-1]);
middle.push((ends[i]+starts[i])/2);
text_x.push(r*Math.cos(middle[i]));
text_y.push(r*Math.sin(middle[i]));
percentage.push(String(100.*orderval[i] / tot).slice(0, 4));
angle.push((orderval[i] / tot) * 2 * Math.PI)
/*if ((ends[i]-starts[i]) > 0.08*(2 * Math.PI))
text_size.push('10pt');
else
text_size.push('6pt');*/
//top.push((orderval.length-i) + bthick/2);
//bottom.push((orderval.length-i) - bthick/2);
if (isNaN(orderval[i])) orderval[i] = 0.;
if(orderval[i]<=0.)
{
left_quad.push(orderval[i]);
right_quad.push(0.);
}
else
{
left_quad.push(0);
right_quad.push(orderval[i]);
}
}
source_filter.data['clustername'] = orderloc;
source_filter.data['codelocation'] = ordercodeloc;
//source_filter.data['colors'] = ordercolors;
if(typeof subregion !== 'undefined')
source_filter.data['rolloverdisplay'] = ordername_subregion;
else
source_filter.data['rolloverdisplay'] = orderloc;
source_filter.data['ends'] = ends;
source_filter.data['starts'] = starts;
source_filter.data['middle'] = middle;
source_filter.data['text_x'] = text_x;
source_filter.data['text_y'] = text_y;
//source_filter.data['text_size'] = text_size;
source_filter.data['percentage'] = percentage;
source_filter.data['angle'] = angle;
source_filter.data['left'] = left_quad;
source_filter.data['right'] = right_quad;
var mid =[];
var ht = [];
var textdisplayed2 = [];
var n = right_quad.length;
var d = figure.plot_height / n;
var ymax = figure.plot_height;
for(i=0; i<right_quad.length;i++){
top.push(parseInt(ymax*(n-i)/n+d/2));
bottom.push(parseInt(ymax*(n-i)/n-d/2));
mid.push(parseInt(ymax*(n-i)/n));
labeldic[parseInt(ymax*(n-i)/n)] = ordercodeloc[i];
ht.push(right_quad[i].toFixed(2).toString());
var a=new Intl.NumberFormat().format(right_quad[i])
textdisplayed2.push(a.toString().padStart(26,' '));
//textdisplayed2.push(right_quad[i].toFixed(2).toString().padStart(40,' '));
}
source_filter.data['top'] = top;
source_filter.data['bottom'] = bottom;
source_filter.data['horihistotextxy'] = mid;
source_filter.data['horihistotextx'] = right_quad;
source_filter.data['horihistotext'] = ht;
source_filter.data['permanentdisplay'] = ordercodeloc;
source_filter.data['textdisplayed'] = textdisplayed;
source_filter.data['textdisplayed2'] = textdisplayed2;
var maxx = Math.max.apply(Math, right_quad);
var minx = Math.min.apply(Math, left_quad);
ylabel.major_label_overrides = labeldic;
console.log(labeldic);
x_range.end = 1.2 * maxx;
x_range.start = 1.05 * minx;
if(minx >= 0){
x_range.start = 0.01;
source_filter.data['left'] = Array(left_quad.length).fill(0.01);
}
var tmp = title.text;
tmp = tmp.slice(0, -11);
var dateconverted = new Date(date_slide);
var dd = String(dateconverted.getDate()).padStart(2, '0');
var mm = String(dateconverted.getMonth() + 1).padStart(2, '0'); //January is 0!
var yyyy = dateconverted.getFullYear();
var dmy = dd + '/' + mm + '/' + yyyy;
title.text = tmp + dmy+")";
source_filter.change.emit();
""")
date_slider.js_on_change('value', callback)
cases_custom = CocoDisplay.rollerJS()
if func.__name__ == 'pycoa_pie' :
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), (input_field, '@cases{0,0.0}'), ('%','@percentage'), ],
formatters={'location': 'printf', '@{' + 'cases' + '}': cases_custom, '%':'printf'},
point_policy="snap_to_data")) # ,PanTool())
else:
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), (input_field, '@cases{0,0.0}'), ],
formatters={'location': 'printf', '@{' + 'cases' + '}': cases_custom, },
point_policy="snap_to_data")) # ,PanTool())
panel = Panel(child = standardfig, title = axis_type)
panels.append(panel)
return func(self, srcfiltered, panels, date_slider)
return inner_decohistopie
''' VERTICAL HISTO '''
@decowrapper
@decohistomap
@decohistopie
def pycoa_horizonhisto(self, srcfiltered, panels, date_slider):
'''
-----------------
Create a 1D histogram by location according to arguments.
See help(pycoa_horizonhisto).
Keyword arguments
-----------------
- srcfiltered : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
n = len(panels)
new_panels = []
for i in range(n):
fig = panels[i].child
fig.y_range = Range1d(min(srcfiltered.data['bottom']), max(srcfiltered.data['top']))
fig.yaxis[0].formatter = NumeralTickFormatter(format="0.0")
ytick_loc = [int(i) for i in srcfiltered.data['horihistotexty']]
fig.yaxis.ticker = ytick_loc
label_dict = dict(zip(ytick_loc,srcfiltered.data['permanentdisplay']))
fig.yaxis.major_label_overrides = label_dict
#print(fig.y_range ,fig.yaxis.major_label_overrides)
fig.quad(source = srcfiltered,
top='top', bottom = 'bottom', left = 'left', right = 'right', color = 'colors', line_color = 'black',
line_width = 1, hover_line_width = 2)
labels = LabelSet(
x = 'horihistotextx',
y = 'horihistotexty',
x_offset=5,
y_offset=-4,
text = 'horihistotext',
source = srcfiltered,text_font_size='10px',text_color='black')
fig.add_layout(labels)
panel = Panel(child = fig, title = panels[i].title)
new_panels.append(panel)
tabs = Tabs(tabs = new_panels)
if date_slider:
tabs = column(date_slider,tabs)
return tabs
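# Usage sketch (illustrative only): horizontal histogram by location, optionally with a
# date slider; `cocodisplay` and `df` are assumed as above.
# from bokeh.io import show
# show(cocodisplay.pycoa_horizonhisto(df, 'cumul', cursor_date=True))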
''' PIE '''
def add_columns_for_pie_chart(self,df,column_name):
df = df.copy()
column_sum = df[column_name].sum()
df['percentage'] = df[column_name]/column_sum
percentages = [0] + df['percentage'].cumsum().tolist()
df['angle'] = (df[column_name]/column_sum)*2 * np.pi
df['starts'] = [p * 2 * np.pi for p in percentages[:-1]]
df['ends'] = [p * 2 * np.pi for p in percentages[1:]]
df['diff'] = (df['ends'] - df['starts'])
df['middle'] = df['starts']+np.abs(df['ends']-df['starts'])/2.
df['cos'] = np.cos(df['middle']) * 0.9
df['sin'] = np.sin(df['middle']) * 0.9
df['text_size'] = '8pt'
df['textdisplayed'] = df['permanentdisplay'].str.pad(36, side = "left")
locale.setlocale(locale.LC_ALL, 'en_US')
df['textdisplayed2'] = [ locale.format("%d", i, grouping=True)\
for i in df[column_name]]
#df['textdisplayed2'] = df[column_name].astype(str) #[i.str for i in df[column_name]]
df['textdisplayed2'] = df['textdisplayed2'].str.pad(26, side = "left")
#df['textdisplayed2'] = df[column_name].str.pad(26, side = "left")
df.loc[df['diff'] <= np.pi/20,'textdisplayed']=''
df.loc[df['diff'] <= np.pi/20,'textdisplayed2']=''
df['percentage'] = 100.*df['percentage']
return df
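# Worked example of the wedge geometry above: for three clusters with cases [50, 30, 20],
# column_sum = 100, so 'angle' = [0.5, 0.3, 0.2]*2*pi ~ [3.14, 1.88, 1.26] rad, and the
# cumulative boundaries are starts = [0, 3.14, 5.03] and ends = [3.14, 5.03, 6.28].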
@decowrapper
@decohistomap
@decohistopie
def pycoa_pie(self, srcfiltered, panels, date_slider):
'''
-----------------
Create a pie chart according to arguments.
See help(pycoa_pie).
Keyword arguments
-----------------
- srcfiltered : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
A short usage sketch (comments) follows this method.
'''
standardfig = panels[0].child
standardfig.plot_height=400
standardfig.plot_width=400
standardfig.x_range = Range1d(-1.1, 1.1)
standardfig.y_range = Range1d(-1.1, 1.1)
standardfig.axis.visible = False
standardfig.xgrid.grid_line_color = None
standardfig.ygrid.grid_line_color = None
standardfig.wedge(x=0, y=0, radius=1.,line_color='#E8E8E8',
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
fill_color='colors', legend_label='clustername', source=srcfiltered)
standardfig.legend.visible = False
labels = LabelSet(x=0, y=0,text='textdisplayed',angle=cumsum('angle', include_zero=True),
text_font_size="10pt",source=srcfiltered,render_mode='canvas')
labels2 = LabelSet(x=0, y=0, text='textdisplayed2',
angle=cumsum('angle', include_zero=True),text_font_size="8pt",source=srcfiltered)
standardfig.add_layout(labels)
standardfig.add_layout(labels2)
if date_slider:
standardfig = column(date_slider,standardfig)
return standardfig
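# Usage sketch (illustrative only): `cocodisplay` and `df` are assumed as above; the
# maplabel='label%' option displays percentages instead of raw values.
# from bokeh.io import show
# show(cocodisplay.pycoa_pie(df, 'cumul', maplabel='label%'))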
''' MAP FOLIUM '''
@decowrapper
@decohistomap
def pycoa_mapfolium(self, geopdwd, input_field, **kwargs):
'''
-----------------
Create a folium map according to arguments.
See help(pycoa_mapfolium).
Keyword arguments
-----------------
- srcfiltered : A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element, could be a list
- plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
- when : default min and max according to the input DataFrame.
Dates are given in the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] from the min date up to dd/mm/yyyy
if [dd/mm/yyyy:] from dd/mm/yyyy up to the max date
A short usage sketch (comments) follows this method.
'''
title = kwargs.get('title', None)
tile = kwargs.get('tile', self.dvisu_default['tile'])
tile = CocoDisplay.convert_tile(tile, 'folium')
maplabel = kwargs.get('maplabel',self.dvisu_default['maplabel'])
plot_width = kwargs.get('plot_width',self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height',self.dfigure_default['plot_height'])
geopdwd['cases'] = geopdwd[input_field]
geopdwd_filtered = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filtered = geopdwd_filtered.reset_index(drop = True)
geopdwd_filtered['cases'] = geopdwd_filtered[input_field]
my_date = geopdwd.date.unique()
dico_utc = {i: DateSlider(value=i).value for i in my_date}
geopdwd['date_utc'] = [dico_utc[i] for i in geopdwd.date]
#geopdwd = geopdwd.drop_duplicates(["date", "codelocation","clustername"])#for sumall avoid duplicate
#geopdwd_filtered = geopdwd_filtered.sort_values(by='cases', ascending = False).reset_index()
#locunique = geopdwd_filtered.clustername.unique()#geopdwd_filtered.location.unique()
if self.database_name == 'risklayer':
geopdwd_filtered = geopdwd_filtered.loc[geopdwd_filtered.geometry.notna()]
uniqloc = list(geopdwd_filtered.codelocation.unique())
geopdwd_filtered = geopdwd_filtered.drop(columns=['date', 'colors'])
msg = "(data from: {})".format(self.database_name)
minx, miny, maxx, maxy = geopdwd_filtered.total_bounds
mapa = folium.Map(tiles=tile, attr='<a href=\"http://pycoa.fr\"> ©pycoa.fr </a>' + msg)
#min_lat=minx, max_lat=maxx, min_lon=miny, max_lon=maxy)
#location=[geopdwd_filtered.centroid.y.mean(),geopdwd_filtered.centroid.x.mean()],)
if self.dbld[self.database_name][0] != 'WW':
mapa.fit_bounds([(miny, minx), (maxy, maxx)])
fig = Figure(width=plot_width, height=plot_height)
fig.add_child(mapa)
min_col, max_col = CocoDisplay.min_max_range(np.nanmin(geopdwd_filtered[input_field]),
np.nanmax(geopdwd_filtered[input_field]))
min_col_non0 = (np.nanmin(geopdwd_filtered.loc[geopdwd_filtered['cases']>0.]['cases']))
invViridis256 = Viridis256[::-1]
if 'log' in maplabel:
geopdwd_filtered['cases'] = geopdwd_filtered.loc[geopdwd_filtered['cases']>0]['cases']
color_mapper = LinearColorMapper(palette=invViridis256, low=min_col_non0, high=max_col, nan_color='#d9d9d9')
colormap = branca.colormap.LinearColormap(color_mapper.palette).to_step(data=list(geopdwd_filtered['cases']),n=10,method='log')
else:
color_mapper = LinearColorMapper(palette=invViridis256, low=min_col, high=max_col, nan_color='#d9d9d9')
colormap = branca.colormap.LinearColormap(color_mapper.palette).scale(min_col, max_col)
colormap.caption = title
colormap.add_to(mapa)
map_id = colormap.get_name()
custom_label_colorbar_js = """
var div = document.getElementById('legend');
var ticks = document.getElementsByClassName('tick')
for(var i = 0; i < ticks.length; i++){
var values = ticks[i].textContent.replace(',','')
val = parseFloat(values).toExponential(1).toString().replace("+", "")
if(parseFloat(ticks[i].textContent) == 0) val = 0.
div.innerHTML = div.innerHTML.replace(ticks[i].textContent,val);
}
"""
e = Element(custom_label_colorbar_js)
html = colormap.get_root()
html.script.get_root().render()
html.script._children[e.get_name()] = e
geopdwd_filtered[input_field + 'scientific_format'] = \
(['{:.5g}'.format(i) for i in geopdwd_filtered['cases']])
# (['{:.3g}'.format(i) if i>100000 else i for i in geopdwd_filter[input_field]])
map_dict = geopdwd_filtered.set_index('location')[input_field].to_dict()
if np.nanmin(geopdwd_filtered[input_field]) == np.nanmax(geopdwd_filtered[input_field]):
map_dict['FakeCountry'] = 0.
if 'log' in maplabel:
color_scale = branca.colormap.LinearColormap(color_mapper.palette).to_step(data=list(geopdwd_filtered['cases']),n=10,method='log')
else:
color_scale = LinearColormap(color_mapper.palette, vmin=min(map_dict.values()), vmax=max(map_dict.values()))
def get_color(feature):
value = map_dict.get(feature['properties']['location'])
if value is None or np.isnan(value):
return '#8c8c8c' # MISSING -> gray
else:
return color_scale(value)
displayed = 'rolloverdisplay'
folium.GeoJson(
geopdwd_filtered,
style_function=lambda x:
{
'fillColor': get_color(x),
'fillOpacity': 0.8,
'color': None
},
highlight_function=lambda x: {'weight': 2, 'color': 'green'},
tooltip=folium.features.GeoJsonTooltip(fields=[displayed, input_field + 'scientific_format'],
aliases=['location' + ':', input_field + ":"],
style="""
background-color: #F0EFEF;
border: 2px solid black;
border-radius: 3px;
box-shadow: 3px;
opacity: 0.2;
"""),
# '<div style="barialckground-color: royalblue 0.2; color: black; padding: 2px; border: 1px solid black; border-radius: 2px;">'+input_field+'</div>'])
).add_to(mapa)
return mapa
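# Usage sketch (illustrative only): the method returns a folium.Map, which can be shown
# inline in a notebook or written to a standalone HTML file; `cocodisplay` and `df` are
# assumed as above.
# m = cocodisplay.pycoa_mapfolium(df, 'cumul')
# m.save('pycoa_map.html')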
''' DECORATOR FOR MAP BOKEH '''
def decopycoageo(func):
@wraps(func)
def innerdecopycoageo(self, geopdwd, input_field, **kwargs):
geopdwd['cases'] = geopdwd[input_field]
geopdwd_filtered = geopdwd.loc[geopdwd.date == self.when_end]
geopdwd_filtered = geopdwd_filtered.reset_index(drop = True)
geopdwd_filtered = gpd.GeoDataFrame(geopdwd_filtered, geometry=geopdwd_filtered.geometry, crs="EPSG:4326")
geopdwd = geopdwd.sort_values(by=['clustername', 'date'], ascending = [True, False])
geopdwd_filtered = geopdwd_filtered.sort_values(by=['clustername', 'date'], ascending = [True, False]).drop(columns=['date', 'colors'])
new_poly = []
geolistmodified = dict()
for index, row in geopdwd_filtered.iterrows():
split_poly = []
new_poly = []
if row['geometry']:
for pt in self.get_polycoords(row):
if type(pt) == tuple:
new_poly.append(CocoDisplay.wgs84_to_web_mercator(pt))
elif type(pt) == list:
shifted = []
for p in pt:
shifted.append(CocoDisplay.wgs84_to_web_mercator(p))
new_poly.append(sg.Polygon(shifted))
else:
raise CoaTypeError("Geometry point is neither a tuple nor a list: "
"don't know what to do with your geometry description")
if type(new_poly[0]) == tuple:
geolistmodified[row['location']] = sg.Polygon(new_poly)
else:
geolistmodified[row['location']] = sg.MultiPolygon(new_poly)
ng = pd.DataFrame(geolistmodified.items(), columns=['location', 'geometry'])
geolistmodified = gpd.GeoDataFrame({'location': ng['location'], 'geometry': gpd.GeoSeries(ng['geometry'])}, crs="epsg:3857")
geopdwd_filtered = geopdwd_filtered.drop(columns='geometry')
geopdwd_filtered = pd.merge(geolistmodified, geopdwd_filtered, on='location')
#if kwargs['wanted_dates']:
# kwargs.pop('wanted_dates')
return func(self, geopdwd, geopdwd_filtered, **kwargs)
return innerdecopycoageo
''' RETURN GEOMETRY, LOCATIO + CASES '''
@decowrapper
@decohistomap
@decopycoageo
def pycoageo(self, geopdwd, geopdwd_filtered, **kwargs):
return geopdwd_filtered
def decomap(func):
@wraps(func)
def innerdecomap(self, geopdwd, geopdwd_filtered, **kwargs):
title = kwargs.get('title', None)
maplabel = kwargs.get('maplabel',self.dvisu_default['maplabel'])
tile = kwargs.get('tile', None)
if tile:
tile = CocoDisplay.convert_tile(tile, 'bokeh')
uniqloc = list(geopdwd_filtered.clustername.unique())
dfLabel = pd.DataFrame()
sourcemaplabel = ColumnDataSource(dfLabel)
if maplabel or func.__name__ == 'pycoa_pimpmap':
locsum = geopdwd_filtered.clustername.unique()
numberpercluster = geopdwd_filtered['clustername'].value_counts().to_dict()
sumgeo = geopdwd_filtered.copy()
sumgeo['geometry'] = sumgeo['geometry'].buffer(0.001) #needed with geopandas 0.10.2
sumgeo = sumgeo.dissolve(by='clustername', aggfunc='sum').reset_index()
sumgeo['nb'] = sumgeo['clustername'].map(numberpercluster)
#print(geopdwd_filtered.loc[geopdwd_filtered.clustername=='Île-de-France'].reset_index(drop=True).explode(index_parts=False))
centrosx = sumgeo['geometry'].centroid.x
centrosy = sumgeo['geometry'].centroid.y
cases = sumgeo['cases']/sumgeo['nb']
dfLabel=pd.DataFrame({'clustername':sumgeo.clustername,'centroidx':centrosx,'centroidy':centrosy,'cases':cases,'geometry':sumgeo['geometry']})
if 'spark' in maplabel:
sparkos = {i: CocoDisplay.sparkline(geopdwd.loc[ (geopdwd.clustername==i) &
(geopdwd.date >= self.when_beg) &
(geopdwd.date <= self.when_end)].sort_values(by='date')['cases']) for i in locsum }
dfpimp = pd.DataFrame(list(sparkos.items()), columns=['clustername', 'pimpmap'])
dfLabel = pd.merge(dfLabel,dfpimp,on=['clustername'],how="inner")
# -*- coding: utf-8 -*-
"""
Simulate back-EMF voltage for the Toyota Prius motor at different speeds
Pizza model = only part of the machine is simulated and results are extrapolated
Multiprocessing disabled = one instance of the simulation is enough because the flux linkage
is constant with respect to speed
Author: <NAME>
"""
"""
Number of workers is defined based on the number of problems. Since this particular
multiprocess is not computationally intensive, it is set to 12 workers.
"""
#TODO:
# [] Deviation in simulation vs Experimental Results by 1.2
Workers=12
DeviationFactor=1.2
import femm as fem
from femm import *
import math as math
from math import *
from matplotlib import *
from matplotlib.pyplot import *
import time as myTime
import pandas as pd
import matplotlib.pyplot as plt
from statistics import mean
from numpy import diff
import concurrent.futures
import os
cwd=os.getcwd()
xldir = os.path.join(cwd, 'ExcelResults')
pltdir = os.path.join(cwd, 'Plots')
def rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
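# Quick sanity check for rotate() (illustrative comment only): rotating (1, 0) by 90
# degrees (pi/2 rad) about the origin should land, up to floating-point error, on (0, 1):
# >>> rotate((0, 0), (1, 0), math.pi / 2)
# (6.123233995736766e-17, 1.0)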
def BEMFSimulation(BEMFSpeed):
try:
openfemm()
newdocument(0)
Length = 83.6;# #mm motor length
mi_probdef(0,'millimeters','planar',1e-8,Length,30,1)
## Stator Geometry
Nslots = 48;
StatorOD = 269;
StatorID = 161.93;
ShoeHeight = 1.02;# Hs0
ShoeRadius = 0.50;# Hs1
SlotHeight = 29.16;# Hs2
SlotDia = 5.64; # 2Rs
PostHeight = SlotDia/2+SlotHeight+ShoeRadius+ShoeHeight;
SlotOpen = 1.93; #Bs0
SlotWidthTop = 3.15;
SlotWidthBot = SlotDia;
StatorPitchAirgap = pi*StatorID/Nslots;
StatorPitchSlotTop = pi*(StatorID+2*ShoeHeight)/Nslots;
StatorPitchSlotBot = pi*(StatorID+2*ShoeHeight+2*ShoeRadius+2*SlotHeight)/Nslots;
PostThickTop = StatorPitchSlotTop-SlotWidthTop;
PostThickBot = StatorPitchSlotBot-SlotWidthBot;
Npoles = 8;
RotorOD = 160.47;
RotorID = 111;
BridgeID = 9.4;
DuctMinDia = RotorID+2*BridgeID;
DuctThick = 4.7;
RibHeight = 3;
RibWidth = 14;
DistMinMag = 3;
MagThick = 6.5;
MagWidth = 18.9;
Bridge = 1.42;
DuctMaxDia = RotorOD-2*Bridge;
alpha = 72.5;
theta = 360/Npoles/2;
#Reading Geometry
mi_readdxf('prius_mm_pizza.dxf')
mi_zoomnatural()
#AddingMaterial
mi_getmaterial('Air')
mi_getmaterial('20 AWG')
mi_addmaterial('19 AWG',1.0,1.0,0,0,58,0,0,0,3,0,0,1,0.912)
mi_addmaterial('N36Z_20',1.03,1.03,920000,0,0.667)
mi_addmaterial('M19_29G',0,0,0,0,1.9,0.34,0,0.94,0)
b=[0,0.05,0.1,0.15,0.36,0.54,0.65,0.99,1.2,1.28,1.33,1.36,1.44,1.52,
1.58,1.63,1.67,1.8,1.9,2,2.1,2.3,2.5,2.563994494,3.7798898740]
h=[0,22.28,25.46,31.83,47.74,63.66,79.57,159.15,318.3,477.46,636.61,
795.77,1591.5,3183,4774.6,6366.1,7957.7,15915,31830,111407,190984,
350135,509252,560177.2,1527756]
for n in range(0,len(b)):
mi_addbhpoint('M19_29G',b[n],h[n])
r0=0.5*StatorOD
ri=0.5*RotorID
r0In=0.5*StatorID
riOut=0.5*RotorOD
#AddingBlockLabels
        # At this point I am directly referring to the geometry for coordinates
mi_addblocklabel(0.95*r0,5) #Adding StatorLabel
mi_selectlabel(0.95*r0,5)
mi_setblockprop('M19_29G',1,0,'None',0,1,0)
mi_clearselected()
mi_addblocklabel(0.95*r0In,5) #Adding RotorLabel
mi_selectlabel(0.95*r0In,5)
mi_setblockprop('M19_29G',1,0,'None',0,1,0)
mi_clearselected()
mi_addblocklabel(82.83,5) #Adding StatorAir
mi_selectlabel(82.83,5)
mi_setblockprop('Air',0.1,0,'None',0,1,0)
mi_clearselected()
mi_addblocklabel(82,5) #Adding RotorAir
mi_selectlabel(82,5)
mi_setblockprop('Air',0.1,0,'None',0,1,0)
mi_clearselected()
mi_addblocklabel(r0In+10,5) #Adding Winding
mi_selectlabel(r0In+10,5)
mi_setblockprop('20 AWG',1,0,'None',0,1,0)
mi_copyrotate(0,0,360/Nslots,5)
mi_clearselected()
mi_addblocklabel(68,16.5) #Adding RotorMagnet1
mi_selectlabel(68,16.5)
mi_setblockprop('N36Z_20',1,0,'None',40,3,0)
mi_clearselected()
mi_addblocklabel(60,36.5) #Adding RotorMagnet2
mi_selectlabel(60,36.5)
mi_setblockprop('N36Z_20',1,0,'None',5,3,0)
mi_clearselected()
mi_addblocklabel(75,9) #Adding Airpocket
mi_selectlabel(75,9)
mi_setblockprop('Air',1,0,'None',0,1,0)
mi_clearselected()
mi_addblocklabel(62,25) #Adding Airpocket
mi_selectlabel(62,25)
mi_setblockprop('Air',1,0,'None',0,1,0)
mi_clearselected()
mi_addblocklabel(60,47) #Adding Airpocket
mi_selectlabel(60,47)
mi_setblockprop('Air',1,0,'None',0,1,0)
mi_clearselected()
#adding boundary
        # Anti-periodic boundary: only one pole is modelled, so an anti-periodic boundary is applied
mi_addboundprop('AirBound',0,0,0,0,0,0,0,0,0) #Vector Potential Boundary
mi_selectarcsegment(125,50)
mi_selectarcsegment(51,21)
mi_setarcsegmentprop(5,'AirBound',0,1);
mi_clearselected()
mi_addboundprop('pb1',0,0,0,0,0,0,0,0,5,0,0) #Stator Side Boundary
mi_selectsegment(80,80)
mi_selectsegment(100,0)
mi_setsegmentprop('pb1',1,0,0,0)
mi_clearselected()
mi_addboundprop('pb2',0,0,0,0,0,0,0,0,5,0,0) #Stator Air Boundary
mi_selectsegment(58.5,58.5)
mi_selectsegment(82.7,0)
mi_setsegmentprop('pb2',1,0,0,0)
mi_clearselected()
mi_addboundprop('pb3',0,0,0,0,0,0,0,0,5,0,0) #Rotor Air Boundary
mi_selectsegment(58.1,58.1)
mi_selectsegment(82.21,0)
mi_setsegmentprop('pb3',1,0,0,0)
mi_clearselected()
mi_addboundprop('pb4',0,0,0,0,0,0,0,0,5,0,0) #Rotor Air Boundary
mi_selectsegment(53,53)
mi_selectsegment(75,0)
mi_setsegmentprop('pb4',1,0,0,0)
mi_clearselected()
mi_addboundprop('SlidingBoundary',0,0,0,0,0,0,0,0,7,0,-67.5) #SlidingBoundary
mi_selectarcsegment(82.5,5)
mi_selectarcsegment(82.2,5)
mi_setarcsegmentprop(0.1,'SlidingBoundary',0,0)
mi_clearselected()
        #Initialization of circuit parameters
Current = [10] # Amps
Strands = 9;
ParallelWires = 13;
Turns = 117 # 9 strands x 13 wires in parallel
Phase_init = 0; #120 # deg electrical angle
# Phase_step = 8;
Phase = Phase_init;
SpeedRPM = BEMFSpeed #RPM
Freq = Npoles*SpeedRPM/120 # Hz
time = 0;
mi_addcircprop('A',Current[0]*sin(2*pi*Freq*time+Phase*pi/180),1)
mi_addcircprop('B',Current[0]*sin(2*pi*Freq*time+Phase*pi/180+2*pi/3),1)
mi_addcircprop('C',Current[0]*sin(2*pi*Freq*time+Phase*pi/180+4*pi/3),1)
Circuit = ['A','A','B','B','C','C']
CoilDir = [+1,+1,-1,-1,+1,+1]
origin=(0,0)
point=(r0In+10,5)
for i in range(len(Circuit)):
x,y=rotate(origin,point,radians(i*7.5))
mi_selectlabel(x,y)
mi_setblockprop('19 AWG',1,0,Circuit[i],0,1,CoilDir[i]*Strands)
mi_clearselected()
mi_zoomnatural()
PhaseArray=[40]
niterat=90;
InitialAngle=360/Nslots
StepAngle=1
k=0;
Torque=0;
step_vec=[];
torq_vec=[];
time_vec=[];
Phase_vec=[];
Phase_max=[];
Torque_max=[];
PhA_vec=[];
PhB_vec=[];
PhC_vec=[];
totTorq=[]
CircProp=[]
Angle=InitialAngle
mi_modifyboundprop('SlidingBoundary',10,InitialAngle)
startTime=float(myTime.time())
startTimeInterval=float(myTime.time())
avgTorqArray=[]
SpeedRPM = BEMFSpeed #RPM
Freq = Npoles*SpeedRPM/120
Phase=40
time=0
step_vec=[];
torq_vec=[];
time_vec=[];
Phase_vec=[];
PhA_vec=[];
PhB_vec=[];
PhC_vec=[];
CircPropA=[]
CircPropB=[]
CircPropC=[]
#Simulation Start
for i in range((niterat)):
Curr_PhA = 0
Curr_PhB = 0
Curr_PhC = 0
mi_modifycircprop('A',1,Curr_PhA)
mi_modifycircprop('B',1,Curr_PhB)
mi_modifycircprop('C',1,Curr_PhC)
mi_modifyboundprop('SlidingBoundary',10,Angle)
mi_saveas('BEMF'+str(BEMFSpeed)+'.FEM')
mi_clearselected()
smartmesh(1)
mi_analyze(0);
mi_loadsolution();
#Uncomment if you want to save the density plot
# mo_showdensityplot(1,0,2,0.0,'mag');
# mo_savebitmap('SinglePoimt'+'Current'+str(Current[j])+'_'+str(i)+'.png')
Angle=Angle+StepAngle
CircPropA.append(mo_getcircuitproperties('A'))
CircPropB.append(mo_getcircuitproperties('B'))
CircPropC.append(mo_getcircuitproperties('C'))
time_vec.append(time)
Phase_vec.append(Phase)
torq_vec.append(Torque)
time = time + 1/Freq/niterat
InitialAngle =0;
totTorq.append(torq_vec)
avgTorqArray.append(mean(torq_vec))
nowTime=myTime.time()
fluxLinkageA=[]
fluxLinkageB=[]
fluxLinkageC=[]
for i in range(len(CircPropA)):
fluxLinkageA.append(CircPropA[i][2])
fluxLinkageB.append(CircPropB[i][2])
fluxLinkageC.append(CircPropC[i][2])
closefemm()
return fluxLinkageA,fluxLinkageB,fluxLinkageC,niterat
except Exception as e:
return e
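# Illustrative sketch of how BEMFSimulation() could be fanned out over the `Workers`
# processes with the concurrent.futures import above. This driver is an assumption for
# clarity -- it is not shown in this excerpt -- the speed list is hypothetical, and
# error handling for the exception-returning path is omitted.
#
#     speeds = [1000, 2000, 3000]
#     with concurrent.futures.ProcessPoolExecutor(max_workers=Workers) as pool:
#         for speed, (fa, fb, fc, n) in zip(speeds, pool.map(BEMFSimulation, speeds)):
#             BEMFcomputation(fa, fb, fc, speed, n)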
def BEMFcomputation(fluxLinkageA,fluxLinkageB,fluxLinkageC,BEMFSpeed,niterat):
try:
Npoles=8
SpeedRPM = BEMFSpeed #RPM
Freq = Npoles*SpeedRPM/120
Phase=40
time=0
step_vec=[];
torq_vec=[];
time_vec=[];
diffactor=Npoles/DeviationFactor
dt=(1/(6*SpeedRPM))
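        # dt is the time the rotor takes to advance one mechanical degree (the 1-degree
        # StepAngle used in BEMFSimulation), since SpeedRPM rev/min = 6*SpeedRPM deg/s.
        # diffactor appears to scale the single-pole (pizza-model) flux linkage towards
        # the full machine and to fold in the empirical 1.2 deviation factor.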
time=0
for i in range(len(fluxLinkageA)):
time=time+dt*1
time_vec.append(time)
bemfA=diffactor*(diff(fluxLinkageA)/dt) #Line Neutral
bemfB=diffactor*(diff(fluxLinkageB)/dt)
bemfC=diffactor*(diff(fluxLinkageC)/dt)
bemfTimeArray=[]
bemfTimeArray.append(time_vec)
bemfTimeArray[0].pop(len(bemfTimeArray[0])-1)
figure()
plot(bemfTimeArray[0],bemfA)
plot(bemfTimeArray[0],bemfB)
plot(bemfTimeArray[0],bemfC)
title('Phase Voltage '+str(SpeedRPM)+' RPM')
xlabel('Time(sec)')
ylabel('Voltage(V)')
        plt.savefig(os.path.join(pltdir, 'BEMF-PHASE_' + str(SpeedRPM) + '.png'))
figure()
title('Line Voltage '+str(SpeedRPM)+' RPM')
plot(bemfTimeArray[0],bemfA-bemfB)
plot(bemfTimeArray[0],bemfB-bemfC)
plot(bemfTimeArray[0],bemfC-bemfA)
xlabel('Time(sec)')
ylabel('Voltage(V)')
        plt.savefig(os.path.join(pltdir, 'BEMF-LINE_' + str(SpeedRPM) + '.png'))
data = [time_vec,fluxLinkageA,fluxLinkageB,fluxLinkageC]
alldata=pd.DataFrame(data)
alldata=alldata.T
alldata.columns=['Time','FA','FB','FC']
        alldata.to_excel(os.path.join(xldir, 'Fluxlinkage' + str(SpeedRPM) + '.xlsx'), index=False)
data=[bemfTimeArray[0],bemfA,bemfB,bemfC]
alldata=pd.DataFrame(data)
alldata=alldata.T
alldata.columns=['Time','VA-Phase','VB-Phase','VC-Phase']
        alldata.to_excel(os.path.join(xldir, 'PhaseVoltage' + str(SpeedRPM) + '.xlsx'), index=False)
data=[bemfTimeArray[0],bemfA-bemfB,bemfB-bemfC,bemfC-bemfA]
alldata=
|
pd.DataFrame(data)
|
pandas.DataFrame
|
from backlight.metrics import trade_metrics as module
import pandas as pd
import pytest
import backlight.datasource
from backlight.trades import trades as tr
@pytest.fixture
def symbol():
return "usdjpy"
@pytest.fixture
def market(symbol):
data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0], [9.0], [9.0]]
df = pd.DataFrame(
index=pd.date_range(start="2018-06-06", freq="1D", periods=len(data)),
data=data,
columns=["mid"],
)
return backlight.datasource.from_dataframe(df, symbol)
@pytest.fixture
def trades(symbol):
symbol = "usdjpy"
data = [1.0, -2.0, 1.0, 2.0, -4.0, 2.0, 1.0, 0.0, 1.0, 0.0]
index = pd.date_range(start="2018-06-06", freq="1D", periods=len(data))
trades = []
for i in range(0, len(data), 2):
trade = pd.Series(index=index[i : i + 2], data=data[i : i + 2], name="amount")
trades.append(trade)
trades = tr.make_trades(symbol, trades)
return trades
@pytest.fixture
def closed_trades(symbol):
symbol = "usdjpy"
data = [1.0, -1.0, 2.0, -2.0, -4.0, 4.0, 1.0, -1.0, 1.0, -1.0]
index = pd.date_range(start="2018-06-06", freq="1D", periods=len(data))
trades = []
for i in range(0, len(data), 2):
trade = pd.Series(index=index[i : i + 2], data=data[i : i + 2], name="amount")
trades.append(trade)
trades = module.make_trades(symbol, trades)
return trades
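# Note on the two fixtures above: in `trades` the paired amounts do not net to zero
# (e.g. 1.0 followed by -2.0), so positions remain open, whereas every pair in
# `closed_trades` sums to zero, i.e. each trade is flat after its second leg.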
def test__calc_pl():
periods = 3
symbol = "usdjpy"
dates =
|
pd.date_range(start="2018-12-01", periods=periods)
|
pandas.date_range
|
# -*- coding: utf-8 -*-
"""This module powers the DstSubjects class that is the workhorse to
obtain subjects and subjects from Statistics Denmark.
"""
from pandas import DataFrame, to_datetime, read_csv
from pydst import utils
from pydst import validators
import requests
from collections import OrderedDict
from io import StringIO
import os
class Dst(object):
"""Retrieve subjects, metadata and data from Statistics Denmark.
This class provides some simple functions to retrieve information
from Statistics Denmark's API.
Attributes:
lang (str): Can take the values ``en`` for English or ``da``
for Danish
"""
def __init__(self, lang='en'):
self.lang = utils.check_lang(lang)
self.base_url = 'https://api.statbank.dk'
self.version = 'v1'
def get_subjects(self, subjects=None, lang=None):
"""Retrieve subjects and sub subjects from Statistics Denmark.
This function allows to retrieve the subjects and subsubjects
Statistics Denmark uses to categorize their tables. These subjectsID
can be used to only retrieve the tables that is classified with
the respective SubjectsID using ``get_tables``.
Args:
subjects (str/list, optional): If a valid subjectsID is provided
it will return the subject's subsubjects if available. subjects
can either be a list of subjectsIDs in string format or a comma
                separated string
lang (str, optional): If lang is provided it uses this argument
instead of the Dst's class attribute lang. Can take the values
``en`` for English or ``da`` for Danish
Returns:
pandas.DataFrame: Returns a DataFrame with subjects.
Examples:
The example beneath shows how ``get_subjects`` is used.
>>> from pydst import Dst
>>> Dst().get_subjects()
active desc hasSubjects id
0 True Population and elections True 02
1 True Living conditions True 05
2 True Education and knowledge True 03
.. ... ... ... ..
10 True Business sectors True 11
11 True Geography, environment and energy True 01
12 True Other True 19
[13 rows x 4 columns]
"""
lang = utils.assign_lang(self, lang)
if not isinstance(subjects, (str, list, type(None))):
raise ValueError('Subjects must be a list or a string of subject ids')
if isinstance(subjects, (str, list)):
validators.subject_validator(subjects)
query_dict = {
'lang': lang,
'format': 'JSON'
}
if isinstance(subjects, list):
str_subjects = ','.join(subjects)
else:
str_subjects = subjects
url = utils.construct_url(self.base_url,
self.version,
'subjects',
str_subjects or '',
query_dict)
r = requests.get(url)
utils.bad_request_wrapper(r)
return utils.desc_to_df(r.json())
def get_tables(self, subjects=None, inactive_tables=False, lang=None):
"""
Args:
inactive_tables (bool, optional): If True the DataFrame will
contain tables that are no longer updated.
subjects (str/list, optional): If a valid subjectsID is provided
it will return the subject's subsubjects if available. subjects
can either be a list of subjectsIDs in string format or a comma
                separated string
lang (str, optional): If lang is provided it uses this argument
instead of the Dst's class attribute lang. Can take the values
``en`` for English or ``da`` for Danish
Returns:
            pandas.DataFrame: Returns a DataFrame with tables.
Todo:
* Check inactive_tables (cerberus validator)
Examples:
The example beneath shows how ``get_tables`` is used.
>>> from pydst import Dst
>>> Dst().get_tables()
active firstPeriod id latestPeriod
0 True 2008Q1 FOLK1A 2018Q2
1 True 2008Q1 FOLK1B 2018Q2
2 True 2008Q1 FOLK1C 2018Q2
... ... ... ... ...
1958 True 2005 SKOVRG01 2016
1959 True 2005 SKOVRG02 2016
1960 True 2005 SKOVRG03 2016
text unit
0 Population at the first day of the quarter number
1 Population at the first day of the quarter number
2 Population at the first day of the quarter number
... ... ...
1958 Growing stock (physical account) 1,000 m3
1959 Growing stock (monetary account) DKK mio.
1960 Forest area (Kyoto) (physical account) km2
updated variables
0 2018-05-08 08:00:00 [region, sex, age, marital status, time]
1 2018-05-08 08:00:00 [region, sex, age, citizenship, time]
2 2018-05-08 08:00:00 [region, sex, age, ancestry, country of origin...
... ... ...
1958 2017-11-28 08:00:00 [balance items, species of wood, county counci...
1959 2017-11-28 08:00:00 [balance items, species of wood, county counci...
1960 2017-11-28 08:00:00 [balance items, county council district, time]
[1961 rows x 8 columns]
"""
lang = utils.assign_lang(self, lang)
if not isinstance(subjects, (str, list, type(None))):
raise ValueError('Subjects must be a list or a string of subject ids')
if isinstance(subjects, (str, list)):
validators.subject_validator(subjects)
if not isinstance(inactive_tables, bool):
raise ValueError('Must be boolean') # replace with cerberus validator
query_dict = {
'lang': lang,
'format': 'JSON',
'includeInactive': 'true' if inactive_tables else None
}
        if subjects is not None:
query_dict['subjects'] = subjects
url = utils.construct_url(self.base_url,
self.version,
'tables',
'',
query_dict)
r = requests.get(url)
utils.bad_request_wrapper(r)
res = DataFrame(r.json())
res['updated'] =
|
to_datetime(res['updated'])
|
pandas.to_datetime
|
import coloredlogs
import datetime
import errno
import ipaddress
import logging
import maxminddb
import os
from numpy import source
import pandas as pd
import getpass
import pyesedb as esedb
import sqlite3
import sys
import traceback
import uuid
import binascii
import struct
import time
from argparse import ArgumentParser
from configparser import ConfigParser
from datetime import datetime, timedelta
from binascii import unhexlify
from pandas.core.frame import DataFrame
from pandas.io.parsers import ParserError
from struct import unpack
__author__ = '<NAME>'
__version__ = '20211106'
__credit__ = 'Inspired by BriMor Labs/KStrike'
"""
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Requirements:
* Python3
* Colorlogs (pip install colorlog)
* maxminddb (pip install maxminddb)
* Pandas (pip install pandas)
* libesedb (pyesedb) (compile from source: https://github.com/libyal/libesedb) (pip install libesedb-python failed for 3.8 and 3.9, YMMV)
* GeoLite2-City.mmdb (https://www.maxmind.com)
Artifact References:
https://www.crowdstrike.com/blog/user-access-logging-ual-overview/
https://advisory.kpmg.us/blog/2021/digital-forensics-incident-response.html
https://en.wikipedia.org/wiki/Extensible_Storage_Engine
"""
class LogClass:
def __init__(self, logname, debug_level=20):
"""
Critical == 50
Error == 40
Warning == 30
Info == 20
Debug == 10
Notset = 0
"""
current_user = getpass.getuser()
# log_format = '%(asctime)s:%(levelname)s:%(message)s'
# date_fmt = '%m/%d/%Y %I:%M:%S'
# logging.basicConfig(filename=logname, format=log_format, level=debug_level, filemode='a', datefmt=date_fmt)
# console = logging.StreamHandler()
# console.setLevel(debug_level)
# formatter = logging.Formatter(log_format)
# console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
clr_log_format = '%(asctime)s:%(hostname)s:%(programname)s:%(username)s[%(process)d]:%(levelname)s:%(message)s'
coloredlogs.install(level=debug_level, fmt=clr_log_format)
@staticmethod
def log(level='info', message=''):
if level.lower() == 'debug':
logging.debug(message)
if level.lower() == 'info':
logging.info(message)
if level.lower() == 'warning':
logging.warning(message)
if level.lower() == 'error':
logging.error(message)
if level.lower() == 'critical':
logging.critical(message)
class PrepClass:
def __init__(self, raw_output_path):
self.raw_output_path = raw_output_path
self.log_file = os.path.join(self.raw_output_path, 'Script Processing.log')
self.sql_db = os.path.join(self.raw_output_path, 'UAL_DB.sqlite')
self.sql_file = ''
self.db_setup = ''
self.p_log = ''
self.config = ''
def setup_logging(self, debug_mode=False):
log_level = 'info'
if debug_mode:
log_level = 'debug'
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % log_level)
self.p_log = LogClass(self.log_file, numeric_level)
return self.p_log
def setup_output_directory(self):
if not os.path.exists(self.raw_output_path):
try:
os.makedirs(self.raw_output_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
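# Illustrative usage sketch for PrepClass; this driver code is not part of the excerpt
# and the output path below is hypothetical:
#
#     prep = PrepClass(r'C:\cases\ual_output')
#     prep.setup_output_directory()
#     p_log = prep.setup_logging(debug_mode=True)
#     p_log.log('info', 'Output directory and logging ready')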
class UALClass:
def __init__(self, config_dict):
self.config_dict = config_dict
self.source_path = config_dict['raw_input_path']
self.ese_dbs = self.get_ese_files(self.source_path)
self.out_path = config_dict['raw_output_path']
self.maxmind_db = config_dict['maxminddb']
self.ftype = config_dict['ftype']
self.plog = config_dict['p_log']
self.sql_db = os.path.join(self.out_path, 'UAL.db')
self.GUID = list()
self.chained_databases = dict()
self.system_identity = list()
self.series_list = list()
self.chain_db_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
def appendFeature(base_df, ext_series, drop_col_name, fill_value = None):
    '''Concatenates a Series to a DataFrame. If, after concatenation, the
    first row of a given day in ext_series is NaN, it is replaced by
    fill_value (as long as a fill_value is supplied). Right after that,
    a forward fill (ffill) is applied to the concatenated column.
    Also, after concatenating and ffilling, all rows containing NaN in
    the drop_col_name column are dropped.'''
fullset = pd.concat([base_df, ext_series], axis=1)
fill_col_name = ext_series.name
    # Add a fill value at the beginning of the given column, only if one was supplied
    if fill_value is not None:
# extracting all the different dates (year/month/day) in the full dataset
days = fullset.to_period('D').index.unique()
# finding the mininum timestamp for each day in 'dates'.
# this is used to assign a value of 0 people in the room at the beginning of each day,
## only if the value is null.
for d in days:
idx = fullset[d.strftime("%Y-%m-%d")].index.min()
if pd.isnull(fullset.loc[idx,fill_col_name]):
                fullset.at[idx, fill_col_name] = fill_value  # .set_value() was removed from pandas
# Forward fill only the changes of people count in the room
fullset[fill_col_name] = fullset[fill_col_name].ffill()
# then drop cases where there were no readings from the sensors (using only temperature it's enough)
fullset.dropna(subset=[drop_col_name], inplace=True)
return fullset
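# Illustrative usage sketch (column and variable names are hypothetical): merging a
# people-count Series into a sensor DataFrame, starting each day at 0 occupants and
# dropping rows that have no temperature reading.
#
#     merged = appendFeature(sensor_df, people_count, drop_col_name='temperature', fill_value=0)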
def replaceOutliers(df, mult=3):
'''Replaces the outliers in a dataframe using a standard deviation multiplier.
The default behaviour is to exclude all values greater than 3 stdevs'''
res = df.copy()
res[df.apply(lambda x: np.abs(x - x.mean()) > mult*x.std())] = np.nan
return res.ffill().bfill()
def moving_average(a, n=15) :
'''Regular moving average over a numpy array'''
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def totalMA(a, n=15):
'''Decrementing-window moving average to avoid losing data from initial
points lower than the window. Works over a numpy array. Example:
Having an array [1,2,3,4,5], a regular MA with n=4 will output
[2.5, 3.5] while totalMA with n=4 will return [1.0, 1.5, 2.0, 2.5, 3.5],
applying MAs of windows n= 3, 2 and 1 for values lower than the initial
window of 4.'''
tail = []
for i in range(n-1):
tail.append(np.mean(a[0:i+1]))
tail.extend(moving_average(a, n))
return tail
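# The docstring example above, spelled out as an illustrative check: with
# a = np.array([1, 2, 3, 4, 5]) and n=4, moving_average(a, 4) returns array([2.5, 3.5]),
# while totalMA(a, 4) returns values equal to [1.0, 1.5, 2.0, 2.5, 3.5] -- the first
# three entries are the shrinking-window means mean(a[:1]), mean(a[:2]) and mean(a[:3]).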
def interpolateByDay(df, tframe = '5S', useLast = False):
'''Interpolate a set of days in a dataframe without adding any additional
    dead time. Instead of interpolating the entire dataset, it breaks down
    the work by day and then concatenates everything back together.
    This is useful to prevent adding values before or after the first or
    last reading of a given day, while ensuring that missing values are
    still added (through interpolation) in between the start and end dates.
The default timeframe is 5 seconds.'''
result = None
days = df.to_period('D').index.unique()
for d in days:
aux = df[d.strftime("%Y-%m-%d")]
# assigns NaN to new added values, then interpolate method will take
# care of these blanks.
# An alternate option for this method allows to resample using last seen
# value in new timeframe.
if (not useLast):
aux = aux.resample(tframe).apply(lambda x: x.mean() if len(x) > 0 else np.nan)
else:
aux = aux.resample(tframe).last()
# using pchip method to get a smoothed curve between initial and end points
# of any previous gap.
if (not useLast):
aux = aux.interpolate(method='pchip')
if result is None:
result = aux
else:
result =
|
pd.concat([result,aux])
|
pandas.concat
|
# -*- coding: utf-8 -*-
import pandas as pd
import coppredict.preprocessing as pre
class Patricia:
def __init__(self):
self._data = {}
def check_index(self, pattern, ind, case):
"""
Atributes:
pattern
ind
case
"""
try:
if case == 1:
val = pattern[ind]
return val
elif case == 2:
val = pattern[ind:]
return val
except IndexError:
return ''
def add_pattern(self, pattern, support):
"""
Creates and adds a pattern
        Attributes:
pattern
support
Example:
[C1,C2,-C3,--C1,-C2,--C3,--C4] => C1C2-C3--C1-C2--C3--C4
"""
data = self._data
i = 0
pattern = pre.convert_pattern_to_string(pattern)
while 1:
try:
tmp = pattern[i]
if isinstance(tmp, str):
node = data[tmp]
except KeyError:
if data:
ind = pattern[i]
rest = pattern[i+1:]
data[ind] = [rest, {support}]
return
else:
if tmp == '':
return
else:
if i != 0:
data[''] = ['', {}]
data[pattern[i]] = [pattern[i+1:], {support}]
return
i += 1
if self.starts_with_sublist(pattern, node[0], i):
if len(pattern[i:]) == len(node[0]):
if node[1]:
try:
node[1]['']
except KeyError:
data = node[1]
data[''] = ['', {support}]
return
else:
i += len(node[0])
data = node[1]
else:
ii = i
j = 0
while ii != len(pattern) and j != len(node[0]) and pattern[ii:ii+1] == node[0][j:j+1]:
ii += 1
j += 1
tmp_data = {node[0][j]: [node[0][j + 1:], node[1]]}
ind1 = self.check_index(pattern, ii, 1)
ind2 = self.check_index(pattern, ii + 1, 2)
tmp_data[ind1] = [ind2, {support}]
data[pattern[i-1]] = [node[0][:j], tmp_data]
return
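    # Illustrative sketch of inserting two patterns that share the 'C1C2-' prefix
    # (the literal pattern values are hypothetical; the weights are their supports):
    #
    #     pt = Patricia()
    #     pt.add_pattern(['C1', 'C2', '-C3'], 0.8)
    #     pt.add_pattern(['C1', 'C2', '-C4'], 0.5)
    #
    # The shared prefix is stored once and the trie branches at the first differing symbol.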
def starts_with_sublist(self, l, sub, i):
"""
Check if a list starts with a sublist.
"""
# It's independent of class scope
return l[i:len(sub)+i] == sub
def is_prefix(self, pattern):
"""
Check if a pattern is a prefix.
"""
data = self._data
i = 0
patternlen = len(pattern)
while 1:
try:
node = data[pattern[i:i+1]]
except KeyError:
return False
i += 1
if pattern.startswith(node[0][:patternlen-i], i):
if patternlen - i > len(node[0]):
i += len(node[0])
data = node[1]
else:
return True
else:
return False
def is_pattern(self, pattern):
"""
        Check whether a pattern belongs to the Patricia trie. Returns False if it
        does not; otherwise returns the stored weight.
        Attributes:
pattern
"""
data = self._data
i = 0
while 1:
try:
node = data[pattern[i]]
except KeyError:
return False
except TypeError:
return False
i += 1
if pattern[i:len(node[0])+i] == node[0]:
if len(pattern[i:]) == len(node[0]):
if node[1]:
try:
vl = node[1]['']
except KeyError:
return False
except IndexError:
return False
except TypeError:
aux = node[1]
return next(iter(aux))
aux = node[1][''][1]
return next(iter(aux))
else:
i += len(node[0])
data = node[1]
else:
return False
def remove_pattern(self, pattern):
"""
Not used.
"""
data = self._data
i = 0
while 1:
try:
node = data[pattern[i:i+1]]
except KeyError:
print("Pattern is not in trie.")
return
i += 1
if pattern.startswith(node[0], i):
if len(pattern[i:]) == len(node[0]):
if node[1]:
try:
vl = node[1]['']
node[1].pop('')
except KeyError:
print("Pattern is not in trie.")
return
data.pop(pattern[i-1:i])
return
else:
i += len(node[0])
data = node[1]
else:
print("Pattern is not in trie.")
return
def get_super_patterns(self, pattern):
"""
        Finds all the super patterns of the input pattern and returns a dataframe with each super pattern and its weight.
"""
data = self._data
df_sup =
|
pd.DataFrame(columns=['sup_pattern', 'weight'])
|
pandas.DataFrame
|
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import requests
import utils
import pandas as pd
import datetime as dt
import numpy as np
from itertools import groupby
import time
class vacunacion:
def __init__(self,output,indicador):
self.output = output
self.indicador = indicador
self.my_files = {
'vacunacion_fabricante':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv',
'vacunacion_region':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv',
'vacunacion_edad':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv',
'vacunacion_grupo':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv',
}
self.path = '../input/Vacunacion'
def get_last(self):
        ## download the corresponding file
if self.indicador == 'fabricante':
print('Retrieving files')
print('vacunacion_fabricante')
r = requests.get(self.my_files['vacunacion_fabricante'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'campana':
print('Retrieving files')
print('vacunacion_region')
r = requests.get(self.my_files['vacunacion_region'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'edad':
print('Retrieving files')
print('vacunacion_edad')
r = requests.get(self.my_files['vacunacion_edad'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'caracteristicas_del_vacunado':
print('Retrieving files')
print('vacunacion_grupo')
r = requests.get(self.my_files['vacunacion_grupo'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
        ## select the corresponding file
if self.indicador == 'fabricante':
print('reading files')
print('vacunacion_fabricante')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv')
elif self.indicador == 'campana':
print('reading files')
print('vacunacion_region')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv')
elif self.indicador == 'edad':
print('reading files')
print('vacunacion_edad')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_edad.csv')
elif self.indicador == 'caracteristicas_del_vacunado':
print('reading files')
print('vacunacion_grupo')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_grupo.csv')
elif self.indicador == 'vacunas_region':
print('reading files')
print('vacunacion por region por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna':
print('reading files')
print('vacunacion por comuna por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_region':
print('reading files')
print('vacunacion por region por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_sexo':
print('reading files')
print('vacunacion por sexo por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
print('vacunacion por sexo por edad y FECHA')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6_2.csv', sep=';', encoding='ISO-8859-1')
self.last_edad_fecha = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_prioridad':
print('reading files')
print('vacunacion por grupos prioritarios')
self.last_added = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8.csv', sep=';', encoding='ISO-8859-1')
# aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8_2.csv', sep=';', encoding='ISO-8859-1')
# self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna_edad':
print('reading files')
print('vacunacion por comuna por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_establecimiento':
print('reading files')
print('vacunacion por establecimiento')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante':
print('reading files')
print('vacunacion por fabricante y fecha')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante_edad':
print('reading files')
print('vacunacion por fabricante y edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
def last_to_csv(self):
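        # The branches below export each indicator in up to three layouts: a wide CSV
        # (one column per date), a transposed CSV ('_t.csv'), and a long-format CSV
        # ('_std.csv') built with pd.melt.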
if self.indicador == 'fabricante':
            ## campaign by manufacturer
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
self.last_added.rename(columns={'Type': 'Fabricante'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Fabricante', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'campana':
            ## campaign by region
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
utils.regionName(self.last_added)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'edad':
            ## campaign by age group
self.last_added.rename(columns={'Dose': 'Dosis',
'Age':'Rango_etario'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Rango_etario', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'caracteristicas_del_vacunado':
            ## campaign by characteristics of the vaccinated person
self.last_added.rename(columns={'Dose': 'Dosis',
'Group':'Grupo'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Grupo', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'vacunas_region':
self.last_added.rename(columns={'REGION_CORTO': 'Region',
'COD_COMUNA_FINAL': 'Comuna',
'FECHA_INMUNIZACION': 'Fecha',
'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna',
'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna',
'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna',
'SUM_of_4_Dosis':'Cuarta_comuna',
'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna'}, inplace=True)
self.last_added = self.last_added.dropna(subset=['Fecha'])
self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'],format='%d/%m/%Y').dt.strftime("%Y-%m-%d")
self.last_added.sort_values(by=['Region','Fecha'], inplace=True)
utils.regionName(self.last_added)
regiones = pd.DataFrame(self.last_added['Region'].unique())
            # transform
            ## aggregate the comuna-level counts by region and date
self.last_added['Primera'] = self.last_added.groupby(['Region','Fecha'])['Primera_comuna'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Region','Fecha'])['Segunda_comuna'].transform('sum')
self.last_added['Unica'] = self.last_added.groupby(['Region', 'Fecha'])['Unica_comuna'].transform('sum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Fecha'])['Refuerzo_comuna'].transform('sum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Fecha'])['Cuarta_comuna'].transform(
'sum')
self.last_added = self.last_added[['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta']]
self.last_added.drop_duplicates(inplace=True)
            ## fill in missing dates for each region and build the total
idx = pd.date_range(self.last_added['Fecha'].min(), self.last_added['Fecha'].max())
df = pd.DataFrame()
total = pd.DataFrame(columns=['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta'])
total = utils.fill_in_missing_dates(total, 'Fecha', 0, idx)
total["Region"] = total["Region"].replace({0: 'Total'})
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region = utils.fill_in_missing_dates(df_region,'Fecha',0,idx)
df_region["Region"] = df_region["Region"].replace({0:region})
total['Primera'] = df_region['Primera'] + total['Primera']
total['Segunda'] = df_region['Segunda'] + total['Segunda']
total['Unica'] = df_region['Unica'] + total['Unica']
total['Refuerzo'] = df_region['Refuerzo'] + total ['Refuerzo']
total['Cuarta'] = df_region['Cuarta'] + total['Cuarta']
df = df.append(df_region, ignore_index=True)
total = total.append(df,ignore_index=True)
total['Fecha'] = total['Fecha'].dt.strftime("%Y-%m-%d")
self.last_added = total
            ## accumulate totals (cumulative sums per region)
self.last_added['Primera'] = pd.to_numeric(self.last_added['Primera'])
self.last_added['Segunda'] = pd.to_numeric(self.last_added['Segunda'])
self.last_added['Unica'] = pd.to_numeric(self.last_added['Unica'])
self.last_added['Refuerzo'] = pd.to_numeric(self.last_added['Refuerzo'])
self.last_added['Cuarta'] = pd.to_numeric(self.last_added['Cuarta'])
self.last_added['Primera'] = self.last_added.groupby(['Region'])['Primera'].transform('cumsum')
self.last_added['Segunda'] = self.last_added.groupby(['Region'])['Segunda'].transform('cumsum')
self.last_added['Unica'] = self.last_added.groupby(['Region'])['Unica'].transform('cumsum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region'])['Refuerzo'].transform('cumsum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region'])['Cuarta'].transform('cumsum')
self.last_added['Total'] = self.last_added.sum(numeric_only=True, axis=1)
            ## reshape into the input format
df = pd.DataFrame()
regiones = pd.DataFrame(self.last_added['Region'].unique())
for region in regiones[0]:
df_region = self.last_added.loc[self.last_added['Region'] == region]
df_region.set_index('Fecha',inplace=True)
df_region = df_region[['Primera','Segunda','Unica','Refuerzo','Cuarta']].T
df_region.reset_index(drop=True, inplace=True)
df = df.append(df_region, ignore_index=True)
            # one ['Primera', 'Segunda', 'Unica', 'Refuerzo', 'Cuarta'] block per region (including the total)
            new_col = ['Primera', 'Segunda', 'Unica', 'Refuerzo', 'Cuarta'] * len(regiones)
df.insert(0, column='Dosis', value=new_col)
new_col = pd.DataFrame()
for region in regiones[0]:
col = [region,region,region,region,region]
new_col = new_col.append(col, ignore_index=True)
df.insert(0, column='Region', value=new_col)
self.last_added = df
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
df_std.to_json(self.output + '.json',orient='values',force_ascii=False)
elif self.indicador == 'vacunas_edad_region':
self.last_added.rename(columns={'NOMBRE_REGION': 'Region',
'COD_COMUNA': 'Comuna',
'EDAD_ANOS': 'Edad',
'POBLACION':'Poblacion',
'2aDOSIS_RES': 'Segunda_comuna',
'1aDOSIS_RES': 'Primera_comuna',
'4aDOSIS':'Cuarta_comuna',
'Refuerzo_DOSIS':'Refuerzo_comuna',
'ÚnicaDOSIS':'Unica_comuna'}, inplace=True)
self.last_added.sort_values(by=['Region', 'Edad'], inplace=True)
utils.regionName(self.last_added)
regiones = pd.DataFrame(self.last_added['Region'].unique())
            # transform
            ## aggregate the comuna-level counts by region and age
self.last_added['Primera'] = self.last_added.groupby(['Region', 'Edad'])['Primera_comuna'].transform('sum')
self.last_added['Segunda'] = self.last_added.groupby(['Region', 'Edad'])['Segunda_comuna'].transform('sum')
self.last_added['Unica'] = self.last_added.groupby(['Region', 'Edad'])['Unica_comuna'].transform('sum')
self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Edad'])['Refuerzo_comuna'].transform('sum')
self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Edad'])['Cuarta_comuna'].transform('sum')
self.last_added['Poblacion'] = self.last_added.groupby(['Region','Edad'])['Poblacion'].transform('sum')
self.last_added = self.last_added[['Region', 'Edad', 'Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta']]
self.last_added.drop_duplicates(inplace=True)
            ## build the total
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.decomposition import PCA
# Dataset for cross validation
def Load_mushroom_dataset():
    # loading the mushroom dataset from UCI
path="https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data"
df =
|
pd.read_csv(path)
|
pandas.read_csv
|
"""<NAME>020.
MLearner Machine Learning Library Extensions
Author:<NAME><www.linkedin.com/in/jaisenbe>
License: MIT
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import datetime
import joblib
import time
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import lightgbm as lgb
from lightgbm import LGBMClassifier
import seaborn as sns
from mlearner.training import Training
from mlearner.utils import ParamsManager
import warnings
warnings.filterwarnings("ignore")
param_file = "mlearner/classifier/config/models.json"
class modelLightBoost(Training, BaseEstimator, ClassifierMixin):
"""
    Multiclass example:
https://www.kaggle.com/nicapotato/multi-class-lgbm-cv-and-seed-diversification
"""
def __init__(self, name="LGB", random_state=99, train_dir="", params=None, *args, **kwargs):
self.name = name
self.train_dir = train_dir + "/" + "model_" + str(self.name) + "/"
self.random_state = random_state
if params is None:
self.get_params_json()
self.params.update({
'model_dir': self.train_dir,
"seed": self.random_state})
else:
# if isinstance(params)
self.params = params
self.model = LGBMClassifier(**self.params)
super().__init__(self.model, random_state=self.random_state)
def get_params_json(self):
self.manager_models = ParamsManager(param_file, key_read="Models")
self.params = self.manager_models.get_params()["LightBoost"]
self.manager_finetune = ParamsManager(param_file, key_read="FineTune")
self.params_finetune = self.manager_finetune.get_params()["LightBoost"]
def dataset(self, X, y, categorical_columns_indices=None, test_size=0.2, *args, **kwarg):
self.categorical_columns_indices = categorical_columns_indices
self.X = X
self.columns = list(X)
self.y, self.cat_replace = self.replace_multiclass(y)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_size, random_state=self.random_state)
self.dtrain = lgb.Dataset(self.X_train.values, label=self.y_train.values,
feature_name=self.X_train.columns.tolist())
self.dvalid = lgb.Dataset(self.X_test.values, label=self.y_test.values,
feature_name=self.X_test.columns.tolist())
self.all_train_data = lgb.Dataset(self.X.values, label=self.y.values,
feature_name=self.X.columns.tolist())
def set_dataset_nosplit(self, X_train, X_test, y_train, y_test, categorical_columns_indices=None, *args, **kwarg):
self.categorical_columns_indices = categorical_columns_indices
self.columns = list(X_train)
_ytrain, _ = self.replace_multiclass(y_train)
_ytest, _ = self.replace_multiclass(y_test)
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.X = pd.concat([X_train, X_test], axis=0)
self.y = pd.concat([y_train, y_test], axis=0)
self.dtrain = lgb.Dataset(self.X_train.values, label=self.y_train.values,
feature_name=self.X_train.columns.tolist())
self.dvalid = lgb.Dataset(self.X_test.values, label=self.y_test.values,
feature_name=self.X_test.columns.tolist())
self.all_train_data = lgb.Dataset(self.X.values, label=self.y.values,
feature_name=self.X.columns.tolist())
def replace_multiclass(self, targets):
_unic = targets.unique().tolist()
_remp = np.arange(0, len(_unic)).tolist()
return targets.replace(_unic, _remp), _unic
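    # What replace_multiclass does, on a small illustrative example (not taken from the
    # original project): unique labels are mapped to consecutive integers in order of
    # first appearance, e.g. pd.Series(['cat', 'dog', 'cat', 'bird']) becomes
    # pd.Series([0, 1, 0, 2]), and the second return value ['cat', 'dog', 'bird'] keeps
    # the original labels so predictions can be decoded later.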
def fit(self, X=None, y=None, X_train=None, X_test=None, y_train=None, y_test=None, mute=False,
use_best_model=True, verbose=0, num_boost_round=100, nosplit=False, **kwargs):
if not nosplit:
self.dataset(X, y)
else:
self.set_dataset_nosplit(X_train, X_test, y_train, y_test)
self.params.update({'verbose': verbose})
self.model = lgb.train(
self.params,
self.dtrain,
num_boost_round=num_boost_round,
verbose_eval=verbose,
**kwargs)
preds_test = [np.argmax(line) for line in self.model.predict(self.X_test, num_iteration=self.model.best_iteration)]
score_test = accuracy_score(self.y_test, preds_test)
preds_train = [np.argmax(line) for line in self.model.predict(self.X_train, num_iteration=self.model.best_iteration)]
score_train = accuracy_score(self.y_train, preds_train)
if not mute:
print("Accurancy para el conjunto de entrenamiento ---> {:.2f}%".format(score_train*100))
print("Accurancy para el conjunto de validacion ------> {:.2f}%".format(score_test*100))
def fit_cv(self, X=None, y=None, X_train=None, X_test=None, y_train=None, y_test=None, nfold=5,
use_best_model=True, verbose=200, nosplit=False, early_stopping_rounds=150, num_boost_round=2000, **kwargs):
if not nosplit:
self.dataset(X, y)
else:
self.set_dataset_nosplit(X_train, X_test, y_train, y_test)
self.params.update({'verbose': verbose})
self.lgb_cv = lgb.cv(
params=self.params,
train_set=self.all_train_data,
num_boost_round=num_boost_round,
stratified=True,
nfold=nfold,
seed=self.random_state,
early_stopping_rounds=early_stopping_rounds,
**kwargs)
loss = self.params["metric"]
optimal_rounds = np.argmin(self.lgb_cv[str(loss) + '-mean'])
best_cv_score = min(self.lgb_cv[str(loss) + '-mean'])
if not verbose == 0:
print("\nOptimal Round: {}\nOptimal Score: {:.3f} + stdv:{:.3f}".format(
optimal_rounds, best_cv_score, self.lgb_cv[str(loss) + '-stdv'][optimal_rounds]))
results = {"Rounds": optimal_rounds,
"Score": best_cv_score,
"STDV": self.lgb_cv[str(loss) + '-stdv'][optimal_rounds],
"LB": None,
"Parameters": self.params}
score = np.mean(self.lgb_cv[str(loss) + '-mean'])
return score, results
def func_acc(self, prob_pred, y_target):
_y_pred = np.zeros(len(prob_pred))
for i in range(0, len(prob_pred)):
_y_pred[i] = int(np.argmax(prob_pred[i]))
accuracy = accuracy_score(_y_pred, y_target)
return accuracy
def pred_binary(self, X, *args, **kwargs):
_X_copy = X.loc[:, self.columns].copy()
preds = self.model.predict(_X_copy, *args, **kwargs)
return np.where(preds > 0.5, 1, 0)
def pred_multiclass(self, X, *args, **kwargs):
_X_copy = X.loc[:, self.columns].copy()
return [np.argmax(line) for line in self.model.predict(_X_copy, num_iteration=self.model.best_iteration)]
def update_model(self, **kwargs):
for k, v in kwargs.items():
setattr(self.model, k, v)
def save_model(self, direct="./checkpoints", name="LGM_model", file_model=".txt"):
if not os.path.isdir(direct):
try:
os.mkdir(direct)
print("Directorio creado: " + direct)
except OSError as e:
raise NameError("Error al crear el directorio")
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
if file_model == ".txt":
filename = direct + "/" + name + "_" + current_time + ".txt"
self.model.save_model(filename)
elif file_model == ".pkl":
filename = direct + "/" + name + "_" + current_time + ".pkl"
joblib.dump(self.model, filename)
else:
raise NameError("Type {} not permited".format(file_model))
print("Modelo guardado en la ruta: " + filename)
def load_model(self, direct="./checkpoints/LGM_model.txt", file_model=".txt"):
if not os.path.isdir(direct):
print("no existe el drectorio especificado")
if file_model == ".txt":
self.model = LGBMClassifier(model_file=direct)
elif file_model == ".pkl":
self.model = joblib.load(direct)
else:
raise NameError("Type {} not permited".format(file_model))
print("Modelo cargado de la ruta: " + direct)
def predict(self, X, *args, **kwargs):
_X_copy = X.loc[:, self.columns].copy()
return self.model.predict(_X_copy, *args, **kwargs)
def predict_proba(self, X, *args, **kwargs):
_X_copy = X.loc[:, self.columns].copy()
return self.model.predict_proba(_X_copy, *args, **kwargs)
def index_features(self, features):
_index = []
for i in features:
_index.append(self.X.columns.get_loc(i))
if _index == []:
raise NameError("No coincide ninguna de las features introducidas")
return _index
def get_important_features(self, display=True, max_num_features=20):
if display:
lgb.plot_importance(self.model, max_num_features=max_num_features, figsize=(6, 6), title='Feature importance (LightGBM)')
plt.show()
# return _feature_importance_df
def FineTune_SearchCV(self, X=None, y=None, X_train=None, X_test=None, y_train=None, y_test=None, params=None, params_finetune=None, ROC=False,
randomized=True, cv=10, display_ROC=True, verbose=0, n_iter=10, replace_model=True, nosplit=False, finetune_dir=""):
self.get_params_json()
self.finetune_dir = finetune_dir + "/" + "model_finetune_" + str(self.name) + "/"
self.params.update({
'train_dir': self.finetune_dir,
"seed": self.random_state})
if params is not None:
self.params = params
if params_finetune is not None:
self.params_finetune = params_finetune
if not nosplit:
self.dataset(X, y)
else:
self.set_dataset_nosplit(X_train, X_test, y_train, y_test)
self.params.update({'verbosity': verbose})
self.model = LGBMClassifier(**self.params)
self._best_Parameters, self.results_df = self.FineTune(self.model, self.X_train, self.y_train, self.params_finetune,
cv=cv, randomized=True, n_iter=n_iter, verbose=1)
self.params.update(**self._best_Parameters)
self.fit(self.X_train, self.y_train)
print("\n")
score = accuracy_score(self.y_test, self.pred_multiclass(self.X_test))
print("\n")
print("Resultado del conjunto de test con los parametros optimos: {:.2f}%".format(score*100))
print("\n")
print("Report clasificacion con el conjunto de test: ")
self.evaluate(self.model, self.X_test, self.y_test)
print("\n")
print("Validacion cruzada con todos los datos del dataset: ")
print("\n")
self.KFold_CrossValidation(LGBMClassifier(**self._best_Parameters), self.X, self.y, n_splits=cv, ROC=ROC, shuffle=True, mute=False,
logdir_report="", display=True, save_image=True, verbose=0)
return self._best_Parameters, self.results_df
def SeedDiversification_cv(self, X=None, y=None, X_train=None, X_test=None, y_train=None, y_test=None, n_iter=10, n_max=2018-2022, cv=10,
nosplit=False, finetuneseed_dir="", display=True, save_image=True, verbose=0):
allmodelstart = time.time()
self.get_params_json()
self.finetune_dir = finetuneseed_dir + "/" + "model_finetune_seed" + str(self.name) + "/"
self.params.update({'train_dir': self.finetune_dir,
'verbosity': verbose})
if not nosplit:
self.dataset(X, y)
else:
self.set_dataset_nosplit(X_train, X_test, y_train, y_test)
self.params.update({'verbosity': verbose})
self.model = LGBMClassifier(**self.params)
_rd = np.random.uniform(0, n_max, n_iter).astype(np.int32).tolist()
params_finetuneseed = {"seed": _rd}
del(_rd)
self._best_Parameters, self.results_df = self.FineTune(self.model, self.X, self.y,
params_finetuneseed, randomized=False, cv=cv, n_iter=n_iter,
verbose=1, mute=True)
print("All Model Runtime: %0.2f Minutes" % ((time.time() - allmodelstart)/60))
print("Diversificacion de la semilla - mean AUC: {:.2f}% - std AUC: {:.5f}".format(
self.results_df['mean_test_AUC'].mean()*100, self.results_df['std_test_AUC'].mean()))
print("Diversificacion de la semilla - mean Acc: {:.2f}% - std Acc: {:.5f}".format(
self.results_df['mean_test_Accuracy'].mean()*100, self.results_df['std_test_Accuracy'].mean()))
return self._best_Parameters, self.results_df
def SeedDiversification_fs(self, X, y, params, n_iter=10, mute=False, logdir_report="", display=True, save_image=True):
allmodelstart = time.time()
# Run Model with different Seeds
all_feature_importance_df = pd.DataFrame()
_y, _ = self.replace_multiclass(y)
all_seeds = np.random.uniform(1, 1000, n_iter).astype(np.int32).tolist()
for seeds_x in all_seeds:
modelstart = time.time()
print("Seed: ", seeds_x,)
# Go Go Go
params["seed"] = seeds_x
model = lgb.train(
params,
lgb.Dataset(X.values, label=_y.values),
verbose_eval=100)
# Feature Importance
fold_importance_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A csv file that contains the water data. The csv file has to
            follow a specific format. A sample csv is available in the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
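    # The block below pulls one scalar per crop class out of the wide CSV: for each
    # column cXX, r01 is total ET, r02 is ET from rainfall, r03 is incremental ET, and
    # r04 is computed here as the sum of r02 and r03.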
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
    crop_r01 = np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
                          crop_r01c04, crop_r01c05, crop_r01c06,
                          crop_r01c07, crop_r01c08, crop_r01c09,
                          crop_r01c10, crop_r01c11, crop_r01c12])
    crop_r02 = np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
                          crop_r02c04, crop_r02c05, crop_r02c06,
                          crop_r02c07, crop_r02c08, crop_r02c09,
                          crop_r02c10, crop_r02c11, crop_r02c12])
    crop_r03 = np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
                          crop_r03c04, crop_r03c05, crop_r03c06,
                          crop_r03c07, crop_r03c08, crop_r03c09,
                          crop_r03c10, crop_r03c11, crop_r03c12])
    crop_r04 = crop_r02 + crop_r03
    noncrop_r01 = np.nansum([noncrop_r01c01, noncrop_r01c02])
    noncrop_r02 = np.nansum([noncrop_r02c01, noncrop_r02c02])
    noncrop_r03 = np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
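
    # Total agricultural water consumption reported in part 1 of the sheet
    # (crop and non-crop ET plus their rainfall/incremental ET components).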
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
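
    # Each computed value is written into the svg text element with the matching
    # 'id' attribute; missing (NaN) values are rendered as '-'.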
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
    xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
    if not pd.isnull(ag_water_cons):
        xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
    else:
        xml_txt_box.getchildren()[0].text = '-'

    # Part 2
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c12']''')[0]
if not pd.isnull(lp_r03c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c01']''')[0]
if not pd.isnull(lp_r04c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c02']''')[0]
if not pd.isnull(lp_r04c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c03']''')[0]
if not pd.isnull(lp_r04c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c04']''')[0]
if not pd.isnull(lp_r04c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c05']''')[0]
if not pd.isnull(lp_r04c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c06']''')[0]
if not pd.isnull(lp_r04c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c07']''')[0]
if not pd.isnull(lp_r04c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c08']''')[0]
if not pd.isnull(lp_r04c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c09']''')[0]
if not pd.isnull(lp_r04c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c10']''')[0]
if not pd.isnull(lp_r04c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c11']''')[0]
if not pd.isnull(lp_r04c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c12']''')[0]
if not pd.isnull(lp_r04c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c01']''')[0]
if not pd.isnull(wp_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c02']''')[0]
if not pd.isnull(wp_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c03']''')[0]
if not pd.isnull(wp_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c04']''')[0]
if not pd.isnull(wp_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c05']''')[0]
if not pd.isnull(wp_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c06']''')[0]
if not pd.isnull(wp_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c07']''')[0]
if not pd.isnull(wp_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c08']''')[0]
if not pd.isnull(wp_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c09']''')[0]
if not pd.isnull(wp_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c10']''')[0]
if not pd.isnull(wp_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c11']''')[0]
if not pd.isnull(wp_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c12']''')[0]
if not pd.isnull(wp_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c01']''')[0]
if not pd.isnull(wp_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c02']''')[0]
if not pd.isnull(wp_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c03']''')[0]
if not pd.isnull(wp_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c04']''')[0]
if not pd.isnull(wp_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c05']''')[0]
if not pd.isnull(wp_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c06']''')[0]
if not pd.isnull(wp_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c07']''')[0]
if not pd.isnull(wp_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c08']''')[0]
if not pd.isnull(wp_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c09']''')[0]
if not pd.isnull(wp_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c10']''')[0]
if not pd.isnull(wp_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c11']''')[0]
if not pd.isnull(wp_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c12']''')[0]
if not pd.isnull(wp_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c01']''')[0]
if not pd.isnull(wp_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c02']''')[0]
if not pd.isnull(wp_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c03']''')[0]
if not pd.isnull(wp_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c04']''')[0]
if not pd.isnull(wp_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c05']''')[0]
if not pd.isnull(wp_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c06']''')[0]
if not pd.isnull(wp_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c07']''')[0]
if not pd.isnull(wp_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c08']''')[0]
if not pd.isnull(wp_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c09']''')[0]
if not pd.isnull(wp_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c10']''')[0]
if not pd.isnull(wp_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c11']''')[0]
if not pd.isnull(wp_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c12']''')[0]
if not pd.isnull(wp_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c01']''')[0]
if not pd.isnull(wp_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c02']''')[0]
if not pd.isnull(wp_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c03']''')[0]
if not pd.isnull(wp_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c04']''')[0]
if not pd.isnull(wp_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c05']''')[0]
if not pd.isnull(wp_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c06']''')[0]
if not pd.isnull(wp_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c07']''')[0]
if not pd.isnull(wp_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c08']''')[0]
if not pd.isnull(wp_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c09']''')[0]
if not pd.isnull(wp_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c10']''')[0]
if not pd.isnull(wp_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c11']''')[0]
if not pd.isnull(wp_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c12']''')[0]
if not pd.isnull(wp_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c01']''')[0]
if not pd.isnull(lp_r05c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c02']''')[0]
if not pd.isnull(lp_r05c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c03']''')[0]
if not pd.isnull(lp_r05c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r05c04']''')[0]
if not pd.isnull(lp_r05c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r05c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c01']''')[0]
if not pd.isnull(lp_r06c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c02']''')[0]
if not pd.isnull(lp_r06c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c03']''')[0]
if not pd.isnull(lp_r06c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r06c04']''')[0]
if not pd.isnull(lp_r06c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r06c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c01']''')[0]
if not pd.isnull(lp_r07c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c02']''')[0]
if not pd.isnull(lp_r07c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c03']''')[0]
if not pd.isnull(lp_r07c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r07c04']''')[0]
if not pd.isnull(lp_r07c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r07c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c01']''')[0]
if not pd.isnull(lp_r08c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c02']''')[0]
if not pd.isnull(lp_r08c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c03']''')[0]
if not pd.isnull(lp_r08c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r08c04']''')[0]
if not pd.isnull(lp_r08c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r08c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c01']''')[0]
if not pd.isnull(wp_r05c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c02']''')[0]
if not pd.isnull(wp_r05c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c03']''')[0]
if not pd.isnull(wp_r05c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r05c04']''')[0]
if not pd.isnull(wp_r05c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r05c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c01']''')[0]
if not pd.isnull(wp_r06c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c02']''')[0]
if not pd.isnull(wp_r06c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c03']''')[0]
if not pd.isnull(wp_r06c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r06c04']''')[0]
if not pd.isnull(wp_r06c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r06c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r07c01']''')[0]
if not
|
pd.isnull(wp_r07c01)
|
pandas.isnull
|
from __future__ import print_function
import collections
import os
import re
import sys
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', 'utils'))
sys.path.append(lib_path)
from data_utils import get_file
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
imputer = Imputer(strategy='mean', axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
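# Illustrative usage sketch (editor's addition, not part of the original benchmark code):
# how impute_and_scale might be called on a tiny, made-up dataframe. `_demo_impute_and_scale`
# is a hypothetical helper used only for this example.
def _demo_impute_and_scale():
    toy = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [0.5, 2.5, np.nan]})
    # NaNs are replaced by the column mean, then every column is rescaled to [0, 1]
    return impute_and_scale(toy, scaling='minmax')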
def describe_response_data(df, cells=['all'], drugs=['A'], doses=[-5, -4]):
if 'all' in cells or cells == 'all':
cells = all_cells()
if 'all' in drugs or drugs == 'all':
drugs = all_drugs()
elif len(drugs) == 1 and re.match("^[ABC]$", drugs[0].upper()):
drugs = drugs_in_set('Jason:' + drugs[0].upper())
print('cells:', cells)
print('drugs:', drugs)
lconc = -4
for cell in cells:
d = df[(df['CELLNAME'] == cell) & (df['LOG_CONCENTRATION'] == lconc)]
print(cell)
print(d.describe())
break
def load_dose_response(min_logconc=-4., max_logconc=-4., subsample=None, fraction=False):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
min_logconc : -3, -4, -5, -6, -7, optional (default -4)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -4)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
fraction: bool (default False)
divide growth percentage by 100
"""
path = get_file(P1B3_URL + 'NCI60_dose_response_with_missing_z5_avg.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
dtype={'NSC':object, 'CELLNAME':str, 'LOG_CONCENTRATION':np.float32, 'GROWTH':np.float32})
global_cache[path] = df
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=SEED)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=SEED)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=SEED)
df = pd.concat([df1, df2, df3, df4])
if fraction:
df['GROWTH'] /= 100
df = df.set_index(['NSC'])
return df
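# Illustrative usage sketch (editor's addition): a typical call pattern for load_dose_response.
# Defined but not executed here, since calling it downloads the NCI60 response file.
def _demo_load_dose_response():
    # single log-concentration slice at -4, balanced subsample, growth expressed as a fraction
    df = load_dose_response(min_logconc=-4., max_logconc=-4.,
                            subsample='naive_balancing', fraction=True)
    return df[['CELLNAME', 'GROWTH']].head()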
def load_drug_descriptors(ncols=None, scaling='std', add_prefix=True):
"""Load drug descriptor data, sub-select columns of drugs descriptors
randomly if specificed, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (drugs descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file(P1B3_URL + 'descriptors.2D-NSC.5dose.filtered.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''],
dtype=np.float32)
global_cache[path] = df
df1 = pd.DataFrame(df.loc[:,'NAME'].astype(int).astype(str))
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
df2 = df.drop('NAME', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:,usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
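# Illustrative usage sketch (editor's addition): selecting a random subset of descriptor
# columns; defined but not executed here, since calling it downloads the descriptor file.
def _demo_load_drug_descriptors():
    # 500 randomly chosen descriptor columns, scaled to [0, 1], prefixed with 'dragon7.'
    return load_drug_descriptors(ncols=500, scaling='minmax', add_prefix=True)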
def load_cell_expression_u133p2(ncols=None, scaling='std', add_prefix=True):
"""Load U133_Plus2 cell line expression data prepared by Judith,
sub-select columns of gene expression randomly if specified,
scale the selected data and return a pandas dataframe.
Parameters
----------
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
add_prefix: True or False
add feature namespace prefix
"""
path = get_file('http://bioseed.mcs.anl.gov/~fangfang/p1h/GSE32474_U133Plus2_GCRMA_gene_median.txt')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c')
global_cache[path] = df
df1 = df['CELLNAME']
df2 = df.drop('CELLNAME', 1)
if add_prefix:
df2 = df2.add_prefix('expr.')
total = df.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df =
|
pd.concat([df1, df2], axis=1)
|
pandas.concat
|
# -- --------------------------------------------------------------------------------------------------- -- #
# -- project: A python project for algorithmic trading in FXCM -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- script: requirements.txt : text file with the required libraries for the project -- #
# -- author: YOUR GITHUB USER NAME -- #
# -- license: MIT License -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Template repository: https://github.com/IFFranciscoME/trading-project -- #
# -- --------------------------------------------------------------------------------------------------- -- #
import ta
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import calendar
import time
from datetime import datetime, timedelta
import string
from pandas.core.common import flatten
# Used as a decorator to measure the execution time of each function
def metrica_computacional(funcion):
"""
Decorator that returns the time a function takes to execute
Parameters
----------
funcion: The function whose execution time you want to measure
Returns
-------
print() of the execution time
"""
def funcion_medida(*args, **kwargs):
inicio = time.time()
c = funcion(*args, **kwargs)
print(time.time() - inicio)
return c
return funcion_medida
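# Illustrative sketch (editor's addition): the decorator wraps a function so that every call
# prints its wall-clock duration and still returns the original result. `_demo_espera` is a
# hypothetical example function, not part of the original project.
@metrica_computacional
def _demo_espera():
    time.sleep(0.1)
    return 'listo'  # _demo_espera() prints roughly 0.1 and returns 'listo'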
#@metrica_computacional
def ema(serie, length):
"""
Returns the exponential moving average of a series
Parameters
----------
serie: Data series from which the exponential moving average is computed
length: Window length of the exponential moving average (how many past data points are taken into account)
Returns
-------
ema: Series with the exponential moving average values
"""
ema = ta.trend.ema_indicator(serie, window=length, fillna=False)
return ema
#@metrica_computacional
def signals(short_ema, long_ema, serie):
"""
Returns buy/sell signals based on the crossover of the moving averages
Parameters
----------
short_ema: Short exponential moving average series
long_ema: Long exponential moving average series
serie: Data series over which the signals are generated
Returns
-------
señales: Array with values [0,1,2]
1 -> Buy signal
0 -> No signal
2 -> Sell signal
"""
señales = np.zeros(len(serie))
for i in range(1,len(serie)):
if short_ema[i-1] <= long_ema[i-1] and short_ema[i] > long_ema[i]:
señales[i] = 1
elif short_ema[i-1] >= long_ema[i-1] and short_ema[i] < long_ema[i]:
señales[i] = 2
else:
señales[i] = 0
return señales
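# Illustrative sketch (editor's addition): wiring ema() and signals() together on a made-up
# random-walk price series; `_demo_signals` is hypothetical and only shows the expected shapes.
def _demo_signals():
    precios = pd.Series(np.random.default_rng(0).normal(0, 1, 200).cumsum() + 100)
    corta = ema(precios, length=10)
    larga = ema(precios, length=30)
    return signals(corta, larga, precios)  # numpy array of 0 (no signal), 1 (buy), 2 (sell)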
#@metrica_computacional
def signal_index(lista):
"""
Returns the indices of the buy/sell signals
Parameters
----------
lista: List with buy/sell signals
Returns
-------
indice_señal: List with [buy, sell] index pairs for each trade
"""
indice_señal = []
for i in range(1,len(lista)):
if lista[i-1] == 1:
for j in range(i,len(lista)):
if lista[j] == 2:
indice_señal.append([i,j+2])
break
continue
return indice_señal
#@metrica_computacional
def operations(lista, precios):
"""
Returns the dates and prices between a buy signal and a sell signal
Parameters
----------
lista: DataFrame indices of the buy/sell signals
precios: Prices used in the trades (close, open, high, low), depending on the strategy
Returns
-------
operaciones: List with one DataFrame per executed trade
"""
operaciones = []
for i in range(len(lista)):
operaciones.append(pd.DataFrame(precios[lista[i][0]:lista[i][1]]))
return operaciones
#@metrica_computacional
def open_price_profit(lista):
"""
Returns the return of each candle relative to the opening price of the trade
Parameters
----------
lista: List of executed trades
Returns
-------
retorno_operacion: Nested list with the return of every stored price, relative to the
opening price, for each trade of the period.
"""
retorno_operacion = []
for i in range(len(lista)):
retorno_vela = []
for j in range(len(lista[i])):
retorno_vela.append((lista[i].iloc[j][0]/ lista[i].iloc[0][0]-1))
retorno_operacion.append(retorno_vela)
return retorno_operacion
#@metrica_computacional
def profit(lista, comision, take_profit, stop_loss):
"""
Returns the profit obtained per trade, closed either at the take profit, the stop loss,
or on a sell signal
Parameters
----------
lista: Nested list with the return of every stored price, relative to the opening price,
for each trade of the period.
comision: Commission charged by the broker per trade
take_profit: Return level at which the trade is closed
stop_loss: Maximum loss level at which the trade is abandoned
Returns
-------
rendimiento: List with the returns obtained in the trades of the period
"""
rendimiento = []
for i in range(len(lista)):
for j in range(len(lista[i])):
if j < len(lista[i])-1:
if lista[i][j] >= take_profit or lista[i][j] <= stop_loss:
rendimiento.append(lista[i][j] - comision)
break
else:
rendimiento.append(lista[i][-1] - comision)
return rendimiento
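# Illustrative sketch (editor's addition): how profit() reads the nested per-candle returns.
# The returns, 0.1% commission, 2% take profit and -1% stop loss below are all invented.
def _demo_profit():
    retornos = [[0.0, 0.005, 0.021],           # closes above the 2% take profit
                [0.0, -0.004, -0.012, 0.003]]  # hits the -1% stop loss on the third candle
    return profit(retornos, comision=0.001, take_profit=0.02, stop_loss=-0.01)  # [0.02, -0.013]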
#@metrica_computacional
def capital_flow(lista, capital):
"""
Returns the evolution of the capital during the trading period
Parameters
----------
lista: List of returns of the period
capital: Capital used during the period
Returns
-------
flujo_capital: List with the capital balance after each trade of the period
"""
flujo_capital = []
flujo_capital.append(capital)
for i in range(len(lista)):
flujo_capital.append(flujo_capital[i] * (1+lista[i]))
return flujo_capital
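# Illustrative sketch (editor's addition): compounding the per-trade returns from profit()
# into an equity curve, starting from a made-up capital of 10,000.
def _demo_capital_flow():
    rendimientos = [0.02, -0.013, 0.015]
    return capital_flow(rendimientos, capital=10000)  # [10000, 10200.0, 10067.4, ~10218.4]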
## PERFORMANCE ATTRIBUTION MEASURES
#@metrica_computacional
def f_pip_size(name:str):
"""
Returns the pip (tick) size of the symbol you are working with
Parameters
----------
name: Symbol you are working with
Returns
-------
pip_size: pip size of the instrument
"""
diccionario = pd.read_csv("instruments_pips.csv", index_col="Instrument")["TickSize"].to_dict()
name = name.replace("/","_").upper()
if name in diccionario:
pip_size = diccionario[name]
else:
pip_size = 1/100
return pip_size
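# Illustrative sketch (editor's addition): f_pip_size looks the symbol up in
# instruments_pips.csv (slashes become underscores, upper case) and falls back to 1/100 when
# the instrument is missing. Defined but not executed here because it needs that CSV file.
def _demo_f_pip_size():
    return f_pip_size("eur/usd")  # tick size listed for EUR_USD, or 0.01 as the fallback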
#@metrica_computacional
def columnas_tiempos(rend_individual, operaciones, take_profit, stop_loss):
"""
Returns the duration of each trade until it is closed for any reason
(take profit, stop loss, sell signal)
Parameters
----------
rend_individual: Nested list with the return of every stored price, relative to the
opening price, for each trade of the period.
operaciones: List with the trades of the period
take_profit: Return level at which the trade is closed
stop_loss: Maximum loss level at which the trade is abandoned
Returns
-------
dataframe: DataFrame with the trade number and the duration of each trade
"""
tiempo_operacion = []
for i in range(len(rend_individual)):
for j in range(len(rend_individual[i])):
if j < len(rend_individual[i])-1:
if rend_individual[i][j] >= take_profit or rend_individual[i][j] <= stop_loss:
tiempo_operacion.append(datetime.strptime(operaciones[i].index[j], "%d/%m/%Y")\
- datetime.strptime(operaciones[i].index[0], "%d/%m/%Y"))
break
else:
tiempo_operacion.append(datetime.strptime(operaciones[i].index[-1], "%d/%m/%Y")\
- datetime.strptime(operaciones[i].index[0], "%d/%m/%Y"))
dataframe = pd.DataFrame(tiempo_operacion)
dataframe.columns = ["Tiempo"]
dataframe.index.name = "# Operación"
return dataframe
#@metrica_computacional
def f_columnas_pips(pips:int, rendimiento):
"""
Returns the pip variation of the returns obtained during the period
Parameters
----------
pips: pip size of the instrument
rendimiento: List with the returns of the period
Returns
-------
dataframe: DataFrame with:
Profit: Return of the trade
Pips: Pip variation with respect to the previous return
Profit_acum: Cumulative sum of profit
Pip_acum: Cumulative sum of pips
"""
rendimiento_porcentual = rendimiento
rendimiento_pips = []
rendimiento_pips.append(rendimiento[0]*pips)
for i in range(1,len(rendimiento)):
rendimiento_pips.append((rendimiento[i] - rendimiento[i-1]) * pips)
profit_acum = np.cumsum(rendimiento_porcentual)
pips_acum = np.cumsum(rendimiento_pips)
dataframe = pd.DataFrame([rendimiento_porcentual, rendimiento_pips, profit_acum, pips_acum]).T
dataframe.columns = ["Profit", "Pips", "Profit_acum", "Pip_acum"]
return dataframe
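# Illustrative sketch (editor's addition): turning invented per-trade returns into pip terms
# with a hard-coded pip size of 0.01, so the example does not depend on instruments_pips.csv.
def _demo_f_columnas_pips():
    tabla = f_columnas_pips(pips=0.01, rendimiento=[0.02, -0.013, 0.015])
    return tabla[['Profit_acum', 'Pip_acum']]  # cumulative profit and cumulative pip columns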
#@metrica_computacional
def f_estadísticas_ba(rendimiento, operaciones, name:str ="df_1_tabla" or "df_2_ranking"):
"""
Returns a dictionary with two DataFrames of trading statistics for the period
Parameters
----------
rendimiento: List with the returns of the period
operaciones: List with the trades of the period
name: Table you want to query {"df_1_tabla", "df_2_ranking"}
Returns
-------
dataframe: DataFrame of statistics
"""
ganadora = 0
perdedora = 0
for i in range(len(rendimiento)):
if rendimiento[i] > 0:
ganadora += 1
else:
perdedora +=1
column_1 = ["Op_totales", "Ganadoras", "Perdedoras", "Mediana (Profit)", "Mediana (Pips)",\
"R. efectividad", "R. proporción"]
column_2 = [int(len(rendimiento)), int(ganadora), int(perdedora), np.median(rendimiento),\
np.median(f_columnas_pips(pips = f_pip_size("btc/USD"), rendimiento=rendimiento)["Pips"]),\
ganadora / len(rendimiento), ganadora / perdedora]
column_3 = ["Operaciones totales","Operaciones ganadoras","Operaciones perdedoras",\
"Mediana de profit de operaciones", "Mediana de pips de operaciones",\
"Ganadoras Totales/Operaciones Totales",\
"Ganadoras Totales/Perdedoras Totales"]
df_1_tabla = pd.DataFrame([column_1, column_2, column_3]).T
df_1_tabla.columns= ["Medida", "Valor", "Descripción"]
dias = []
for i in range(len(operaciones)):
dias.append(calendar.day_name[datetime.strptime(operaciones[i].index[0],"%d/%m/%Y").weekday()])
dias_positivos = []
dias_negativos = []
for i in range(len(dias)):
if rendimiento[i] > 0:
dias_positivos.append([dias[i],rendimiento[i]])
else:
dias_negativos.append([dias[i],rendimiento[i]])
dias_positivos = pd.DataFrame(dias_positivos)[0].value_counts()
dias_negativos = pd.DataFrame(dias_negativos)[0].value_counts()
dias_union =
|
pd.concat([dias_positivos,dias_negativos], axis=1)
|
pandas.concat
|
"""
TechMiner.Result
==================================================================================================
"""
import altair as alt
import geopandas
import geoplot
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from techminer.common import *
from collections import OrderedDict
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.optimize import minimize
from shapely.geometry import Point, LineString
from sklearn.cluster import KMeans
from matplotlib.patches import Rectangle
from wordcloud import WordCloud, ImageColorGenerator
#----------------------------------------------------------------------------------------------------
def _compute_graph_layout(graph):
path_length = nx.shortest_path_length(graph)
distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())
for row, data in path_length:
for col, dist in data.items():
distances.loc[row,col] = dist
distances = distances.fillna(distances.max().max())
return nx.kamada_kawai_layout(graph, dist=distances.to_dict())
#--------------------------------------------------------------------------------------------------------
class Result(pd.DataFrame):
"""Class implementing a dataframe with results of analysis.
"""
#----------------------------------------------------------------------------------------------------
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False,
cluster_data=None, call=None):
super().__init__(data, index, columns, dtype, copy)
self._call = call
self._cluster_data = cluster_data
#----------------------------------------------------------------------------------------------------
@property
def _constructor_expanddim(self):
return self
#----------------------------------------------------------------------------------------------
def _add_count_to_label(self, column):
count = self.groupby(by=column, as_index=True)[self.columns[-2]].sum()
count = {key : value for key, value in zip(count.index, count.tolist())}
self[column] = self[column].map(lambda x: cut_text(str(x) + ' [' + str(count[x]) + ']'))
#----------------------------------------------------------------------------------------------
def altair_barhplot(self, color='Greys'):
"""
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().altair_barhplot()
alt.Chart(...)
.. image:: ../figs/altair_barhplot.jpg
:width: 800px
:align: center
"""
if len(self.columns) != 3:
raise Exception('Invalid call for result of function:' + self._call)
columns = self.columns.tolist()
data = pd.DataFrame(self.copy())
if data.columns[1] != 'Cited by':
data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'
data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))
if columns[0] == 'Year':
data = data.sort_values(by=columns[0], ascending=False)
return alt.Chart(data).mark_bar().encode(
alt.Y(columns[0] + ':N', sort=alt.EncodingSortField(
field=columns[1] + ':Q')),
alt.X(columns[1] + ':Q'),
alt.Color(columns[1] + ':Q', scale=alt.Scale(scheme=color)))
#----------------------------------------------------------------------------------------------
def altair_barplot(self):
"""Vertical bar plot in Altair.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().altair_barplot()
alt.Chart(...)
.. image:: ../figs/altair_barplot.jpg
:width: 500px
:align: center
"""
if len(self.columns) != 3:
raise Exception('Invalid call for result of function:' + self._call)
columns = self.columns.tolist()
data = pd.DataFrame(self.copy())
data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'
data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))
return alt.Chart(data).mark_bar().encode(
alt.X(columns[0] + ':N', sort=alt.EncodingSortField(field=columns[1] + ':Q')),
alt.Y(columns[1] + ':Q'),
alt.Color(columns[1] + ':Q', scale=alt.Scale(scheme='greys')))
#----------------------------------------------------------------------------------------------------
def altair_circle(self, ascending_r=None, ascending_c=None, filename=None, **kwds):
"""Altair scatter plot with filled circles for visualizing relationships.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.auto_corr(
... column='Authors',
... sep=',',
... top_n=30
... ).altair_circle()
alt.Chart(...)
.. image:: ../figs/altair_circle.png
:width: 800px
:align: center
"""
if len(self.columns) != 4:
raise Exception('Invalid call for result of function:' + self._call)
if ascending_r is None or ascending_r is True:
sort_X = 'ascending'
else:
sort_X = 'descending'
if ascending_c is None or ascending_c is True:
sort_Y = 'ascending'
else:
sort_Y = 'descending'
chart = alt.Chart(self).mark_circle().encode(
alt.X(self.columns[0] + ':N',
axis=alt.Axis(labelAngle=270),
sort=sort_X),
alt.Y(self.columns[1] + ':N',
sort=sort_Y),
size=self.columns[2],
color=self.columns[2])
if filename is not None:
chart.save(filename)
return chart
#----------------------------------------------------------------------------------------------------
def altair_heatmap(self, ascending_r=None, ascending_c=None, filename=None, **kwds):
"""Altair Heatmap
Available cmaps:
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.terms_by_year(
... column='Authors',
... sep=',',
... top_n=20).altair_heatmap()
alt.Chart(...)
.. image:: ../figs/altair_heatmap.jpg
:width: 600px
:align: center
"""
if len(self.columns) != 4:
raise Exception('Invalid call for result of function:' + self._call)
## force the same order of cells in rows and cols ------------------------------------------
if self._call == 'auto_corr':
if ascending_r is None and ascending_c is None:
ascending_r = True
ascending_c = True
elif ascending_r is not None and ascending_r != ascending_c:
ascending_c = ascending_r
elif ascending_c is not None and ascending_c != ascending_r:
ascending_r = ascending_c
else:
pass
## end -------------------------------------------------------------------------------------
_self = self.copy()
_self[_self.columns[0]] = _self[_self.columns[0]].map(lambda w: cut_text(w))
_self[_self.columns[1]] = _self[_self.columns[1]].map(lambda w: cut_text(w))
if ascending_r is None or ascending_r is True:
sort_X = 'ascending'
else:
sort_X = 'descending'
if ascending_c is None or ascending_c is True:
sort_Y = 'ascending'
else:
sort_Y = 'descending'
graph = alt.Chart(_self).mark_rect().encode(
alt.X(_self.columns[0] + ':O', sort=sort_X),
alt.Y(_self.columns[1] + ':O', sort=sort_Y),
color=_self.columns[2] + ':Q')
if self._call == 'co_ocurrence':
text = graph.mark_text(
align='center',
baseline='middle',
dx=5
).encode(
text=_self.columns[2] + ':Q'
)
else:
text = None
plt.tight_layout()
return graph
#----------------------------------------------------------------------------------------------
def barhplot(self, color='gray', figsize=(12,8)):
"""Plots a pandas.DataFrame using Altair.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().barhplot()
.. image:: ../figs/barhplot.jpg
:width: 600px
:align: center
"""
if len(self.columns) != 3:
raise Exception('Invalid call for result of function:' + self._call)
data = pd.DataFrame(self.copy())
columns = data.columns.tolist()
if data.columns[1] != 'Cited by':
data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'
data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))
if columns[0] == 'Year':
data = data.sort_values(by=columns[0], ascending=True)
else:
data = data.sort_values(by=columns[1], ascending=True)
#plt.figure(figsize=figsize)
data.plot.barh(columns[0], columns[1], color=color, figsize=figsize)
plt.gca().xaxis.grid(True)
#----------------------------------------------------------------------------------------------
def barplot(self, color='gray', figsize=(8,12)):
"""Vertical bar plot in matplotlib.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().barplot()
.. image:: ../figs/barplot.jpg
:width: 600px
:align: center
"""
if len(self.columns) != 3:
raise Exception('Invalid call for result of function:' + self._call)
columns = self.columns.tolist()
plt.figure(figsize=figsize)
data = pd.DataFrame(self.copy())
data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'
data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))
data.plot.bar(columns[0], columns[1], color=color)
plt.gca().yaxis.grid(True)
#----------------------------------------------------------------------------------------------------
def chord_diagram(self, figsize=(12, 12), minval=None, R=3, n_bezier=100, dist=0.2):
"""Creates a chord diagram for representing clusters.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.auto_corr(
... column='Authors',
... sep=',',
... top_n=20).chord_diagram()
>>> plt.savefig('./figs/chord-diagram.jpg')
.. image:: ../figs/chord-diagram.jpg
:width: 800px
:align: center
"""
if self._cluster_data is None:
raise Exception('Invalid call for result of function:' + self._call)
chord_diagram(
self[self.columns[0]].unique(),
self._cluster_data,
figsize=figsize,
minval=minval,
R=R,
n_bezier=n_bezier,
dist=dist)
#----------------------------------------------------------------------------------------------------
def heatmap(self, ascending_r=None, ascending_c=None, figsize=(10, 10), cmap='Blues'):
"""Heat map.
https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.terms_by_year(
... column='Authors',
... sep=',',
... top_n=20).heatmap(figsize=(8,4))
>>> plt.savefig('./figs/heatmap.jpg')
.. image:: ../figs//heatmap.jpg
:width: 600px
:align: center
"""
if len(self.columns) != 4:
raise Exception('Invalid call for result of function:' + self._call)
## force the same order of cells in rows and cols ------------------------------------------
if self._call == 'auto_corr':
if ascending_r is None and ascending_c is None:
ascending_r = True
ascending_c = True
elif ascending_r is not None and ascending_r != ascending_c:
ascending_c = ascending_r
elif ascending_c is not None and ascending_c != ascending_r:
ascending_r = ascending_c
else:
pass
## end -------------------------------------------------------------------------------------
x = self.tomatrix(ascending_r, ascending_c)
## rename columns and row index
x.columns = [cut_text(w) for w in x.columns]
x.index = [cut_text(w) for w in x.index]
plt.figure(figsize=figsize)
if self._call == 'factor_analysis':
x = self.tomatrix(ascending_r, ascending_c)
x = x.transpose()
## x = x.apply(lambda w: abs(w))
plt.pcolor(np.transpose(abs(x.values)), cmap=cmap)
else:
plt.pcolor(np.transpose(x.values), cmap=cmap)
#plt.pcolor(np.transpose(x.values), cmap=cmap)
plt.xticks(np.arange(len(x.index))+0.5, x.index, rotation='vertical')
plt.yticks(np.arange(len(x.columns))+0.5, x.columns)
## plt.gca().set_aspect('equal', 'box')
plt.gca().invert_yaxis()
## changes the color of rectangle for autocorrelation heatmaps ---------------------------
# if self._call == 'auto_corr':
# for idx in np.arange(len(x.index)):
# plt.gca().add_patch(
# Rectangle((idx, idx), 1, 1, fill=False, edgecolor='red')
# )
## end ------------------------------------------------------------------------------------
## annotation
for idx_row, row in enumerate(x.index):
for idx_col, col in enumerate(x.columns):
if self._call in ['auto_corr', 'cross_corr', 'factor_analysis']:
if abs(x.at[row, col]) > x.values.max() / 2.0:
color = 'white'
else:
color = 'black'
plt.text(
idx_row + 0.5,
idx_col + 0.5,
"{:3.2f}".format(x.at[row, col]),
ha="center",
va="center",
color=color)
else:
if x.at[row, col] > 0:
if x.at[row, col] > x.values.max() / 2.0:
color = 'white'
else:
color = 'black'
plt.text(
idx_row + 0.5,
idx_col + 0.5,
int(x.at[row, col]),
ha="center",
va="center",
color=color)
plt.tight_layout()
plt.show()
#----------------------------------------------------------------------------------------------------
def map(self, min_value=None, top_links=None, figsize = (10,10),
font_size=12, factor=None, size=(25,300)):
"""
Draw an autocorrelation, crosscorrelation or factor map.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.auto_corr(
... column='Authors',
... sep=',',
... top_n=20).map()
>>> plt.savefig('./figs/autocorr-map.jpg')
.. image:: ../figs/autocorr-map.jpg
:width: 800px
:align: center
"""
if self._cluster_data is None:
raise Exception('Invalid call for result of function:' + self._call)
## cluster dataset
cluster_data = self._cluster_data.copy()
## figure properties
plt.figure(figsize=figsize)
## graph
graph = nx.Graph()
## adds nodes to graph
clusters = list(set(cluster_data.cluster))
nodes = list(set(self.tomatrix().index))
graph.add_nodes_from(clusters)
graph.add_nodes_from(nodes)
## adds edges and properties
weigth = []
style = []
value = []
for _, row in cluster_data.iterrows():
graph.add_edge(row[1], row[2])
if row[3] >= 0.75:
weigth += [4]
style += ['solid']
value += [row[3]]
elif row[3] >= 0.50:
weigth += [2]
style += ['solid']
value += [row[3]]
elif row[3] >= 0.25:
weigth += [1]
style += ['dashed']
value += [row[3]]
else:
weigth += [1]
style += ['dotted']
value += [row[3]]
edges = pd.DataFrame({
'edges' : graph.edges(),
'weight' : weigth,
'style' : style,
'value' : value
})
edges = edges.sort_values(by='value', ascending=False)
if top_links is not None and top_links < len(edges):
edges = edges[0:top_links]
if min_value is not None:
edges = edges[edges['value'] >= min_value]
## edges from center of cluster to nodes.
for _, row in cluster_data.iterrows():
graph.add_edge(row[0], row[1])
graph.add_edge(row[0], row[2])
## graph layout
path_length = nx.shortest_path_length(graph)
distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())
for row, data in path_length:
for col, dist in data.items():
distances.loc[row,col] = dist
distances = distances.fillna(distances.max().max())
layout = nx.kamada_kawai_layout(graph, dist=distances.to_dict())
## nodes drawing
node_size = [x[(x.find('[')+1):-1] for x in nodes]
node_size = [float(x) for x in node_size]
max_node_size = max(node_size)
min_node_size = min(node_size)
node_size = [size[0] + x / (max_node_size - min_node_size) * size[1] for x in node_size]
nx.draw_networkx_nodes(
graph,
layout,
nodelist=nodes,
node_size=node_size,
node_color='red')
## edges drawing
for style in list(set(edges['style'].tolist())):
edges_set = edges[edges['style'] == style]
if len(edges_set) == 0:
continue
nx.draw_networkx_edges(
graph,
layout,
edgelist=edges_set['edges'].tolist(),
style=style,
width=edges_set['weight'].tolist(),
edge_color='black')
## node labels
x_left, x_right = plt.xlim()
y_left, y_right = plt.ylim()
delta_x = (x_right - x_left) * 0.01
delta_y = (y_right - y_left) * 0.01
for node in nodes:
x_pos, y_pos = layout[node]
plt.text(
x_pos + delta_x,
y_pos + delta_y,
node,
size=font_size,
ha='left',
va='bottom',
bbox=dict(
boxstyle="square",
ec='lightgray',
fc='white',
))
if factor is not None:
left, right = plt.xlim()
width = (right - left) * factor / 2.0
plt.xlim(left - width, right + width)
plt.axis('off')
#----------------------------------------------------------------------------------------------------
def ocurrence_map(self, min_value=None, top_links=None, figsize = (10,10),
font_size=12, factor=None, size=(300,1000)):
"""Cluster map for ocurrence and co-ocurrence matrices.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.co_ocurrence(
... column_r='Authors',
... column_c='Authors',
... sep_r=',',
... sep_c=',',
... top_n=10
... ).heatmap()
>>> plt.savefig('./figs/heatmap-ocurrence-map.jpg')
.. image:: ../figs/heatmap-ocurrence-map.jpg
:width: 600px
:align: center
>>> rdf.co_ocurrence(
... column_r='Authors',
... column_c='Authors',
... sep_r=',',
... sep_c=',',
... top_n=10
... ).ocurrence_map(
... figsize=(11,11),
... font_size=10,
... factor = 0.1,
... size=(300,1000)
... )
>>> plt.savefig('./figs/ocurrence-map.jpg')
.. image:: ../figs/ocurrence-map.jpg
:width: 600px
:align: center
"""
if self._call not in ['ocurrence', 'co_ocurrence']:
raise Exception('Invalid call for result of function:' + self._call)
## figure properties
plt.figure(figsize=figsize)
## graph
graph = nx.Graph()
terms_r = list(set(self.tomatrix().index.tolist()))
terms_c = list(set(self.tomatrix().columns.tolist()))
nodes = list(set(terms_r + terms_c))
nodes = [cut_text(x) for x in nodes]
graph.add_nodes_from(nodes)
if sorted(terms_r) != sorted(terms_c):
numnodes = [str(i) for i in range(len(self))]
graph.add_nodes_from(numnodes)
for idx, row in self.iterrows():
graph.add_edge(row[0], str(idx))
graph.add_edge(row[1], str(idx))
labels={str(idx):row[2] for idx, row in self.iterrows()}
else:
mtx = self.tomatrix()
edges = []
labels = {}
n = 0
for idx_r, row in enumerate(mtx.index.tolist()):
for idx_c, col in enumerate(mtx.columns.tolist()):
if idx_c < idx_r:
continue
if mtx.at[row, col] > 0:
edges += [(row, str(n)), (col, str(n))]
labels[str(n)] = mtx.at[row, col]
n += 1
numnodes = [str(i) for i in range(n)]
graph.add_nodes_from(numnodes)
for a, b in edges:
graph.add_edge(a, b)
## graph layout
layout = _compute_graph_layout(graph)
## draw terms nodes
node_size = [int(n[n.find('[')+1:-1]) for n in nodes]
node_size = [size[0] + (n - min(node_size)) / (max(node_size) - min(node_size)) * (size[1] - size[0]) for n in node_size]
nx.draw_networkx_nodes(
graph,
layout,
nodelist=nodes,
node_size=node_size,
node_color='red')
x_left, x_right = plt.xlim()
y_left, y_right = plt.ylim()
delta_x = (x_right - x_left) * 0.01
delta_y = (y_right - y_left) * 0.01
for node in nodes:
x_pos, y_pos = layout[node]
plt.text(
x_pos + delta_x,
y_pos + delta_y,
node,
size=font_size,
ha='left',
va='bottom',
bbox=dict(
boxstyle="square",
ec='gray',
fc='white',
))
# nx.draw_networkx_labels(
# graph,
# layout,
# labels={t:t for t in terms},
# bbox=dict(facecolor='none', edgecolor='lightgray', boxstyle='round'))
## draw quantity nodes
node_size = [int(labels[n]) for n in labels.keys()]
node_size = [size[0] + (n - min(node_size)) / (max(node_size) - min(node_size)) * (size[1] - size[0]) for n in node_size]
nx.draw_networkx_nodes(
graph,
layout,
nodelist=numnodes,
node_size=node_size,
node_color='lightblue')
nx.draw_networkx_labels(
graph,
layout,
labels=labels,
font_color='black')
## edges
nx.draw_networkx_edges(
graph,
layout,
width=1
)
plt.axis('off')
#----------------------------------------------------------------------------------------------------
def print_IDs(self):
"""Auxiliary function to print IDs of documents.
"""
if self._call in ['co_ocurrence', 'cross_corr', 'auto_corr']:
for idx, row in self.iterrows():
if row[-1] is not None:
print(row[0], ', ', row[1], ' (', len(row[-1]), ')', ' : ', sep='', end='')
for i in row[-1]:
print(i, sep='', end='')
print()
elif self._call == 'terms_by_terms_by_year':
for idx, row in self.iterrows():
if row[-1] is not None:
print(row[0], ', ', row[1], ', ', row[2], ' (', len(row[-1]), ')', ' : ', sep='', end='')
for i in row[-1]:
print(i, sep='', end='')
print()
elif self._call == 'factor_analysis':
pass
else:
pass
#----------------------------------------------------------------------------------------------------
def sankey_plot(self, figsize=(7,10), minval=None):
"""Cross-relation sankey plot.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.cross_corr(
... column_r='keywords (cleaned)',
... sep_r=';',
... column_c='Authors',
... sep_c=','
... ).sankey_plot(minval=0.1)
>>> plt.savefig('./figs/sankey-plot.jpg')
.. image:: ../figs//sankey-plot.jpg
:width: 600px
:align: center
"""
if self._call != 'cross_corr':
raise Exception('Invalid call for result of function:' + self._call)
x = self
llabels = sorted(list(set(x[x.columns[0]])))
rlabels = sorted(list(set(x[x.columns[1]])))
factorL = max(len(llabels)-1, len(rlabels)-1) / (len(llabels) - 1)
factorR = max(len(llabels)-1, len(rlabels)-1) / (len(rlabels) - 1)
lpos = {k:v*factorL for v, k in enumerate(llabels)}
rpos = {k:v*factorR for v, k in enumerate(rlabels)}
fig, ax1 = plt.subplots(figsize=(7, 10))
ax1.scatter([0] * len(llabels), llabels, color='black', s=50)
for index, r in x.iterrows():
row = r[0]
col = r[1]
val = r[2]
if val >= 0.75:
linewidth = 4
linestyle = '-'
elif val >= 0.50:
linewidth = 2
linestyle = '-'
elif val >= 0.25:
linewidth = 2
linestyle = '--'
elif val < 0.25:
linewidth = 1
linestyle = ':'
else:
linewidth = 0
linestyle = '-'
if minval is None:
plt.plot(
[0, 1],
[lpos[row], rpos[col]],
linewidth=linewidth,
linestyle=linestyle,
color='black')
elif abs(val) >= minval :
plt.plot(
[0, 1],
[lpos[row], rpos[col]],
linewidth=linewidth,
linestyle=linestyle,
color='black')
ax2 = ax1.twinx()
ax2.scatter([1] * len(rlabels), rlabels, color='black', s=50)
#ax2.set_ylim(0, len(rlabels)-1)
for txt in ['bottom', 'top', 'left', 'right']:
ax1.spines[txt].set_color('white')
ax2.spines[txt].set_color('white')
ax2.set_xticks([])
plt.tight_layout()
#----------------------------------------------------------------------------------------------
def seaborn_barhplot(self, color='gray'):
"""
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().seaborn_barhplot()
.. image:: ../figs/seaborn_barhplot.jpg
:width: 600px
:align: center
"""
if len(self.columns) != 3:
raise Exception('Invalid call for result of function:' + self._call)
columns = self.columns.tolist()
data = pd.DataFrame(self.copy())
if data.columns[1] != 'Cited by':
data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'
data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))
if columns[0] == 'Year':
data = data.sort_values(by=columns[0], ascending=False)
else:
data = data.sort_values(by=columns[1], ascending=False)
sns.barplot(
x=columns[1],
y=columns[0],
data=data,
label=columns[0],
color=color)
plt.gca().xaxis.grid(True)
#----------------------------------------------------------------------------------------------
def seaborn_barplot(self, color='gray'):
"""Vertical bar plot in Seaborn.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.documents_by_year().seaborn_barplot()
.. image:: ../figs/seaborn_barhplot.jpg
:width: 800px
:align: center
"""
if len(self.columns) != 3:
raise Exception('Invalid call for result of function:' + self._call)
columns = self.columns.tolist()
data = Result(self.copy())
data[columns[0]] = data[columns[0]].map(str) + ' [' + data[columns[1]].map(str) + ']'
data[data.columns[0]] = data[data.columns[0]].map(lambda x: cut_text(x))
columns = data.columns.tolist()
result = sns.barplot(
y=columns[1],
x=columns[0],
data=data,
label=columns[0],
color=color)
_, labels = plt.xticks()
result.set_xticklabels(labels, rotation=90)
plt.gca().yaxis.grid(True)
#----------------------------------------------------------------------------------------------------
def seaborn_heatmap(self, ascending_r=None, ascending_c=None, filename=None):
"""Heat map.
https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.terms_by_year(
... column='Authors',
... sep=',',
... top_n=20).seaborn_heatmap()
>>> plt.savefig('./figs/seaborn_heatmap.jpg')
.. image:: ../figs//seaborn_heatmap.jpg
:width: 600px
:align: center
"""
if len(self.columns) != 4:
raise Exception('Invalid call for result of function:' + self._call)
## force the same order of cells in rows and cols ------------------------------------------
if self._call == 'auto_corr':
if ascending_r is None and ascending_c is None:
ascending_r = True
ascending_c = True
elif ascending_r is not None and ascending_r != ascending_c:
ascending_c = ascending_r
elif ascending_c is not None and ascending_c != ascending_r:
ascending_r = ascending_c
else:
pass
## end -------------------------------------------------------------------------------------
sns.set()
_self = self.tomatrix(ascending_r, ascending_c)
_self = _self.transpose()
_self.columns = [cut_text(w) for w in _self.columns]
_self.index = [cut_text(w) for w in _self.index]
sns_plot = sns.heatmap(_self)
if filename is not None:
sns_plot.savefig(filename)
#return sns_plot
#----------------------------------------------------------------------------------------------------
def seaborn_relplot(self, ascending_r=None, ascending_c=None, filename=None):
"""Seaborn relplot plot with filled circles for visualizing relationships.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from techminer.datasets import load_test_cleaned
>>> rdf = load_test_cleaned().data
>>> rdf.auto_corr(
... column='Authors',
... sep=',',
... top_n=30
... ).seaborn_relplot(filename='./figs/seaborn_relplot.png')
.. image:: ../figs//seaborn_relplot.png
:width: 600px
:align: center
"""
if len(self.columns) != 4:
raise Exception('Invalid call for result of function:' + self._call)
sns_plot = sns.relplot(
x = self.columns[0],
y = self.columns[1],
size = self.columns[2],
alpha = 0.8,
palette = 'viridis',
data = self)
plt.xticks(rotation=90)
if filename is not None:
sns_plot.savefig(filename)
#----------------------------------------------------------------------------------------------------
def tomatrix(self, ascending_r=None, ascending_c=None):
"""Displays a term by term dataframe as a matrix.
>>> mtx = Result({
... 'rows':['r0', 'r1', 'r2', 'r0', 'r1', 'r2'],
... 'cols':['c0', 'c1', 'c0', 'c1', 'c0', 'c1'],
... 'vals':[ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
... })
>>> mtx
rows cols vals
0 r0 c0 1.0
1 r1 c1 2.0
2 r2 c0 3.0
3 r0 c1 4.0
4 r1 c0 5.0
5 r2 c1 6.0
>>> mtx.tomatrix() # doctest: +NORMALIZE_WHITESPACE
c0 c1
r0 1.0 4.0
r1 5.0 2.0
r2 3.0 6.0
"""
# if self._call not in [
# 'coo-matrix',
# 'cross-matrix',
# 'auto-matrix']:
# raise Exception('Invalid function call for type: ' + self._call )
if self.columns[0] == 'Year':
year = self.Year.copy()
dict_year = { x[0:x.find(' [')] : x for x in year}
year = year.map(lambda x: int(x[0:x.find('[')]))
year = [str(x) for x in range(min(year), max(year)+1)]
year = [y + ' [0]' if y not in dict_year.keys() else dict_year[y] for y in year]
termA_unique = year
# termA_unique = range(min(self.Year), max(self.Year)+1)
else:
termA_unique = self.iloc[:,0].unique()
if self.columns[1] == 'Year':
year = self.Year.copy()
dict_year = {x[0:x.find(' [')] : x for x in year}
year = year.map(lambda x: int(x[0:x.find('[')]))
year = [str(x) for x in range(min(year), max(year)+1)]
year = [y + ' [0]' if y not in dict_year.keys() else dict_year[y] for y in year]
termB_unique = year
# termB_unique = range(min(self.Year), max(self.Year)+1)
else:
termB_unique = self.iloc[:,1].unique()
if ascending_r is not None:
termA_unique = sorted(termA_unique, reverse = not ascending_r)
if ascending_c is not None:
termB_unique = sorted(termB_unique, reverse = not ascending_c)
if self._call == 'co_ocurrence':
result = pd.DataFrame(
np.full((len(termA_unique), len(termB_unique)), 0)
)
else:
result = pd.DataFrame(
np.zeros((len(termA_unique), len(termB_unique)))
)
result.columns = termB_unique
result.index = termA_unique
for index, r in self.iterrows():
row = r[0]
col = r[1]
val = r[2]
result.loc[row, col] = val
return Result(result, call='Matrix')
#----------------------------------------------------------------------------------------------------
def transpose(self, *args, **kwargs):
"""Transpose results matrix.
"""
return Result(super().transpose(), call=self._call)
#----------------------------------------------------------------------------------------------------
#TODO: make the upper value used to scale the edge weights configurable
#TODO map
def network(self, save=False, name='network.png', corr_min=0.7, node_color='lightblue',
edge_color='lightgrey', edge_color2='lightcoral', node_size=None, fond_size=4,
figsize = (10,10)):
"""
This function generates a network graph for the matrix.
Args:
matrix (pandas.DataFrame): Matrix with variables on indexes and column titles
save (boolean): If True, the graph will save with the name given
name (str): Name to save the png file with the image
corr_min (int): Minimum absolute value for the relationships between variables
to be shown in the graph.
It is suggested when a correlation matrix is being used
node_color (str): Color name used to plot nodes
edge_color (str): Color name used to plot edges with positive weights
edge_color2 (str): Color name used to plot edges with negative weights
node_size (int): If None value, the size of the nodes is plotted according
to the weights of edges that arrive and leave each one of them.
If numeric value, all nodes will be plotted with this given size
fond_size (int): Node label font size
figsize (float, float): size of figure drawn
Returns:
None
"""
if self._call not in [
'co_ocurrence',
'cross_corr',
'auto_corr',
'factor_analysis']:
raise Exception('Invalid function call for type: ' + self._call )
if self._call == 'factor_analysis':
x = self.copy()
else:
x = self.tomatrix()
plt.clf()
plt.figure(figsize=figsize)
#generate network graph
graph = nx.Graph()
# add nodes
rows = x.index
columns = x.columns
nodes = list(set(rows.append(columns)))
#add nodes
graph.add_nodes_from(nodes)
list_ = list(OrderedDict.fromkeys(itertools.product(rows, columns)))
if len(rows) == len(columns) and (all(rows.sort_values())==all(columns.sort_values())):
list_ = list(set(tuple(sorted(t)) for t in list_))
# add edges
for i in range(len(list_)):
combinations=list_[i]
from_node, to_node = combinations[0], combinations[1]
if from_node != to_node:
weight = x.loc[from_node, to_node]
if weight != 0 and abs(weight)>corr_min:
if weight<0:
weight=abs(weight)
edge_colour =edge_color2
else:
edge_colour = edge_color
graph.add_edge(from_node, to_node, weight=weight, color = edge_colour)
#calculate distance between relationated nodes to avoid overlaping
path_length = nx.shortest_path_length(graph)
distances = pd.DataFrame(index=graph.nodes(), columns=graph.nodes())
for row, data in path_length:
for col, dist in data.items():
distances.loc[row,col] = dist
distances = distances.fillna(distances.max().max() )
#layout of graph
pos = nx.kamada_kawai_layout(graph, dist=distances.to_dict())
#weights and colors of the relationships between nodes for edges thickness
weights = dict(((u, v), int(d["weight"])) for u, v, d in graph.edges(data=True))
colors = dict(((u, v), d["color"]) for u, v, d in graph.edges(data=True))
#Edges weights for plot
max_=max([i for i in weights.values()])
min_=min([i for i in weights.values()])
min_range=1
max_range=5
if max_<=1:
width = ([(1+x)*2 for x in weights.values()])
else:
width = ([((((x-min_)/(max_-min_))*(max_range-min_range))+min_range) for x in weights.values()])
# width=list(weights.values())
#node sizes
if not node_size:
node_sizes = dict(graph.degree())
node_sizes = ([(x)*10 for key,x in node_sizes.items()])
else:
node_sizes=node_size
#visual graph configuration
nx.draw(graph, pos,node_size=node_sizes, node_color=node_color,
edge_color=list(colors.values()), font_size=fond_size,
with_labels=True, width=width)
#save figure as png
if save:
plt.savefig(name, format="PNG", dpi=300, bbox_inches='tight')
plt.tight_layout()
plt.show()
return None
#----------------------------------------------------------------------------------------------------
#TODO networkmap: validate how lonlat should be passed,
#what happens with negative values???
#make the figure size configurable,
#save the file
#remove the axes
def networkmap(matrix, color_edges ='grey', color_node='red',color_map = 'white', edge_map = 'lightgrey', node_size =None, edge_weight = None):
"""
This function generates a network graph over a map, for a matrix of country relations.
Args:
matrix (pandas.DataFrame): Matrix with variables on indexes and column titles
color_edges (str): Color name used to plot edges
color_node (str): Color name used to plot nodes
color_map (str): Color name used to plot map countries
edge_map (str): Color name used to plot contries border
node_size (int): If None value, the size of the nodes is plotted according
to the weights of edges that arrive and leave each one of them.
If numeric value, all nodes will be plotted with this given size
edge_weight (int): If None value, the weight of the edges is plotted according
to matrix values
If numeric value, all edges will be plotted with this given size
Returns:
None
#
"""
#Get longitudes and latitudes
lonlat=
|
pd.read_csv('LonLat.csv',sep=';')
|
pandas.read_csv
|
import os
import pandas as pd
import numpy as np
from imblearn.over_sampling import SMOTE
def Imputer(data, kind = "mean"):
df = data.copy()
for feature in df.columns:
if df[feature].dtype == "float":
if kind == "mean":
df[feature] = df[feature].fillna(df[feature].mean())
elif kind == "median":
df[feature] = df[feature].fillna(df[feature].median())
elif kind == "mode":
df[feature] = df[feature].fillna(df[feature].mode()[0])
elif df[feature].dtype == "object":
df[feature] = df[feature].fillna(df[feature].mode()[0])
return df
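# Illustrative sketch (editor's addition): column-wise imputation on a tiny, made-up frame;
# numeric columns receive the chosen statistic, object columns always receive the mode.
def _demo_imputer():
    raw = pd.DataFrame({"age": [21.0, np.nan, 35.0], "city": ["x", None, "x"]})
    return Imputer(raw, kind="median")  # age NaN -> 28.0, city None -> "x"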
def cimputer(fname: str,
kind: str = "mean",
dateCol: str = None,
dataDir: str = "data") -> None:
if not os.path.isdir(dataDir):
os.mkdir(dataDir)
if dateCol != "":
df =
|
pd.read_csv(fname, parse_dates=[dateCol])
|
pandas.read_csv
|
import os
import gc
import sys
print(sys.path)
import pickle
import warnings
import numpy as np
import pandas as pd
import datetime as dt
from diamond import helpers as helper
from diamond import utilities as util
from copy import deepcopy
from sklearn.preprocessing import StandardScaler
CONFIG = util.load_config()
class diamond(object):
"""
Class for handling relationships between normalized tables pulled from API
Standardizing adding starting pitchers, lineups (expected and/or actual)
Adding pitcher rolling stats
Adding batter rolling stats
"""
def __init__(self, seasonKey, min_date_gte=None, max_date_lte=None, upcoming_start_gte=None):
self.seasonKey = seasonKey
self.league = 'mlb'
self.min_date_gte = min_date_gte
self.max_date_lte = max_date_lte
self.upcoming_start_gte = upcoming_start_gte
# Pitching Stats attributes
self.pitching_roll_windows = [1, 3, 5, 10]
self.pitching_stats = ['fip', 'bb_per9', 'hr_fb_ratio', 'k_per9', 'gbpct']
self.pitching_roll_stats = [
'{}_roll{}'.format(s, w) for s in self.pitching_stats for
w in self.pitching_roll_windows
]
# Batting Stats Attributes
self.batting_roll_windows = [1, 3, 5, 10]
self.batting_stats = ['obp', 'slg', 'woba', 'iso']
self.batting_roll_stats = [
'{}_roll{}'.format(s, w) for s in self.batting_stats for
w in self.batting_roll_windows
]
self.batting_static_stats = ['atBats']
# Check args
assert not (
seasonKey and
(min_date_gte != None) and
(max_date_lte != None)
)
# Determine time period
if self.seasonKey:
self.min_date_gte = CONFIG.get(self.league)\
.get('seasons')\
.get(self.seasonKey)\
.get('seasonStart')
self.max_date_lte = CONFIG.get(self.league)\
.get('seasons')\
.get(self.seasonKey)\
.get('seasonEnd')
# Read in from daily game
path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized').format(
f='daily_games'
)
paths = [
path+fname for fname in os.listdir(path) if (
(fname[:8] >= self.min_date_gte)
&
(fname[:8] <= self.max_date_lte)
)
]
self.summary = pd.concat(
objs=[pd.read_parquet(p) for p in paths],
axis=0
)
self.summary.drop_duplicates(subset=['gameId'], inplace=True)
self.summary.loc[:, 'gameStartDate'] = \
pd.to_datetime(self.summary['startTime'].str[:10])
def add_starting_pitchers(self, dispositions=['home', 'away']):
"""
ADDS DIMENSIONS TO SUMMARY
"""
helper.progress("Adding Starting Pitchers Attribute")
# Paths
atbats_path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized').format(
f='game_atbats'
)
atbats_paths = [atbats_path+d+"/" for d in os.listdir(atbats_path) if (
(d >= self.min_date_gte)
&
(d <= self.max_date_lte)
)]
atbats_paths_full = []
for abp in atbats_paths:
atbats_paths_full.extend([abp+fname for fname in os.listdir(abp)])
# Get atbats
df_ab = pd.concat(
objs=[pd.read_parquet(p) for p in atbats_paths_full],
axis=0
)
df_ab.loc[:, 'gameStartTime'] = df_ab['gameStartTime'].str[:10]
df_ab.loc[:, 'gameStartTime'] = pd.to_datetime(df_ab['gameStartTime'])
# Save upcoming to use lineup approach with later
if self.upcoming_start_gte:
df_upc = df_ab.loc[df_ab['gameStartTime'] >= self.upcoming_start_gte, :]
df_ab = df_ab.loc[df_ab['gameStartTime'] < self.upcoming_start_gte, :]
else:
df_upc = df_ab.loc[df_ab['gameStartTime'] >= dt.datetime.now(), :]
df_ab = df_ab.loc[df_ab['gameStartTime'] < dt.datetime.now(), :]
# -------------------------
# -------------------------
# Filter to games in the past and use atbats to get starter (in case lineup wrong)
# Get Home Starters
df_top1 = df_ab.loc[(
(df_ab['inning']==1) &
(df_ab['inningHalf']=='TOP') &
(df_ab['outCount']==0)
), :]
df_home_starters = df_top1.loc[:, ['gameId', 'pitcherId']]\
.drop_duplicates(subset=['gameId'])
df_home_starters.rename(
columns={'pitcherId': 'homeStartingPitcherId'},
inplace=True
)
# Get Away Starters
df_bot1 = df_ab.loc[(
(df_ab['inning']==1) &
(df_ab['inningHalf']=='BOTTOM') &
(df_ab['outCount']==0)
), :]
df_away_starters = df_bot1.loc[:, ['gameId', 'pitcherId']]\
.drop_duplicates(subset=['gameId'])
df_away_starters.rename(
columns={'pitcherId': 'awayStartingPitcherId'},
inplace=True
)
# Assemble starters
df_hist_starters = pd.merge(
df_home_starters,
df_away_starters,
how='outer',
on=['gameId'],
validate='1:1'
)
# -------------------------
# -------------------------
# Filter to games in the current/future and use
# lineups to get starter (in case lineup wrong)
if not hasattr(self, 'lineups'):
self.add_lineups()
df_lup_home = self.lineups.loc[
self.lineups['batterDisposition'].str.lower() == 'home', :]
df_lup_away = self.lineups.loc[
self.lineups['batterDisposition'].str.lower() == 'away', :]
# Filter down
df_lup_home = df_lup_home.loc[(
(df_lup_home['playerPositionGeneral'] == 'P')
&
(df_lup_home['gameId'].isin(list(df_upc.gameId)))
), :]
df_lup_away = df_lup_away.loc[(
(df_lup_away['playerPositionGeneral'] == 'P')
&
(df_lup_away['gameId'].isin(list(df_upc.gameId)))
), :]
# Isolate
df_lup_home.rename(columns={'playerId': 'homeStartingPitcherId'}, inplace=True)
df_lup_home = df_lup_home.loc[:,
['gameId', 'homeStartingPitcherId']]\
.drop_duplicates(subset=['gameId'], inplace=False)
df_lup_away.rename(columns={'playerId': 'awayStartingPitcherId'}, inplace=True)
df_lup_away = df_lup_away.loc[:,
['gameId', 'awayStartingPitcherId']]\
.drop_duplicates(subset=['gameId'], inplace=False)
# Combine to one game per row
df_upc_starters = pd.merge(
df_lup_home,
df_lup_away,
how='left',
on=['gameId'],
validate='1:1'
)
# Concat hist and upc vertically to merge back to summary attrib
df_starters = pd.concat(
objs=[df_hist_starters, df_upc_starters],
axis=0
)
# Merge to summary attribute
self.summary = pd.merge(
self.summary,
df_starters,
how='left',
on=['gameId'],
validate='1:1'
)
def add_bullpen_summary(self, dispositions=['home', 'away']):
"""
ADDS ATTRIBUTE "bullpens_summary"
"""
helper.progress("Adding Bullpen Summary Attribute")
# Get atbats, filter to where not equal to starters
if not all(
s in self.summary.columns for s in \
['{}StartingPitcherId'.format(d) for d in dispositions]
):
self.add_starting_pitchers()
# Get atbats
# Paths
atbats_path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized').format(
f='game_atbats'
)
atbats_paths = [atbats_path+d+"/" for d in os.listdir(atbats_path) if (
(d >= self.min_date_gte)
&
(d <= self.max_date_lte)
)]
atbats_paths_full = []
for abp in atbats_paths:
atbats_paths_full.extend([abp+fname for fname in os.listdir(abp)])
# Get atbats and sort by inning / outCount
df_ab = pd.concat(
objs=[pd.read_parquet(p) for p in atbats_paths_full],
axis=0
)
df_ab = df_ab.loc[:, ['gameId', 'gameStartTime', 'pitcherId', 'homeTeamId', 'awayTeamId',
'inning', 'inningHalf', 'outCount']]
# Select home, sort, dd, remove starter, and rerank
bullpen_summary = []
sides = {'TOP': 'home', 'BOTTOM': 'away'}
for half_, disp in sides.items():
# Set up starter map for later mask
startingPitcherMap = self.summary.set_index('gameId')\
['{}StartingPitcherId'.format(disp)].to_dict()
df_ab_h = df_ab.loc[df_ab['inningHalf']==half_, :]
# Sort
df_ab_h = df_ab_h.sort_values(
by=['gameId', 'gameStartTime', 'inning', 'outCount'],
ascending=True,
inplace=False
)
# Drop labels
df_ab_h = df_ab_h.drop(labels=['inning', 'outCount'], axis=1, inplace=False)
            # Remove the pitcher who was already identified as the starter
            # (i.e. drop rows where pitcherId equals the game's starting pitcher)
df_ab_h.loc[:, '{}StartingPitcherId'.format(disp)] = \
df_ab_h['gameId'].map(startingPitcherMap)
df_ab_h = df_ab_h.loc[
df_ab_h['pitcherId'] != df_ab_h['{}StartingPitcherId'.format(disp)], :]
# Handle ordering
df_ab_h['pitcherAppearOrder'] = df_ab_h\
.groupby(by=['gameId'])['pitcherId'].rank(method='first')
df_ab_h = df_ab_h.groupby(
by=['gameId', 'gameStartTime', '{}TeamId'.format(disp), 'pitcherId'],
as_index=False).agg({'pitcherAppearOrder': 'min'})
df_ab_h['pitcherAppearOrder'] = df_ab_h\
.groupby(by=['gameId'])['pitcherId'].rank(method='first')
df_ab_h['pitcherAppearOrderMax'] = df_ab_h\
.groupby('gameId')['pitcherAppearOrder'].transform('max')
            # Label middle pitchers with the reliever role and the last pitcher with the closer role
msk = (df_ab_h['pitcherAppearOrder']==df_ab_h['pitcherAppearOrderMax'])
df_ab_h.loc[msk, 'pitcherRoleType'] = 'closer'
df_ab_h.loc[~msk, 'pitcherRoleType'] = 'reliever'
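            # i.e. within each game the pitcher with the highest appearance order is
            # tagged 'closer'; every other non-starting pitcher is tagged 'reliever'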
# Subset (TODO add first inning appeared)
df_ab_h = df_ab_h.loc[:, ['gameId', 'gameStartTime', 'pitcherId', 'pitcherRoleType',
'{}TeamId'.format(disp), 'pitcherAppearOrder']]
df_ab_h.rename(columns={'{}TeamId'.format(disp): 'teamId'}, inplace=True)
df_ab_h['bullpenDisposition'] = disp
bullpen_summary.append(df_ab_h)
bullpen_summary = pd.concat(objs=bullpen_summary, axis=0)
self.bullpen_reliever_summary = bullpen_summary.loc[
bullpen_summary['pitcherRoleType'] == 'reliever', :]
self.bullpen_closer_summary = bullpen_summary.loc[
bullpen_summary['pitcherRoleType'] == 'closer', :]
def add_pitcher_rolling_stats(
self,
dispositions=['home', 'away'],
pitcher_roll_types=['starter', 'reliever', 'closer'],
shift_back=True
):
"""
"""
helper.progress("Adding Pitcher Rolling Stats to pitching-related attributes")
# Path
ptch_roll_path = CONFIG.get(self.league)\
.get('paths')\
.get('rolling_stats').format('pitching')+"player/"
# Read in
ptch_roll = pd.concat(
objs=[pd.read_parquet(ptch_roll_path+fname) for fname in
os.listdir(ptch_roll_path) if
((fname.replace(".parquet", "") >= self.min_date_gte)
&
(fname.replace(".parquet", "") <= self.max_date_lte))],
axis=0
)
# Create rolling metrics
cols = ['gameId', 'gameStartDate', 'playerId'] +\
self.pitching_roll_stats
        # Subset to the id columns plus the rolling pitching stats
        ptch_roll = ptch_roll.loc[:, cols]
# Sort
ptch_roll.sort_values(by=['gameStartDate'], ascending=True, inplace=True)
# Shift back if interested in rolling stats leading up to game
if shift_back:
for col in self.pitching_roll_stats:
msk = (ptch_roll['playerId'].shift(1)==ptch_roll['playerId'])
ptch_roll.loc[msk, col] = ptch_roll[col].shift(1)
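            # each row now carries the player's rolling stats entering the game rather than
            # including it; the first row per player keeps its own (same-game) values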
# Handle Infs
for col in self.pitching_roll_stats:
ptch_roll = ptch_roll.loc[~ptch_roll[col].isin([np.inf, -np.inf]), :]
# Check if starter / all designation
if 'starter' in pitcher_roll_types:
print(" Adding stats for starters")
# Check that summary attribute has starting pitchers
if not any('StartingPitcherId' in col for col in
self.summary.columns):
self.add_starting_pitchers(dispositions=dispositions)
# Merge back to starters (one at a time)
pitcher_cols = ['{}StartingPitcherId'.format(d) for
d in dispositions]
# Prep self.starting_pitcher_stats
p = []
for pc in pitcher_cols:
df = self.summary.loc[:, ['gameId', pc]]
df = df.loc[df[pc].notnull(), :]
df.rename(columns={pc: 'pitcherId'}, inplace=True)
df.loc[:, 'pitcherDisposition'] = pc[:4].lower()
p.append(df)
# concatenate to form attribute
self.starting_pitcher_summary = \
pd.concat(objs=p, axis=0)
self.starting_pitcher_summary = pd.merge(
self.starting_pitcher_summary,
ptch_roll,
how='left',
left_on=['gameId', 'pitcherId'],
right_on=['gameId', 'playerId'],
validate='1:1'
)
self.starting_pitcher_summary.drop(
labels=['playerId'],
axis=1,
inplace=True
)
# Check if reliever / all designation
if 'reliever' in pitcher_roll_types:
print(" Adding stats for relievers")
# Check attribute (try / except cheaper but less readable)
if not hasattr(self, 'bullpen_reliever_summary'):
self.add_bullpen_summary(dispositions=dispositions)
# Merge back to relievers in bullpen summary
msk = (self.bullpen_reliever_summary['pitcherRoleType'].str.lower() == 'reliever')
bullpen = self.bullpen_reliever_summary.loc[msk, :]
if bullpen.shape[0] == 0:
warnings.warn(" No relief pitchers found in bullpen_summary attribute")
if not all(d in dispositions for d in ['home', 'away']):
assert len(dispositions) == 1 and dispositions[0] in ['home', 'away']
bullpen_reconstruct = []
for disp in dispositions:
bullpen_disp = bullpen.loc[bullpen['bullpenDisposition'] == disp, :]
bullpen_disp = bullpen_disp.loc[:, ['gameId', 'pitcherId']]
bullpen_disp = pd.merge(
bullpen_disp,
ptch_roll,
how='left',
left_on=['gameId', 'pitcherId'],
right_on=['gameId', 'playerId'],
validate='1:1'
)
bullpen_disp.drop(labels=['playerId'], axis=1, inplace=True)
bullpen_reconstruct.append(bullpen_disp)
bullpen_reconstruct = pd.concat(objs=bullpen_reconstruct, axis=0)
# Add back to summary / detail
self.bullpen_reliever_summary = pd.merge(
self.bullpen_reliever_summary,
bullpen_reconstruct,
how='left',
on=['gameId', 'pitcherId'],
validate='1:1'
)
# Set
# TODO Standard Deviation might not be best here
aggDict = {stat: ['mean', 'max', 'min'] for stat in [
x for x in self.bullpen_reliever_summary.columns if
any(y in x for y in self.pitching_stats)
]}
df = self.bullpen_reliever_summary.groupby(
by=['gameId', 'gameStartTime', 'teamId', 'bullpenDisposition'],
as_index=False
).agg(aggDict)
df.columns = [
x[0] if x[1] == '' else x[0]+"~"+x[1] for x in
df.columns
]
self.bullpen_reliever_summary = df
# TODO FIX CLOSER MERGE _x _y
if 'closer' in pitcher_roll_types:
print(" Adding stats for closers")
# Check if closer / all designation
if not hasattr(self, 'bullpen_closer_summary'):
self.add_bullpen_summary(dispositions=dispositions)
# Merge back to closers in bullpen summary
msk = (self.bullpen_closer_summary['pitcherRoleType'].str.lower() == 'closer')
bullpen = self.bullpen_closer_summary.loc[msk, :]
if bullpen.shape[0] == 0:
warnings.warn(" No closing pitchers found in bullpen_summary attribute")
if not all(d in dispositions for d in ['home', 'away']):
assert len(dispositions) == 1 and dispositions[0] in ['home', 'away']
bullpen_reconstruct = []
for disp in dispositions:
bullpen_disp = bullpen.loc[bullpen['bullpenDisposition'] == disp, :]
bullpen_disp = bullpen_disp.loc[:, ['gameId', 'pitcherId']]
bullpen_disp = pd.merge(
bullpen_disp,
ptch_roll,
how='left',
left_on=['gameId', 'pitcherId'],
right_on=['gameId', 'playerId'],
validate='1:1'
)
bullpen_disp.drop(labels=['playerId'], axis=1, inplace=True)
bullpen_reconstruct.append(bullpen_disp)
bullpen_reconstruct = pd.concat(objs=bullpen_reconstruct, axis=0)
# Add back to summary / detail
self.bullpen_closer_summary = pd.merge(
self.bullpen_closer_summary,
bullpen_reconstruct,
how='left',
on=['gameId', 'pitcherId'],
validate='1:1'
)
# Set
# TODO Standard Deviation might not be best here
aggDict = {stat: ['mean', 'max', 'min'] for stat in [
x for x in self.bullpen_closer_summary.columns if
any(y in x for y in self.pitching_stats)
]}
df = self.bullpen_closer_summary.groupby(
by=['gameId', 'gameStartTime', 'teamId', 'bullpenDisposition'],
as_index=False
).agg(aggDict)
df.columns = [
x[0] if x[1] == '' else x[0]+"~"+x[1] for x in
df.columns
]
self.bullpen_closer_summary = df
def add_lineups(self, status='auto'):
"""
        status: 'auto' - use expected lineups for upcoming games and actual lineups for completed games
"""
helper.progress("Adding Lineups Attribute")
# Add lineups
# add expected for upcoming game
# add actual for completed games
lineups_path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized')\
.format(f='game_lineup')
df_lineup = pd.concat(
objs=[pd.read_parquet(lineups_path+fname) for fname in os.listdir(lineups_path) if
((fname.replace(".parquet", "") >= self.min_date_gte)
&
(fname.replace(".parquet", "") <= self.max_date_lte))],
axis=0
)
# Actual
actual = df_lineup.loc[df_lineup['positionStatus'] == 'actual', :]
actual = actual.drop_duplicates(subset=['gameId', 'playerId'])
actual_ids = list(set(actual.gameId))
# Expected
exp = df_lineup.loc[(
(df_lineup['positionStatus'] == 'expected')
&
~(df_lineup['gameId'].isin(actual_ids))
), :]
exp = exp.drop_duplicates(subset=['gameId', 'playerId'])
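        # i.e. expected lineups are kept only for games that do not already have an actual lineup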
# Concat
actual = pd.concat(objs=[actual, exp], axis=0)
actual = actual.rename(columns={'teamDisposition': 'batterDisposition'})
self.lineups = actual
def add_batter_rolling_stats(self, shift_back=True):
"""
Adds:
attrib self.batter_summary
"""
# Path
bat_roll_path = CONFIG.get(self.league)\
.get('paths')\
.get('rolling_stats')\
.format('batting')+"player/"
# Read in
bat_roll = pd.concat(
objs=[
|
pd.read_parquet(bat_roll_path+fname)
|
pandas.read_parquet
|
import logging
import math
import random
import sys
import numpy
import pandas
import openbiolink.graphProperties as graphProp
from openbiolink import globalConfig
from openbiolink import globalConfig as glob
from openbiolink import utils
from openbiolink.graph_creation.metadata_edge import edgeMetadata as meta
from openbiolink.train_test_set_creation.sampler import NegativeSampler
from openbiolink.train_test_set_creation.trainTestSetWriter import TrainTestSetWriter
random.seed(glob.RANDOM_STATE)
numpy.random.seed(glob.RANDOM_STATE)
class TrainTestSetCreation():
"""
Manager class for handling the creation of train test splits given a graph
Attributes
----------
all_nodes : pandas.DataFrame
DataFrame with all nodes, columns = globalConfig.COL_NAMES_NODES
all_tp : pandas.DataFrame
DataFrame with edges from the positive graph, i.e. all positive examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tp_edgeTypes : [str]
list of all edge types present in the positive examples
all_tn : pandas.DataFrame
DataFrame with edges from the negative graph, i.e. all negative examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tn_edgeTypes : [str]
list of all edge types present in the negative examples
meta_edges_dic : {str: (str, str, str)}
        dictionary of all possible h,r,t combinations, mapped to their types. The key is
        '%s_%s_%s' % (node1Type, edgeType, node2Type), the value is (node1Type, edgeType, node2Type)
tmo_nodes : pandas.DataFrame
DataFrame with all nodes present in the t-1 graph, columns = globalConfig.COL_NAMES_NODES
tmo_all_tp : pandas.DataFrame
DataFrame with edges from the positive t-1 graph, i.e. all positive t-1 examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tmo_tp_edgeTypes : [str]
list of all edge types present in the positive t-1 examples
tmo_all_tn : pandas.DataFrame
DataFrame with edges from the negative t-1 graph, i.e. all negative t-1 examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tmo_tn_edgeTypes : [str]
list of all edge types present in the negative t-1 examples
"""
def __init__(self,
graph_path,
tn_graph_path,
all_nodes_path,
sep='\t',
#meta_edge_triples=None, #nicetohave (1) split for subsample of edges, define own meta edges
t_minus_one_graph_path=None,
t_minus_one_tn_graph_path=None,
t_minus_one_nodes_path=None):
self.writer = TrainTestSetWriter()
with open(all_nodes_path) as file:
self.all_nodes =
|
pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_NODES)
|
pandas.read_csv
|
import os
import numpy as np
import pandas as pd
from pipedown.cross_validation.splitters import RandomSplitter
from pipedown.dag import DAG
from pipedown.nodes.base import Input, Model, Node, Primary
from pipedown.nodes.filters import Collate, ItemFilter
from pipedown.nodes.metrics import MeanSquaredError
def test_dag_fit_run_and_fitrun():
run_list = []
fit_list = []
class MyLoader(Node):
def run(self, *args):
df = pd.DataFrame()
df["a"] = np.random.randn(10)
df["b"] = np.random.randn(10)
df["c"] = np.random.randn(10)
return df
class MyNode(Node):
def __init__(self, name):
self._name = name
def fit(self, X, y):
fit_list.append(self._name)
self.x_mean = X.mean()
def run(self, X, y):
run_list.append(self._name)
return X + self.x_mean, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"loader": MyLoader(),
"primary": Primary(["a", "b"], "c"),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
}
def edges(self):
return {
"primary": {"test": "input", "train": "loader"},
"my_node1": "primary",
"my_node2": "my_node1",
}
# Fit
my_dag = MyDAG()
dag_outputs = my_dag.fit(outputs="my_node2")
assert dag_outputs is None
assert isinstance(fit_list, list)
assert isinstance(run_list, list)
assert len(fit_list) == 2
assert len(run_list) == 2
assert "A" in fit_list
assert "B" in fit_list
assert "A" in run_list
assert "B" in run_list
# Run
df = pd.DataFrame()
df["a"] = np.random.randn(5)
df["b"] = np.random.randn(5)
xo, yo = my_dag.run(inputs={"input": df}, outputs="my_node2")
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 5
assert yo is None
assert isinstance(fit_list, list)
assert isinstance(run_list, list)
assert len(fit_list) == 2
assert len(run_list) == 4
assert "A" in fit_list
assert "B" in fit_list
assert "A" in run_list[2:]
assert "B" in run_list[2:]
# Fit run
while len(fit_list) > 0:
fit_list.pop()
while len(run_list) > 0:
run_list.pop()
xo, yo = my_dag.fit_run(outputs="my_node2")
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 10
assert xo.shape[1] == 2
assert isinstance(yo, pd.Series)
assert yo.shape[0] == 10
assert isinstance(fit_list, list)
assert isinstance(run_list, list)
assert len(fit_list) == 2
assert len(run_list) == 2
assert "A" in fit_list
assert "B" in fit_list
assert "A" in run_list
assert "B" in run_list
def test_dag_default_outputs():
class MyLoader(Node):
def run(self, *args):
df = pd.DataFrame()
df["a"] = np.random.randn(10)
df["b"] = np.random.randn(10)
df["c"] = np.random.randn(10)
return df
class MyNode(Node):
def __init__(self, name):
self._name = name
def run(self, X, y):
return X + 1, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"loader": MyLoader(),
"primary": Primary(["a", "b"], "c"),
"item_filter_1": ItemFilter(lambda x: x["a"] < 3),
"item_filter_2": ItemFilter(
lambda x: (x["a"] >= 3) & (x["a"] < 10)
),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
}
def edges(self):
return {
"primary": {"test": "input", "train": "loader"},
"item_filter_1": "primary",
"item_filter_2": "primary",
"my_node1": "item_filter_1",
"my_node2": "item_filter_2",
}
# During training, default outputs should be my_node1 and 2 (not Input)
my_dag = MyDAG()
my_dag.instantiate_dag("train")
def_outputs = my_dag.get_default_outputs("train")
assert isinstance(def_outputs, list)
assert len(def_outputs) == 2
assert "my_node1" in def_outputs
assert "my_node2" in def_outputs
# Same thing for test (output should not be loader)
my_dag = MyDAG()
my_dag.instantiate_dag("test")
def_outputs = my_dag.get_default_outputs("test")
assert isinstance(def_outputs, list)
assert len(def_outputs) == 2
assert "my_node1" in def_outputs
assert "my_node2" in def_outputs
def test_dag_eval_order_with_empty():
run_list = []
class MyNode(Node):
def __init__(self, name):
self._name = name
def run(self, X, y):
run_list.append(self._name)
return X + 1, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"primary": Primary(["a", "b"], "c"),
"item_filter_1": ItemFilter(lambda x: x["a"] < 3),
"item_filter_2": ItemFilter(
lambda x: (x["a"] >= 3) & (x["a"] < 10)
),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
"collate": Collate(),
}
def edges(self):
return {
"primary": {"test": "input", "train": "input"},
"item_filter_1": "primary",
"item_filter_2": "primary",
"my_node1": "item_filter_1",
"my_node2": "item_filter_2",
"collate": ["my_node1", "my_node2"],
}
# Data split into two separate branches then recombined
df = pd.DataFrame()
df["a"] = [1, 2, 3, 4, 5, 6]
df["b"] = [10, 20, 30, 40, 50, 60]
df["c"] = [10, 20, 30, 40, 50, 60]
my_dag = MyDAG()
xo, yo = my_dag.run({"input": df})
assert len(run_list) == 2
assert "A" in run_list
assert "B" in run_list
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 6
assert xo.shape[1] == 2
assert xo["a"].iloc[0] == 2
assert xo["a"].iloc[1] == 3
assert xo["a"].iloc[2] == 4
assert xo["a"].iloc[3] == 5
assert xo["a"].iloc[4] == 6
assert xo["a"].iloc[5] == 7
assert xo["b"].iloc[0] == 11
assert xo["b"].iloc[1] == 21
assert xo["b"].iloc[2] == 31
assert xo["b"].iloc[3] == 41
assert xo["b"].iloc[4] == 51
assert xo["b"].iloc[5] == 61
# Reset the run list
while len(run_list) > 0:
run_list.pop()
# Data split into two separate branches but one is never executed
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import time
import numpy as np
import pandas as pd
from scipy import interpolate
import copy
from pandas.api.types import is_string_dtype
def get_GAM_df_by_models(models, x_values_lookup=None, aggregate=True):
models = iter(models)
first_model = next(models)
first_df = first_model.get_GAM_df(x_values_lookup)
is_x_values_lookup_none = x_values_lookup is None
if is_x_values_lookup_none:
x_values_lookup = first_df[['feat_name', 'x']].set_index('feat_name').x.to_dict()
all_dfs = [first_df]
for model in models:
the_df = model.get_GAM_df(x_values_lookup)
all_dfs.append(the_df)
if not aggregate:
return all_dfs
if len(all_dfs) == 1:
return first_df
all_ys = [np.concatenate(df.y) for df in all_dfs]
split_pts = first_df.y.apply(lambda x: len(x)).cumsum()[:-1]
first_df['y'] = np.split(np.mean(all_ys, axis=0), split_pts)
first_df['y_std'] = np.split(np.std(all_ys, axis=0), split_pts)
return first_df
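# Illustrative sketch (added example, not part of the original module): the aggregation
# above concatenates each model's per-feature y arrays, averages element-wise, and splits
# the result back into per-feature chunks. A standalone toy version of that idea:
def _aggregate_shape_functions_demo():
    per_model_ys = [np.array([1.0, 2.0, 3.0, 10.0]), np.array([3.0, 2.0, 1.0, 20.0])]
    split_pts = [3]  # first feature has 3 x-values, the second has 1
    mean_chunks = np.split(np.mean(per_model_ys, axis=0), split_pts)
    std_chunks = np.split(np.std(per_model_ys, axis=0), split_pts)
    return mean_chunks, std_chunks  # ([2., 2., 2.], [15.]) and ([1., 0., 1.], [5.])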
def predict_score(model, X):
result = predict_score_with_each_feature(model, X)
return result.values.sum(axis=1)
def predict_score_by_df(GAM_plot_df, X):
result = predict_score_with_each_feature_by_df(GAM_plot_df, X, sum_directly=True)
return result
def predict_score_with_each_feature(model, X):
x_values_lookup = get_x_values_lookup(X, model.feature_names)
GAM_plot_df = model.get_GAM_df(x_values_lookup)
return predict_score_with_each_feature_by_df(GAM_plot_df, X)
def predict_score_with_each_feature_by_df(GAM_plot_df, X, sum_directly=False):
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=GAM_plot_df.feat_name.iloc[1:(X.shape[1]+1)].values.tolist())
from tqdm import tqdm
if sum_directly:
scores = np.zeros((X.shape[0]))
else:
scores = np.empty((X.shape[0], GAM_plot_df.shape[0]))
for f_idx, attrs in tqdm(GAM_plot_df.iterrows()):
if attrs.feat_idx == -1:
offset = attrs.y[0]
if sum_directly:
scores += offset
else:
scores[:, f_idx] = offset
continue
feat_idx = attrs.feat_idx if not isinstance(attrs.feat_idx, tuple) else list(attrs.feat_idx)
truncated_X = X.iloc[:, feat_idx]
if isinstance(attrs.feat_idx, tuple):
score_lookup = pd.Series(attrs.y, index=attrs.x)
truncated_X = pd.MultiIndex.from_frame(truncated_X) # list(truncated_X.itertuples(index=False, name=None))
else:
score_lookup = pd.Series(attrs.y, index=attrs.x)
truncated_X = truncated_X.values
if sum_directly:
scores += score_lookup[truncated_X].values
else:
scores[:, (f_idx)] = score_lookup[truncated_X].values
if sum_directly:
return scores
else:
return pd.DataFrame(scores, columns=GAM_plot_df.feat_name.values.tolist())
def sigmoid(x):
"Numerically stable sigmoid function."
return np.where(x >= 0,
1 / (1 + np.exp(-x)),
np.exp(x) / (1 + np.exp(x)))
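# Illustrative check (added example, not in the original module): the selected branch never
# exponentiates a positive value, so the returned values stay finite for large |x|
# (np.where still evaluates both branches, so NumPy may emit overflow warnings).
def _sigmoid_stability_demo():
    x = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
    return sigmoid(x)  # approximately [0., 0.2689, 0.5, 0.7311, 1.]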
def get_X_values_counts(X, feature_names=None):
if feature_names is None:
feature_names = ['f%d' % i for i in range(X.shape[1])] \
if isinstance(X, np.ndarray) else X.columns
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=feature_names)
# return {'f%d' % idx: dict(zip(*np.unique(X[:, idx], return_counts=True))) for idx in range(X.shape[1])}
return X.apply(lambda x: x.value_counts().sort_index().to_dict(), axis=0)
def bin_data(X, max_n_bins=256):
'''
    Quantile-bin each numeric column of X, snapping high-cardinality columns to roughly max_n_bins quantile edges.
'''
X = X.copy()
for col_name, dtype in zip(X.dtypes.index, X.dtypes):
if is_string_dtype(dtype): # categorical
continue
col_data = X[col_name].astype(np.float32)
uniq_vals = np.unique(col_data[~np.isnan(col_data)])
if len(uniq_vals) > max_n_bins:
print(f'bin features {col_name} with uniq val {len(uniq_vals)} to only {max_n_bins}')
bins = np.unique(
np.quantile(
col_data, q=np.linspace(0, 1, max_n_bins + 1),
)
)
_, bin_edges = np.histogram(col_data, bins=bins)
digitized = np.digitize(col_data, bin_edges, right=False)
digitized[digitized == 0] = 1
digitized -= 1
# NOTE: NA handling done later.
# digitized[np.isnan(col_data)] = self.missing_constant
X.loc[:, col_name] = pd.Series(bins)[digitized].values.astype(np.float32)
return X
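# Hypothetical usage sketch (added example; column names are invented): a high-cardinality
# numeric column is snapped to quantile bin edges, while string columns pass through untouched.
def _bin_data_demo():
    demo = pd.DataFrame({
        "reading": np.random.randn(1000),   # continuous -> gets binned
        "site": ["a", "b"] * 500,           # string dtype -> left unchanged
    })
    binned = bin_data(demo, max_n_bins=16)
    # the continuous column now holds only (roughly) the 16 quantile bin edges
    return binned["reading"].nunique()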
def get_x_values_lookup(X, feature_names=None):
if isinstance(X, np.ndarray):
if feature_names is None:
            feature_names = ['f%d' % idx for idx in range(X.shape[1])]
X =
|
pd.DataFrame(X, columns=feature_names)
|
pandas.DataFrame
|
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.tslibs.ccalendar import (
DAYS,
MONTHS,
)
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
from pandas.compat import is_platform_windows
from pandas import (
DatetimeIndex,
Index,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
from pandas.core.tools.datetimes import to_datetime
import pandas.tseries.frequencies as frequencies
import pandas.tseries.offsets as offsets
@pytest.fixture(
params=[
(timedelta(1), "D"),
(timedelta(hours=1), "H"),
(timedelta(minutes=1), "T"),
(timedelta(seconds=1), "S"),
(np.timedelta64(1, "ns"), "N"),
(timedelta(microseconds=1), "U"),
(timedelta(microseconds=1000), "L"),
]
)
def base_delta_code_pair(request):
return request.param
freqs = (
[f"Q-{month}" for month in MONTHS]
+ [f"{annual}-{month}" for annual in ["A", "BA"] for month in MONTHS]
+ ["M", "BM", "BMS"]
+ [f"WOM-{count}{day}" for count in range(1, 5) for day in DAYS]
+ [f"W-{day}" for day in DAYS]
)
@pytest.mark.parametrize("freq", freqs)
@pytest.mark.parametrize("periods", [5, 7])
def test_infer_freq_range(periods, freq):
freq = freq.upper()
gen = date_range("1/1/2000", periods=periods, freq=freq)
index = DatetimeIndex(gen.values)
if not freq.startswith("Q-"):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == "Q-DEC" and gen.freqstr in (
"Q",
"Q-DEC",
"Q-SEP",
"Q-JUN",
"Q-MAR",
)
is_nov_range = inf_freq == "Q-NOV" and gen.freqstr in (
"Q-NOV",
"Q-AUG",
"Q-MAY",
"Q-FEB",
)
is_oct_range = inf_freq == "Q-OCT" and gen.freqstr in (
"Q-OCT",
"Q-JUL",
"Q-APR",
"Q-JAN",
)
assert is_dec_range or is_nov_range or is_oct_range
def test_raise_if_period_index():
index = period_range(start="1/1/1990", periods=20, freq="M")
msg = "Check the `freq` attribute instead of using infer_freq"
with pytest.raises(TypeError, match=msg):
frequencies.infer_freq(index)
def test_raise_if_too_few():
index = DatetimeIndex(["12/31/1998", "1/3/1999"])
msg = "Need at least 3 dates to infer frequency"
with pytest.raises(ValueError, match=msg):
frequencies.infer_freq(index)
def test_business_daily():
index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"])
assert frequencies.infer_freq(index) == "B"
def test_business_daily_look_alike():
# see gh-16624
#
# Do not infer "B when "weekend" (2-day gap) in wrong place.
index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"])
assert frequencies.infer_freq(index) is None
def test_day_corner():
index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"])
assert frequencies.infer_freq(index) == "D"
def test_non_datetime_index():
dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"])
assert frequencies.infer_freq(dates) == "D"
def test_fifth_week_of_month_infer():
# see gh-9425
#
# Only attempt to infer up to WOM-4.
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake():
# All of these dates are on same day
# of week and are 4 or 5 weeks apart.
index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29", "2013-11-26"])
assert frequencies.infer_freq(index) != "WOM-4TUE"
def test_fifth_week_of_month():
# see gh-9425
#
# Only supports freq up to WOM-4.
msg = (
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range("2014-01-01", freq="WOM-5MON")
def test_monthly_ambiguous():
rng = DatetimeIndex(["1/31/2000", "2/29/2000", "3/31/2000"])
assert rng.inferred_freq == "M"
def test_annual_ambiguous():
rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
assert rng.inferred_freq == "A-JAN"
@pytest.mark.parametrize("count", range(1, 5))
def test_infer_freq_delta(base_delta_code_pair, count):
b = Timestamp(datetime.now())
base_delta, code = base_delta_code_pair
inc = base_delta * count
index = DatetimeIndex([b + inc * j for j in range(3)])
exp_freq = f"{count:d}{code}" if count > 1 else code
assert frequencies.infer_freq(index) == exp_freq
@pytest.mark.parametrize(
"constructor",
[
lambda now, delta: DatetimeIndex(
[now + delta * 7] + [now + delta * j for j in range(3)]
),
lambda now, delta: DatetimeIndex(
[now + delta * j for j in range(3)] + [now + delta * 7]
),
],
)
def test_infer_freq_custom(base_delta_code_pair, constructor):
b = Timestamp(datetime.now())
base_delta, _ = base_delta_code_pair
index = constructor(b, base_delta)
assert frequencies.infer_freq(index) is None
@pytest.mark.parametrize(
"freq,expected", [("Q", "Q-DEC"), ("Q-NOV", "Q-NOV"), ("Q-OCT", "Q-OCT")]
)
def test_infer_freq_index(freq, expected):
rng = period_range("1959Q2", "2009Q3", freq=freq)
rng = Index(rng.to_timestamp("D", how="e").astype(object))
assert rng.inferred_freq == expected
@pytest.mark.parametrize(
"expected,dates",
list(
{
"AS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"],
"Q-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"],
"M": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"],
"W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"],
"D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"],
"H": [
"2011-12-31 22:00",
"2011-12-31 23:00",
"2012-01-01 00:00",
"2012-01-01 01:00",
],
}.items()
),
)
def test_infer_freq_tz(tz_naive_fixture, expected, dates):
# see gh-7310
tz = tz_naive_fixture
idx = DatetimeIndex(dates, tz=tz)
assert idx.inferred_freq == expected
@pytest.mark.parametrize(
"date_pair",
[
["2013-11-02", "2013-11-5"], # Fall DST
["2014-03-08", "2014-03-11"], # Spring DST
["2014-01-01", "2014-01-03"], # Regular Time
],
)
@pytest.mark.parametrize(
"freq", ["H", "3H", "10T", "3601S", "3600001L", "3600000001U", "3600000000001N"]
)
def test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq):
# see gh-8772
tz = tz_naive_fixture
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
assert idx.inferred_freq == freq
def test_infer_freq_tz_transition_custom():
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize(
"America/Chicago"
)
assert index.inferred_freq is None
@pytest.mark.parametrize(
"data,expected",
[
# Hourly freq in a day must result in "H"
(
[
"2014-07-01 09:00",
"2014-07-01 10:00",
"2014-07-01 11:00",
"2014-07-01 12:00",
"2014-07-01 13:00",
"2014-07-01 14:00",
],
"H",
),
(
[
"2014-07-01 09:00",
"2014-07-01 10:00",
"2014-07-01 11:00",
"2014-07-01 12:00",
"2014-07-01 13:00",
"2014-07-01 14:00",
"2014-07-01 15:00",
"2014-07-01 16:00",
"2014-07-02 09:00",
"2014-07-02 10:00",
"2014-07-02 11:00",
],
"BH",
),
(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
],
"BH",
),
(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
"BH",
),
],
)
def test_infer_freq_business_hour(data, expected):
# see gh-7905
idx = DatetimeIndex(data)
assert idx.inferred_freq == expected
def test_not_monotonic():
rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
rng = rng[::-1]
assert rng.inferred_freq == "-1A-JAN"
def test_non_datetime_index2():
rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
assert result == rng.inferred_freq
@pytest.mark.parametrize(
"idx", [tm.makeIntIndex(10), tm.makeFloatIndex(10), tm.makePeriodIndex(10)]
)
def test_invalid_index_types(idx):
msg = "|".join(
[
"cannot infer freq from a non-convertible",
"Check the `freq` attribute instead of using infer_freq",
]
)
with pytest.raises(TypeError, match=msg):
frequencies.infer_freq(idx)
@pytest.mark.skipif(is_platform_windows(), reason="see gh-10822: Windows issue")
@pytest.mark.parametrize("idx", [
|
tm.makeStringIndex(10)
|
pandas._testing.makeStringIndex
|
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(
|
pd.read_excel(fn)
|
pandas.read_excel
|
"""
Copyright (c) 2017, University of Southern Denmark
All rights reserved.
This code is licensed under BSD 2-clause license.
See LICENSE file in the project root for license terms.
"""
import unittest
import shutil
import tempfile
import json
import os
import pandas as pd
from modestpy.fmi.model import Model
from modestpy.utilities.sysarch import get_sys_arch
from modestpy.loginit import config_logger
class TestFMPy(unittest.TestCase):
def setUp(self):
# Platform (win32, win64, linux32, linix64)
platform = get_sys_arch()
assert platform, 'Unsupported platform type!'
# Temp directory
self.tmpdir = tempfile.mkdtemp()
# Parent directory
parent = os.path.dirname(__file__)
# Resources
self.fmu_path = os.path.join(parent, 'resources', 'simple2R1C',
'Simple2R1C_{}.fmu'.format(platform))
inp_path = os.path.join(parent, 'resources', 'simple2R1C',
'inputs.csv')
ideal_path = os.path.join(parent, 'resources', 'simple2R1C',
'result.csv')
est_path = os.path.join(parent, 'resources', 'simple2R1C', 'est.json')
known_path = os.path.join(parent, 'resources', 'simple2R1C',
'known.json')
# Assert there is an FMU for this platform
assert os.path.exists(self.fmu_path), \
"FMU for this platform ({}) doesn't exist.\n".format(platform) + \
"No such file: {}".format(self.fmu_path)
self.inp =
|
pd.read_csv(inp_path)
|
pandas.read_csv
|
import math
import pandas as pd
import csv
import pathlib
import wx
import matplotlib
import matplotlib.pylab as pL
import matplotlib.pyplot as plt
import matplotlib.backends.backend_wxagg as wxagg
import re
import numpy as np
import scipy
import scipy.interpolate
import sys
#from mpl_toolkits.mplot3d import Axes3D
#import wx.lib.inspection as wxli
class ERTAPP(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, title='ERT Editing',pos=(100,100),size=(500,500))
#Built from template here: https://wiki.wxpython.org/GridSizerTutorial
#Set up Panels
def setUpPanels(self):
self.topPanel = wx.Panel(self, wx.ID_ANY,size = (1000,10),name='Top Panel')
self.infoPanel = wx.Panel(self, wx.ID_ANY,size = (1000,50),name='Info Panel')
self.chartPanel = wx.Panel(self, wx.ID_ANY,size = (1000,500),name='Chart Panel')
self.bottomPanel= wx.Panel(self, wx.ID_ANY,size = (1000,130),name='Bottom Panel')
#need to create more panels, see here: https://stackoverflow.com/questions/31286082/matplotlib-in-wxpython-with-multiple-panels
def titleSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (4, 4))
self.titleIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.title = wx.StaticText(self.topPanel, wx.ID_ANY, 'Advanced ERT Editing')
#Declare inputs for first row
def inputSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_TIP, wx.ART_OTHER, (4, 4))
self.inputOneIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.labelOne = wx.StaticText(self.topPanel, wx.ID_ANY, 'Input ERT Data')
self.inputTxtOne = wx.TextCtrl(self.topPanel, wx.ID_ANY, '')
self.inputTxtOne.SetHint('Enter data file path here')
self.inputBrowseBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onBrowse, self.inputBrowseBtn)
self.readInFileBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Read Data')
self.Bind(wx.EVT_BUTTON, self.onReadIn, self.readInFileBtn)
self.inputDataType = wx.Choice(self.topPanel, id=wx.ID_ANY,choices=['.DAT (LS)','.TXT (LS)','.DAT (SAS)', '.VTK', '.XYZ'],name='.TXT (LS)')
self.Bind(wx.EVT_CHOICE,self.onDataType,self.inputDataType)
self.autoShiftBx = wx.CheckBox(self.topPanel,wx.ID_ANY, 'Auto Shift?')
self.autoShiftBx.SetValue(True)
#Row 3 item(s)
self.TxtProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Name: ')
self.TxtProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Length: ')
self.TxtDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Data Points: ')
self.TxtBlank = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtBlank2 = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Min. Electrode Spacing: ')
self.TxtProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Project Name: ')
self.TxtArray = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Array: ')
self.msgProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgArray = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
# DataViz Area item(s)
def dataVizSetup(self):
self.editSlider = wx.Slider(self.chartPanel, pos=(200,0), id=wx.ID_ANY, style=wx.SL_TOP | wx.SL_AUTOTICKS | wx.SL_LABELS, name='Edit Data')
self.Bind(wx.EVT_SCROLL, self.onSliderEditEVENT, self.editSlider)
self.dataVizMsg1 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizMsg2 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizInput = wx.TextCtrl(self.chartPanel, wx.ID_ANY, '')
self.dataVizInputBtn = wx.Button(self.chartPanel, -1, "Use Value")
self.dataVizInputBtn.Bind(wx.EVT_BUTTON, self.ONdataVizInput)
self.saveEditsBtn = wx.Button(self.chartPanel, -1, "Save Edits")
self.saveEditsBtn.Bind(wx.EVT_BUTTON, self.ONSaveEdits)
self.saveEditsBtn.SetBackgroundColour((100,175,100))
self.currentChart = 'Graph'
self.editDataChoiceList = ['AppResist','Resistance','Electrode x-Dists','Variance','PctErr','PseudoX','PseudoZ']
self.editDataChoiceBool = [False]*len(self.editDataChoiceList)
self.editDataValues = []
for i in self.editDataChoiceList:
self.editDataValues.append([0,0])
self.editDataType = wx.Choice(self.chartPanel, id=wx.ID_ANY,choices=self.editDataChoiceList,name='Edit Data')
self.editDataType.Bind(wx.EVT_CHOICE, self.onSelectEditDataType)
self.setEditToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Unused',size=(25,30))
self.setEditToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onSetEditToggle)
self.labelMinRem = wx.StaticText(self.chartPanel, wx.ID_ANY, 'Min.')
self.inputTxtMinRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER, name='')
self.inputTxtMinRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.labelMaxRem = wx.StaticText(self.chartPanel, wx.ID_ANY,'Max.')
self.inputTxtMaxRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER,name= '')
self.inputTxtMaxRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.editTypeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Remove',size=(25,50))
self.editTypeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onEditTypeToggle)
self.editLogicToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'OR',size=(25,25))
self.editLogicToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onLogicToggle)
self.removePtsBtn = wx.Button(self.chartPanel, -1, "Edit Points")
self.removePtsBtn.Bind(wx.EVT_BUTTON, self.onRemovePts)
self.electrodeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'On',size=(25,25))
self.electrodeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ONtoggle)
self.GraphEditBtn = wx.Button(self.chartPanel, -1, "Graphic Editor", size=(100, 30))
self.GraphEditBtn.Bind(wx.EVT_BUTTON, self.graphChartEvent)
self.StatEditBtn = wx.Button(self.chartPanel, -1, "Statistical Editor", size=(100, 30))
self.Bind(wx.EVT_BUTTON, self.statChartEvent, self.StatEditBtn)
self.addGPSBtn = wx.Button(self.chartPanel, -1, "GPS Data", size=(100, 30))
self.addGPSBtn.Bind(wx.EVT_BUTTON, self.GPSChartEvent)
self.addTopoBtn = wx.Button(self.chartPanel, -1, "Topography Data", size=(100, 30))
self.addTopoBtn.Bind(wx.EVT_BUTTON, self.topoChartEvent)
self.reviewBtn = wx.Button(self.chartPanel, -1, "Review Edits", size=(100, 15))
self.reviewBtn.Bind(wx.EVT_BUTTON, self.reviewEvent)
def bottomAreaSetup(self):
# Row 4 items
self.reverseBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Reverse Profile')
self.labelGPSIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'GPS Data')
self.inputTxtGPS = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter GPS Filepath Here')
self.inputGPSBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onGPSBrowse, self.inputGPSBtn)
self.Bind(wx.EVT_CHECKBOX, self.onReverse, self.reverseBx)
self.dataEditMsg = wx.StaticText(self.bottomPanel, wx.ID_ANY, '')
self.labelTopoIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Topo Data')
self.inputTxtTopo = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Topo Filepath Here')
self.inputTopoBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.includeTopoBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Include Topography')
self.Bind(wx.EVT_BUTTON, self.onTopoBrowse, self.inputTopoBtn)
self.Bind(wx.EVT_CHECKBOX, self.onIncludeTopo, self.includeTopoBx)
#Bottom Row items
self.saveBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Export and Save Data')
self.cancelBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Cancel')
self.Bind(wx.EVT_BUTTON, self.onExport, self.saveBtn)
self.Bind(wx.EVT_BUTTON, self.onCancel, self.cancelBtn)
self.labelExport = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Export Data')
self.exportTXT = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Export Filepath Here')
self.exportDataBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onExportBrowse, self.exportDataBtn)
#Set up chart
def chartSetup(self):
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.figure = matplotlib.figure.Figure()
self.canvas = wxagg.FigureCanvasWxAgg(self.chartPanel, -1, self.figure)
self.axes = self.figure.add_subplot(111)
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Depth (m)')
self.toolbar = wxagg.NavigationToolbar2WxAgg(self.canvas)
def sizersSetup(self):
#Set up sizers
self.baseSizer = wx.BoxSizer(wx.VERTICAL)
self.topSizer = wx.BoxSizer(wx.VERTICAL)
self.titleSizer = wx.BoxSizer(wx.HORIZONTAL)
self.inputSizer = wx.BoxSizer(wx.HORIZONTAL)
#self.readMsgSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileTxtSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileTxtSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.ctrlSizer = wx.BoxSizer(wx.VERTICAL)
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.dataVizSizer = wx.BoxSizer(wx.HORIZONTAL)
self.vizInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.dataEditSizer = wx.BoxSizer(wx.HORIZONTAL)
self.bottomSizer = wx.BoxSizer(wx.VERTICAL)
self.GPSSizer = wx.BoxSizer(wx.HORIZONTAL)
self.TopoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.botSizer = wx.BoxSizer(wx.HORIZONTAL)
def addtoSizers(self):
#Add items to sizers
self.titleSizer.Add(self.title, 0, wx.ALIGN_CENTER)
self.inputSizer.Add(self.labelOne, 1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputTxtOne, 8,wx.EXPAND,5)
self.inputSizer.Add(self.inputBrowseBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputDataType,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.readInFileBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.autoShiftBx, 1, wx.ALIGN_CENTER, 5)
#self.readMsgSizer.Add(self.msgLabelOne, 0, wx.ALL,5)
self.profileTxtSizer1.Add(self.TxtProfileName, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtDataPts, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtArray, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtProjectName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgDataPts, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgArray, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgProjectName, 0, wx.ALIGN_LEFT,5)
self.profileInfoSizer.Add(self.profileTxtSizer1, 1,wx.ALL,5)
self.profileInfoSizer.Add(self.profileMsgSizer1,3,wx.ALL,5)
self.profileInfoSizer.Add(self.profileTxtSizer2, 1, wx.ALL, 5)
self.profileInfoSizer.Add(self.profileMsgSizer2, 3, wx.ALL, 5)
self.topSizer.Add(self.titleSizer,1,wx.ALL,5)
self.topSizer.Add(self.inputSizer, 2, wx.ALL, 5)
#self.topSizer.Add(self.readMsgSizer, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizMsg1,16,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizMsg2, 24, wx.ALL, 5)
self.vizInfoSizer.Add(self.electrodeToggleBtn,1,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizInput, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizInputBtn,3,wx.ALL,5)
self.vizInfoSizer.Add(self.saveEditsBtn,3,wx.ALL,5)
self.ctrlSizer.Add(self.GraphEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.StatEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addGPSBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addTopoBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.reviewBtn,1,wx.ALL,5)
self.dataEditSizer.Add(self.editDataType,5, wx.ALL, 5)
self.dataEditSizer.Add(self.setEditToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.labelMinRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMinRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMaxRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.labelMaxRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.editTypeToggleBtn,3,wx.ALL,5)
self.dataEditSizer.Add(self.editLogicToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.removePtsBtn, 3, wx.ALL, 5)
self.chartSizer.Add(self.vizInfoSizer, 1, wx.ALL, 5)
self.chartSizer.Add(self.editSlider,1, wx.LEFT | wx.RIGHT | wx.EXPAND,94)
self.chartSizer.Add(self.canvas, 12, wx.EXPAND)
self.chartSizer.Add(self.toolbar, 1, wx.EXPAND)
self.chartSizer.Add(self.dataEditSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.ctrlSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.chartSizer,6,wx.EXPAND)
self.GPSSizer.Add(self.dataEditMsg, 2, wx.ALL, 5)
self.GPSSizer.Add(self.reverseBx, 1, wx.ALL, 5)
self.GPSSizer.Add(self.labelGPSIN, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputTxtGPS, 8, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputGPSBtn, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.includeTopoBx, 2, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.labelTopoIN, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTxtTopo, 8, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTopoBtn, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.botSizer.Add(self.labelExport, 1, wx.ALL, 5)
self.botSizer.Add(self.exportTXT,6, wx.ALL, 5)
self.botSizer.Add(self.exportDataBtn,1, wx.ALL, 5)
self.botSizer.Add(self.cancelBtn, 1, wx.ALL, 5)
self.botSizer.Add(self.saveBtn, 1, wx.ALL, 5)
#btnSizer.Add(saveEditsBtn,0,wx.ALL,5)
self.bottomSizer.Add(self.GPSSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.TopoSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.botSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
def addtoPanels(self):
self.topPanel.SetSizer(self.topSizer)
self.infoPanel.SetSizer(self.profileInfoSizer)
self.chartPanel.SetSizer(self.dataVizSizer)
self.bottomPanel.SetSizer(self.bottomSizer)
self.topPanel.Layout()
self.baseSizer.Add(self.topPanel,1, wx.EXPAND,1)
self.baseSizer.Add(self.infoPanel,1,wx.EXPAND,1)
self.baseSizer.Add(self.chartPanel, 10, wx.EXPAND | wx.ALL, 5)
self.baseSizer.Add(self.bottomPanel, 1, wx.EXPAND | wx.ALL, 1)
self.SetSizer(self.baseSizer)
self.SetSize(1100,950)
def variableInfo(): #To see what the 'global' variables are
pass
#self.electxDataIN: list of all electrode xdistances
#self.xCols: list with numbers of columns with x-values, from initial read-in table. varies with datatype
#self.xData: list with all x-values of data points
#self.zData: list with all z-values of data points (depth)
#self.values: list with all resist. values of data points
#self.inputDataExt: extension of file read in, selected from initial drop-down (default = .dat (LS))
#self.xDF : dataframe with only x-dist of electrodes, and all of them
#self.dataHeaders: headers from original file read in, used for column names for dataframeIN
#self.dataListIN: nested list that will be used to create dataframe, with all read-in data
#self.dataframeIN: initial dataframe from data that is read in
#self.df: dataframe formatted for editing, but remaining static as initial input data
#self.dataframeEDIT: dataframe that is manipulated during editing
#self.electrodes: sorted list of all electrode xdistances
#self.electrodesShifted: shifted, sorted list of all electrode xdistances
#self.electState:list of booleans giving status of electrode (True = in use, False = edited out)
#self.electrodeElevs: surface elevation values at each electrode
#self.dataLengthIN: number of measurements in file/length of dataframes
#self.dataframeEDITColHeaders
#self.dataShifted: indicates whether data has been shifted
setUpPanels(self)
titleSetup(self)
inputSetup(self)
dataVizSetup(self)
bottomAreaSetup(self)
chartSetup(self)
sizersSetup(self)
addtoSizers(self)
addtoPanels(self)
#wxli.InspectionTool().Show(self)
#Initial Plot
def nullFunction(self,event):
pass
def onBrowse(self,event):
with wx.FileDialog(self,"Open Data File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.dataPath = pathlib.Path(fileDialog.GetPath())
fName = str(self.dataPath.parent) + '\\' + self.dataPath.name
self.inputDataExt = self.dataPath.suffix
try:
with open(self.dataPath,'r') as datafile:
self.inputTxtOne.SetValue(fName)
except IOError:
wx.LogError("Cannot Open File")
if self.inputDataExt.lower() == '.txt':
self.inputDataExt = '.TXT (LS)'
n = 1
elif self.inputDataExt.lower() == '.dat':
if self.dataPath.stem.startswith('lr'):
self.inputDataExt = '.DAT (SAS)'
n = 2
else:
self.inputDataExt = '.DAT (LS)'
n = 0
elif self.inputDataExt.lower() == '.vtk':
self.inputDataExt = '.VTK'
n=3
elif self.inputDataExt.lower() == '.xyz':
self.inputDataExt = '.XYZ'
n=4
else:
wx.LogError("Cannot Open File")
if self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
outPath = self.dataPath.stem.split('-')[0]
else:
outPath = self.dataPath.stem.split('.')[0]
if outPath.startswith('lr'):
outPath = outPath[2:]
outPath = outPath +'_pyEdit.dat'
if self.includeTopoBx.GetValue():
outPath = outPath[:-4]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(str(self.dataPath.with_name(outPath)))
self.inputDataType.SetSelection(n)
self.readInFileBtn.SetLabelText('Read Data')
def onGPSBrowse(self,event):
with wx.FileDialog(self,"Open GPS File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.GPSpath = pathlib.Path(fileDialog.GetPath())
gpsFName = str(self.GPSpath.parent) + '\\' + self.GPSpath.name
self.inputTxtGPS.SetValue(gpsFName)
self.getGPSVals()
def getGPSVals(self):
with open(self.GPSpath) as GPSFile:
data = csv.reader(GPSFile)
self.gpsXData = []
self.gpsYData = []
self.gpsLabels = []
for row in enumerate(data):
if row[0] == 0:
pass #headerline
else:
r = re.split('\t+', str(row[1][0]))
if row[0] == '':
pass
else:
self.gpsLabels.append(r[2])
self.gpsXData.append(float(r[3]))
self.gpsYData.append(float(r[4]))
def onTopoBrowse(self,event):
with wx.FileDialog(self,"Open Topo File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.topoPath = pathlib.Path(fileDialog.GetPath())
topoFName = str(self.topoPath.parent) + '\\' + self.topoPath.name
self.inputTxtTopo.SetValue(topoFName)
self.includeTopoBx.SetValue(True)
self.getTopoVals()
self.topoText()
def onIncludeTopo(self,event):
self.topoText()
def topoText(self):
if self.includeTopoBx.GetValue() == True:
#print('topo' not in self.exportTXT.GetValue())
if 'topo' not in self.exportTXT.GetValue():
#print("It's Not in")
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(outPath)
elif self.includeTopoBx.GetValue() == False:
if '_topo' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_topo"))
strInd2 = strInd + 5
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def onReverse(self,event):
self.reverseText()
def reverseText(self):
if self.reverseBx.GetValue() == True:
if '_rev' not in self.exportTXT.GetValue():
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_rev.dat"
self.exportTXT.SetValue(outPath)
elif self.reverseBx.GetValue() == False:
if '_rev' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_rev"))
strInd2 = strInd + 4
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def getTopoVals(self):
with open(self.topoPath) as topoFile:
data = csv.reader(topoFile)
topoXData = []
topoYData = []
topoLabels = []
for row in enumerate(data):
if row[0] == 0:
pass
else:
r = re.split('\t+', str(row[1][0]))
if r[0] == '':
pass
else:
topoLabels.append(r[0])
topoXData.append(float(r[1]))
topoYData.append(float(r[2]))
self.topoDF = pd.DataFrame([topoXData, topoYData]).transpose()
self.topoDF.columns = ["xDist", "Elev"]
def onDataType(self,event):
self.inputDataExt = self.inputDataType.GetString(self.inputDataType.GetSelection())
if self.inputDataExt == '.DAT (LS)':
self.headerlines = 8
elif self.inputDataExt == '.DAT (SAS)':
self.headerlines = 5
elif self.inputDataExt == '.VTK':
self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt == '.XYZ':
            self.headerlines = 5  # !!!!! header length for .XYZ files not verified
elif self.inputDataExt =='':
self.headerlines = 8
else:
if len(self.inputTxtOne.GetValue()) > 0:
try:
with open(self.dataPath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
for row in enumerate(filereader):
if start == 0:
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
else:
continue
else:
continue
except:
self.headerlines = -1
wx.LogError('Data File not selected')
else:
self.headerlines = -1
def onReadIn(self, event):
self.onDataType(self) #initialize number of headerlines to use
self.dataHeader = []
filepath = pathlib.Path(self.inputTxtOne.GetValue())
self.ext = str(filepath.suffix)
filename = str(filepath.stem)
self.dataframeEDITColHeaders = ['MeasID','A(x)','A(z)','B(x)','B(z)','M(x)','M(z)','N(x)','N(z)', 'aVal', 'nFac','PseudoX','PseudoZ','Resistance','AppResist','Cycles','Variance','DataLevel','DtLvlMean','PctErr','Keep']
if self.ext.lower() == '.dat':
###############Need to update to fit .txt data format
dataLst = []
self.dataLead = []
self.dataTail = []
with open(filepath) as dataFile:
data = csv.reader(dataFile)
if self.inputDataExt == '.DAT (SAS)':
self.dataHeaders = ['M(x)','aVal','nFac','AppResist']
i = 0
dataList=[]
for row in enumerate(data):
if row[0]>self.headerlines: #Read in actual data
if row[0] > self.headerlines + datalength: #Read in data tail
self.dataTail.append(row[1])
else:
                                #The reader sometimes parses a row as a list of columns (as it should) and other times as one long string
if len(row[1]) < 4:
#Entire row is read as string
dataList.append(re.split(' +', row[1][0]))
else:
#Row is read correctly as separate columns
dataList.append(row[1])
i+=1
else:
if row[0] == 3: #Read in data length
datalength = float(row[1][0])
self.dataLead.append(row[1])#Create data lead variable for later use
datalengthIN = i
self.fileHeaderDict = {}
self.dataListIN = dataList #Formatted global nested list is created of data read in
project = self.dataLead[0][0]
array = self.dataLead[2][0]
if float(array) == 3:
array = "Dipole-Dipole"
msrmtType = 'Apparent Resistivity'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = round(float(self.dataLead[1][0]),2)
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = self.dataLead[3][0]
self.dataframeIN =
|
pd.DataFrame(self.dataListIN)
|
pandas.DataFrame
|
import unittest
import numpy as np
import pandas as pd
from pyvvo import cluster
class TestCluster(unittest.TestCase):
def test_euclidean_distance_sum_squared_array(self):
# All numbers are one away, so squares will be 1. Sum of squares
#
a1 = np.array((1, 2, 3))
a2 = np.array((2, 1, 4))
self.assertEqual(3, cluster.euclidean_distance_squared(a1, a2))
def test_euclidean_distance_sum_squared_series(self):
s1 = pd.Series((1, 10, 7, 5), index=['w', 'x', 'y', 'z'])
s2 = pd.Series((3, 5, -1, 10), index=['w', 'x', 'y', 'z'])
# 4+25+64+25 = 118
self.assertEqual(118, cluster.euclidean_distance_squared(s1, s2))
def test_euclidean_distance_sum_squared_dataframe(self):
d1 = pd.DataFrame({'c1': [1, 2, 3], 'c2': [1, 4, 9]})
d2 = pd.DataFrame({'c1': [2, 3, 4], 'c2': [0, -2, 7]})
# row one: 1 + 1
# row two: 1 + 36
# row three: 1 + 4
expected = pd.Series([2, 37, 5])
actual = cluster.euclidean_distance_squared(d1, d2)
self.assertTrue(expected.equals(actual))
def test_euclidean_distance_sum_squared_df_series(self):
v1 = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
v2 =
|
pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]})
|
pandas.DataFrame
|
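The tests above pin down the expected behaviour of cluster.euclidean_distance_squared across arrays, Series, and DataFrames; a minimal sketch consistent with those expectations (the actual pyvvo implementation may differ):

import numpy as np
import pandas as pd

def euclidean_distance_squared(a, b):
    """Sum of squared element-wise differences (sketch consistent with the tests above).

    Arrays and Series reduce to a scalar; DataFrames (or a DataFrame paired with a
    Series) reduce across columns, giving one value per row.
    """
    diff = (a - b) ** 2
    if isinstance(diff, pd.DataFrame):
        return diff.sum(axis=1)
    return diff.sum()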
"""Calculate statistics for a CheXpert dataset.
It works on the DataFrame, not on the CheXPertDataset class, to allow statistics on filtered data.
Example::
import chexpert_dataset as cxd
import chexpert_statistics as cxs
# Get a ChexPert dataset
cxdata = cxd.CheXpertDataset()
cxdata.fix_dataset() # optional
# Calculate statistics on it
stats = cxs.patient_study_image_count(cxdata.df)
The functions take as input a DataFrame create by a CheXpertDataset class and return the requested
statistics/summary. The DataFrame may be filtered (e.g. only the training rows), as long as it has
the same number of columns as the original CheXPertDataset DataFrame.
The returned DataFrames are in long format, i.e. one observation per row.
"""
import pandas as pd
import chexpert_dataset as cxd
# Index names, index values, and column names for functions that return DataFrames with statistics
# Most statistics DataFrames are long (stacked) - these index names are combined into MultiIndex
# indices as needed
# Whenever possible, they match the name of the column used to group the statistics
INDEX_NAME_SET = 'Set'
INDEX_NAME_ITEM = 'Item'
COL_COUNT = 'Count'
COL_PATIENT_STUDY = 'Patient/Study'
COL_AGED = 'Aged'
COL_PERCENTAGE = '%'
COL_PERCENTAGE_CUMULATIVE = 'Cum. %'
COL_PERCENTAGE_PATIENTS = 'Patients %'
COL_PERCENTAGE_IMAGES = 'Images %'
COL_LABEL_POSITIVE = 'Positive'
COL_LABEL_NEGATIVE = 'Negative'
COL_LABEL_UNCERTAIN = 'Uncertain'
COL_LABEL_NO_MENTION = 'No mention'
PATIENTS = 'Patients'
IMAGES = 'Images'
STUDIES = 'Studies'
def _pct(x): return 100 * x / x.sum()
def _summary_stats_by_set(df: pd.DataFrame, column: str) -> pd.DataFrame:
"""Calculate the summary statistics of a DataFrame that has counts."""
summary = df.groupby([cxd.COL_TRAIN_VALIDATION], as_index=True, observed=True).agg(
Minimum=(column, 'min'),
Maximum=(column, 'max'),
Median=(column, 'median'),
Mean=(column, 'mean'),
Std=(column, 'std'))
idx = pd.MultiIndex.from_product([summary.index, [column]], names=[
INDEX_NAME_SET, INDEX_NAME_ITEM])
summary.index = idx
return summary
def _add_percentage(df: pd.DataFrame, level=0, cumulative=False) -> pd.DataFrame:
"""Add percentages to a multi-index DataFrame in long format, using the given index level."""
# It must be a dataset in long format
assert len(df.columns) == 1
df[COL_PERCENTAGE] = df.groupby(level=level).apply(_pct)
if cumulative:
df[COL_PERCENTAGE_CUMULATIVE] = df.groupby(level=level)[COL_PERCENTAGE].cumsum()
return df
def _add_aux_patient_study_column(df: pd.DataFrame) -> pd.DataFrame:
"""Add a column that is unique for patient/study to help with grouping and counting."""
df = df.copy() # preserve the caller's data
df[COL_PATIENT_STUDY] = ['{}-{}'.format(p, s) for p, s in
zip(df[cxd.COL_PATIENT_ID], df[cxd.COL_STUDY_NUMBER])]
return df
def patient_study_image_count(df: pd.DataFrame, add_percentage: bool = False,
filtered: bool = False) -> pd.DataFrame:
"""Calculate count of patients, studies, and images, split by training/validation set."""
df = _add_aux_patient_study_column(df)
stats = df.groupby([cxd.COL_TRAIN_VALIDATION], as_index=True, observed=True).agg(
Patients=(cxd.COL_PATIENT_ID, pd.Series.nunique),
Studies=(COL_PATIENT_STUDY, pd.Series.nunique),
Images=(cxd.COL_VIEW_NUMBER, 'count'))
# Validate against expected CheXpert number when the DataFrame is not filtered
if not filtered:
assert stats[PATIENTS].sum() == cxd.PATIENT_NUM_TOTAL
assert stats[STUDIES].sum() == cxd.STUDY_NUM_TOTAL
assert stats[IMAGES].sum() == cxd.IMAGE_NUM_TOTAL
stats = pd.DataFrame(stats.stack())
stats = stats.rename(columns={0: COL_COUNT})
stats.index.names = [INDEX_NAME_SET, INDEX_NAME_ITEM]
if add_percentage:
stats = _add_percentage(stats, level=1)
return stats
def studies_per_patient(df: pd.DataFrame) -> pd.DataFrame:
"""Calculate the number of studies for each patient."""
    # The same study number may show up more than once for the same patient (a study that has
    # more than one image), so we need the unique count of studies in this case
stats = df.groupby([cxd.COL_TRAIN_VALIDATION, cxd.COL_PATIENT_ID], as_index=True,
observed=True).agg(
Studies=(cxd.COL_STUDY_NUMBER, pd.Series.nunique))
return stats
def images_per_patient(df: pd.DataFrame) -> pd.DataFrame:
"""Calculate the number of images for each patient."""
# Image (view) numbers may be repeated for the same patient (they are unique only within
# each study), thus in this case we need the overall count and not unique count
stats = df.groupby([cxd.COL_TRAIN_VALIDATION, cxd.COL_PATIENT_ID], as_index=True,
observed=True).agg(
Images=(cxd.COL_VIEW_NUMBER, 'count'))
return stats
def images_per_patient_sex(df: pd.DataFrame) -> pd.DataFrame:
"""Calculate the number of images for each patient, split by sex."""
# Image (view) numbers may be repeated for the same patient (they are unique only within
# each study), thus in this case we need the overall count and not unique count
stats = df.groupby([cxd.COL_TRAIN_VALIDATION, cxd.COL_SEX], as_index=True, observed=True).agg(
Patients=(cxd.COL_PATIENT_ID, pd.Series.nunique),
Images=(cxd.COL_VIEW_NUMBER, 'count'))
stats[COL_PERCENTAGE_PATIENTS] = stats.groupby(level=0)[PATIENTS].apply(_pct)
stats[COL_PERCENTAGE_IMAGES] = stats.groupby(level=0)[IMAGES].apply(_pct)
# Adjust the column order to a logical sequence
stats = stats[[PATIENTS, COL_PERCENTAGE_PATIENTS, IMAGES, COL_PERCENTAGE_IMAGES]]
# Improve presentation of the columns headers
columns = pd.MultiIndex.from_product([[PATIENTS, IMAGES],
[COL_COUNT, COL_PERCENTAGE]])
stats.columns = columns
return stats
def images_per_patient_binned(df: pd.DataFrame, filtered: bool = False) -> pd.DataFrame:
"""Calculate the binned number of images per patient, split by training/validation set."""
stats = images_per_patient(df)
bins = [0, 1, 2, 3, 4, 5, 10, 20, 30, 100]
bin_labels = ['1 image', '2 images', '3 images', '4 images', '5 images', '6 to 10 images',
'11 to 20 images', '21 to 30 images', 'More than 30 images']
num_images = 'Number of images'
stats[num_images] = pd.cut(stats.Images, bins=bins, labels=bin_labels, right=True)
group = stats.reset_index().groupby([cxd.COL_TRAIN_VALIDATION, num_images], as_index=True,
observed=True)
patient_summary = group.agg(Patients=(cxd.COL_PATIENT_ID, pd.Series.nunique))
if not filtered:
assert patient_summary.loc[cxd.TRAINING].sum()[0] == cxd.PATIENT_NUM_TRAINING
assert patient_summary.loc[cxd.VALIDATION].sum()[0] == cxd.PATIENT_NUM_VALIDATION
patient_summary = _add_percentage(patient_summary, level=0, cumulative=True)
image_summary = group.agg(Images=(IMAGES, 'sum'))
if not filtered:
assert image_summary.loc[cxd.TRAINING].sum()[0] == cxd.IMAGE_NUM_TRAINING
assert image_summary.loc[cxd.VALIDATION].sum()[0] == cxd.IMAGE_NUM_VALIDATION
image_summary = _add_percentage(image_summary, level=0, cumulative=True)
summary = patient_summary.join(image_summary, lsuffix=' ' + PATIENTS, rsuffix=' ' + IMAGES)
columns = pd.MultiIndex.from_product([[PATIENTS, IMAGES],
[COL_COUNT, COL_PERCENTAGE, COL_PERCENTAGE_CUMULATIVE]])
summary.columns = columns
return summary
def studies_summary_stats(df: pd.DataFrame) -> pd.DataFrame:
"""Calculate summary statistics for the number of studies per patient."""
stats = studies_per_patient(df)
summary = _summary_stats_by_set(stats, STUDIES)
return summary
def images_summary_stats(df: pd.DataFrame) -> pd.DataFrame:
"""Calculate summary statistics for the number of images per patient."""
stats = images_per_patient(df)
summary = _summary_stats_by_set(stats, IMAGES)
return summary
def label_image_frequency(df: pd.DataFrame) -> pd.DataFrame:
"""Calculate the number and percentage of times each observation appears in images."""
images_in_set = len(df[cxd.COL_VIEW_NUMBER])
observations = cxd.OBSERVATION_OTHER + cxd.OBSERVATION_PATHOLOGY
all_labels = [cxd.LABEL_POSITIVE, cxd.LABEL_NEGATIVE, cxd.LABEL_UNCERTAIN, cxd.LABEL_NO_MENTION]
col_names = [COL_LABEL_POSITIVE, COL_PERCENTAGE, COL_LABEL_NEGATIVE, COL_PERCENTAGE,
COL_LABEL_UNCERTAIN, COL_PERCENTAGE, COL_LABEL_NO_MENTION, COL_PERCENTAGE]
stats =
|
pd.DataFrame(index=observations, columns=col_names)
|
pandas.DataFrame
|
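images_per_patient_binned above leans on pd.cut with right-closed intervals; a small illustration of how raw image counts map onto those bins (hypothetical counts, not CheXpert data):

import pandas as pd

counts = pd.Series([1, 5, 10, 11, 30, 31])
bins = [0, 1, 2, 3, 4, 5, 10, 20, 30, 100]
labels = ['1 image', '2 images', '3 images', '4 images', '5 images', '6 to 10 images',
          '11 to 20 images', '21 to 30 images', 'More than 30 images']
# right=True puts 10 in '6 to 10 images', 11 in '11 to 20 images',
# 30 in '21 to 30 images' and 31 in 'More than 30 images'.
print(pd.cut(counts, bins=bins, labels=labels, right=True))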
import functions
import numpy as np
import pandas as pd
import streamlit as st
from tensorflow.keras.models import load_model
import time
from datetime import datetime
from sklearn.metrics import r2_score
st.set_page_config(page_title='FEX Forecasting',
layout="wide")
path = '~'
df = functions.import_main_df(f'{path}/02_Model/final_dataset.csv')
path2020 = f'{path}/02_Model/all_countries2020.csv'
first_column = ['Canada', 'Australia', 'Brazil', 'China', 'Denmark', 'Japan', 'Korea', 'Mexico', 'New Zealand',
'Norway', 'Sweden', 'Switzerland', 'South Africa', 'UK', 'US']
pageop = st.sidebar.radio(
"Go to",
('Best Model', 'Beta'))
if pageop == 'Best Model':
option = st.sidebar.selectbox(
'Choose A Country',
first_column)
    country_codes = {
        'Australia': 'AUD', 'Brazil': 'BRL', 'Switzerland': 'CHF', 'Canada': 'CND',
        'China': 'CNY', 'Denmark': 'DKK', 'UK': 'GBP', 'US': 'USD', 'Japan': 'JPY',
        'Korea': 'KRW', 'Mexico': 'MXN', 'Norway': 'NOK', 'New Zealand': 'NZD',
        'Sweden': 'SEK', 'South Africa': 'ZAR',
    }
    country_code = country_codes[option]
fitted_future_predictions = functions.import_main_df(f'data_files/fitted_future_predictions_{country_code}.csv')
fitted_future_predictions['index'] = pd.to_datetime(fitted_future_predictions['index'])
fitted_future_predictions = fitted_future_predictions.set_index('index')
fitted_training_predictions = functions.import_main_df(f'data_files/fitted_training_predictions_{country_code}.csv')
fitted_training_predictions['index'] = pd.to_datetime(fitted_training_predictions['index'])
fitted_training_predictions = fitted_training_predictions.set_index('index')
fitted_dataset_train = functions.import_main_df(f'data_files/fitted_dataset_train_{country_code}.csv')
fitted_dataset_train['index'] = pd.to_datetime(fitted_dataset_train['index'])
fitted_dataset_train = fitted_dataset_train.set_index('index')
r2 = r2_score(fitted_dataset_train.loc["2001-06-20":][f"{country_code}_USD"],
fitted_training_predictions.loc["2001-06-20":][f"{country_code}_USD"])
rmse = functions.mean_squared_error(fitted_dataset_train.loc["2001-06-20":][f"{country_code}_USD"],
fitted_training_predictions.loc["2001-06-20":][f"{country_code}_USD"], squared=False)
mse = functions.mean_squared_error(fitted_dataset_train.loc["2001-06-20":][f"{country_code}_USD"],
fitted_training_predictions.loc["2001-06-20":][f"{country_code}_USD"])
df2020 = functions.get_2020(fitted_future_predictions, country_code, path2020)
start_time2 = st.slider("Select Start Date", min_value=datetime(2000, 1, 3), max_value=datetime(2020, 11, 30),
value=datetime(2001, 6, 14), format="MM/DD/YY", key=f'only_{country_code}')
with st.beta_container():
st.bokeh_chart(functions.bokeh_plotting(fitted_future_predictions, country_code, fitted_training_predictions,
fitted_dataset_train, df2020, start_time2), use_container_width=False)
expander = st.beta_expander("Evaluation Metrics")
expander.write(f'R-squared: {r2}')
expander.write(f'Root Mean Square Error: {rmse}')
expander.write(f'Mean Square Error: {mse}')
###################
if pageop == 'Beta':
fitted_modelCND = load_model('data_files/trained_model_on_CND.h5', compile=True)
fitted_modelUSD = load_model('data_files/trained_model_on_USD.h5', compile=True)
st.text('Choose a Base Model')
left_column, right_column = st.beta_columns(2)
pressed = left_column.checkbox('Canada')
if pressed:
fitted_model = fitted_modelCND
country_code = 'CND'
fitted_future_predictions = functions.import_main_df(f'data_files/fitted_future_predictions_{country_code}.csv')
fitted_future_predictions['index'] = pd.to_datetime(fitted_future_predictions['index'])
fitted_future_predictions = fitted_future_predictions.set_index('index')
fitted_training_predictions = functions.import_main_df(f'data_files/fitted_training_predictions_{country_code}.csv')
fitted_training_predictions['index'] = pd.to_datetime(fitted_training_predictions['index'])
fitted_training_predictions = fitted_training_predictions.set_index('index')
fitted_dataset_train = functions.import_main_df(f'data_files/fitted_dataset_train_{country_code}.csv')
fitted_dataset_train['index'] = pd.to_datetime(fitted_dataset_train['index'])
fitted_dataset_train = fitted_dataset_train.set_index('index')
r2 = r2_score(fitted_dataset_train.loc["2001-06-20":][f"{country_code}_USD"],
fitted_training_predictions.loc["2001-06-20":][f"{country_code}_USD"])
rmse = functions.mean_squared_error(fitted_dataset_train.loc["2001-06-20":][f"{country_code}_USD"],
fitted_training_predictions.loc["2001-06-20":][f"{country_code}_USD"], squared=False)
mse = functions.mean_squared_error(fitted_dataset_train.loc["2001-06-20":][f"{country_code}_USD"],
fitted_training_predictions.loc["2001-06-20":][f"{country_code}_USD"])
df2020 = functions.get_2020(fitted_future_predictions, country_code, path2020)
start_time2 = st.slider("Select Start Date", min_value=datetime(2000, 1, 3), max_value=datetime(2020, 11, 30),
value=datetime(2001, 6, 14), format="MM/DD/YY", key=f'only_{country_code}')
with st.beta_container():
st.bokeh_chart(functions.bokeh_plotting(fitted_future_predictions, country_code, fitted_training_predictions,
fitted_dataset_train, df2020, start_time2), use_container_width=False)
pressed2 = right_column.checkbox('United States')
if pressed2:
fitted_model = fitted_modelUSD
country_code = 'USD'
fitted_future_predictions = functions.import_main_df(f'data_files/fitted_future_predictions_{country_code}.csv')
fitted_future_predictions['index'] = pd.to_datetime(fitted_future_predictions['index'])
fitted_future_predictions = fitted_future_predictions.set_index('index')
fitted_training_predictions = functions.import_main_df(f'data_files/fitted_training_predictions_{country_code}.csv')
fitted_training_predictions['index'] =
|
pd.to_datetime(fitted_training_predictions['index'])
|
pandas.to_datetime
|
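Each country branch above repeats the same three loads (read the CSV, parse the 'index' column as datetimes, set it as the index); a small helper capturing that pattern could remove the duplication (the helper name is illustrative, not part of the app):

import pandas as pd
import functions

def load_fitted_frame(kind, country_code):
    """Load one fitted CSV: 'future_predictions', 'training_predictions' or 'dataset_train'."""
    frame = functions.import_main_df(f'data_files/fitted_{kind}_{country_code}.csv')
    frame['index'] = pd.to_datetime(frame['index'])
    return frame.set_index('index')

# e.g. fitted_future_predictions = load_fitted_frame('future_predictions', country_code)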
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from functools import reduce
from tqdm import tqdm
from collections import OrderedDict
import pandas as pd
import torch
import torch.nn.functional as F
import numpy as np
import scipy
import warnings
import matplotlib.pyplot as plt
def model_output(model, dataloader, export_prob=True, export_feat=True, softmax=True, device=None,
feature_layer='pool', progress_bar=True):
"""
Function to get representation and probability of each trajectory in a loader. Increasing the batch_size of
dataloader can greatly reduce computation time.
:param model: str or pytorch model. If str, path to the model file.
:param dataloader: pytorch Dataloader, classification output will be created for each element in the loader. Pay
    attention to the attribute drop_last: if it is True, the last batch will not be processed. Increase batch_size to lower computation time.
:param export_prob: bool, whether to export classification output.
:param export_feat: bool, whether to export latent features. feature_layer defines the layer output to hook.
:param softmax: bool, whether to apply softmax to the classification output.
:param device: str, pytorch device. If None will try to use cuda, if not available will use cpu.
:param feature_layer: str, name of the model module from which to hook output if export_feat is True.
:return: A pandas DataFrame with columns: ID, Class. If export_prob, one column for each class of the output named:
'Prob_XXX' where XXX is the class name. If export_feat, one column for each element in the hooked layer output
named: 'Feat_I' where I is an increasing integer starting at 0.
"""
# Default arguments and checks
batch_size = dataloader.batch_size
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if dataloader.drop_last:
warnings.warn('dataloader.drop_last is True, some data might be discarded.')
    # If model is a path to a model file, load it; the remaining setup is shared.
    if isinstance(model, str):
        model = torch.load(model)
    model.eval()
    model.double()
    model.batch_size = batch_size
    model = model.to(device)
# Lists with results of every batch
lidt = []
llab = []
if export_prob:
lprob = []
if export_feat:
lfeat = []
def hook_feature(module, input, output):
lfeat.append(output.data.squeeze().cpu().numpy())
model._modules.get(feature_layer).register_forward_hook(hook_feature)
if progress_bar:
pbar = tqdm(total=len(dataloader))
nbatch = len(dataloader)
for ibatch, sample in enumerate(iter(dataloader)):
# Flag last batch, can have different size from the others
if ibatch + 1 == nbatch:
model.batch_size = len(sample['label'])
image_tensor, label, identifier = sample['series'], sample['label'], sample['identifier']
image_tensor = image_tensor.to(device)
# uni: batch, 1 dummy channel, length TS
# (1,1,length) for uni; (1,1,2,length) for bi
assert len(dataloader.dataset[0]['series'].shape) == 2
nchannel, univar_length = dataloader.dataset[0]['series'].shape
if nchannel == 1:
view_size = (model.batch_size, 1, univar_length)
elif nchannel >= 2:
view_size = (model.batch_size, 1, nchannel, univar_length)
image_tensor = image_tensor.view(view_size)
scores = model(image_tensor)
if softmax:
scores = F.softmax(scores, dim=1).data.squeeze()
# Store batch results
llab.append(label.data.cpu().numpy())
lidt.append(identifier)
if export_prob:
lprob.append(scores.data.squeeze().cpu().numpy())
if progress_bar:
pbar.update(1)
frames = []
# 1) Frame with ID
lidt = np.concatenate(lidt)
llab = np.concatenate(llab)
frames.append(pd.DataFrame(list(zip(lidt, llab)), columns=['ID', 'Class']))
# 2) Frame with proba
if export_prob:
nclass = lprob[0].shape[1] if batch_size > 1 else len(lprob[0])
lprob = np.vstack(lprob)
colnames = ['Prob_' + str(i) for i in range(nclass)]
frames.append(pd.DataFrame(lprob, columns=colnames))
# 3) Frame with features
if export_feat:
nfeats = lfeat[0].shape[1] if batch_size > 1 else len(lfeat[0])
lfeat = np.vstack(lfeat)
colnames = ['Feat_' + str(i) for i in range(nfeats)]
frames.append(
|
pd.DataFrame(lfeat, columns=colnames)
|
pandas.DataFrame
|
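A hedged usage sketch for model_output; the dataset, batch size, and model path are placeholders, and only the DataFrame columns documented in the docstring are assumed:

from torch.utils.data import DataLoader

# 'my_dataset' must yield dicts with 'series', 'label' and 'identifier' keys,
# as model_output above expects; it is a placeholder here.
loader = DataLoader(my_dataset, batch_size=64, shuffle=False, drop_last=False)
out = model_output('trained_model.pt', loader, export_prob=True, export_feat=True)
prob_cols = [c for c in out.columns if c.startswith('Prob_')]
print(out[['ID', 'Class'] + prob_cols].head())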
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng =
|
pd.period_range("2014-05-01", "2014-05-15", freq="D")
|
pandas.period_range
|
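A compact worked illustration of the tick-like arithmetic the daily tests above exercise (values follow directly from the period_range definitions; behaviour as in the pandas version these tests target):

import pandas as pd

rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
three_days = pd.Timedelta(days=3)
# Adding a timedelta that is a multiple of the tick frequency shifts every Period.
assert (rng + three_days)[0] == pd.Period("2014-05-04", freq="D")
assert (rng - three_days)[-1] == pd.Period("2014-05-12", freq="D")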
import unittest, sys, os
from datetime import datetime
import pandas as pd
import src.pandaserv as pandaserv
import numpy as np
class Testpandaserv(unittest.TestCase):
def setUp(self):
self.dates = pd.date_range('20130101', periods=6)
self.df = pd.DataFrame(
np.random.randn(6,4),
index=self.dates,
columns=list('ABCD'))
self.df2 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : '20140101' })
self.moneydf = pd.DataFrame({'transactions':
['AUD1,234.01',
'-AUD1,234.01',
'AUD234.01',
'AUD1,234',],
'Order Total':
['AUD1,234.01',
'-AUD1,234.01',
'AUD234.01',
'AUD1,234',]})
self.datesdf =
|
pd.DataFrame(self.dates)
|
pandas.DataFrame
|
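The moneydf fixture above carries currency strings such as 'AUD1,234.01'; a hedged sketch of the kind of cleaning src.pandaserv presumably performs (the real pandaserv API is not shown in this excerpt):

import pandas as pd

def clean_money(series, currency='AUD'):
    """Strip the currency code and thousands separators, keep the sign, cast to float."""
    return (series.str.replace(currency, '', regex=False)
                  .str.replace(',', '', regex=False)
                  .astype(float))

# clean_money(pd.Series(['AUD1,234.01', '-AUD1,234.01'])) -> 1234.01, -1234.01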