prompt | completion | api
---|---|---
string (length 19 to 1.03M) | string (length 4 to 2.12k) | string (length 8 to 90)
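Each row pairs a truncated code `prompt` with the `completion` that fills it in and the pandas `api` being exercised. A minimal sketch of loading rows with this schema for inspection, assuming the dump is exported as JSON Lines (the file name below is hypothetical):

```python
import pandas as pd

# Hypothetical export path; the actual storage format of this dataset is not given here.
rows = pd.read_json("pandas_api_completions.jsonl", lines=True)
print(rows[["prompt", "completion", "api"]].head())
print(rows["api"].value_counts().head(10))  # most frequently exercised pandas APIs
```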
import pytest
import unittest
from unittest import mock
from ops.tasks.anomalyDetection import anomalyService
from anomaly.models import Anomaly
from pandas import Timestamp
from decimal import Decimal
from mixer.backend.django import mixer
import pandas as pd
@pytest.mark.django_db(transaction=True)
def test_createAnomalyService(client, mocker):
fakedata = [{'ds': Timestamp('2021-06-01 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-02 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-03 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-04 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-05 00:00:00+0000', tz='UTC'),
'y': Decimal('4.000000000')},
{'ds': Timestamp('2021-06-06 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-07 00:00:00+0000', tz='UTC'),
'y': Decimal('4.000000000')},
{'ds': Timestamp('2021-06-08 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-09 00:00:00+0000', tz='UTC')  # completion; api: pandas.Timestamp
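A minimal sketch of turning such `ds`/`y` records into a numeric time-series frame (assuming records shaped like the `fakedata` list above; the `Decimal` values are cast to float and the timestamps made tz-naive, as many forecasting libraries expect):

```python
import pandas as pd
from decimal import Decimal
from pandas import Timestamp

records = [
    {"ds": Timestamp("2021-06-01 00:00:00+0000", tz="UTC"), "y": Decimal("1.000000000")},
    {"ds": Timestamp("2021-06-02 00:00:00+0000", tz="UTC"), "y": Decimal("0E-9")},
]
df = pd.DataFrame(records)
df["y"] = df["y"].astype(float)            # Decimal -> float64
df["ds"] = df["ds"].dt.tz_localize(None)   # drop timezone for tz-naive consumers
print(df.dtypes)
```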
import pandas
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import preprocessing
from setlist import setlist
import sys
import os
path = os.getcwd()
# str.strip('complete_model') would strip characters, not the directory name; drop the suffix explicitly
if path.endswith('complete_model'):
    path = path[:-len('complete_model')]
sys.path.append(path)
from helper import svm,misc_helper
class train_test_generator:
def generate(self):
dataFrame = pandas.read_csv('../../CSV_Data/master_dataset.csv')
feature_columns = list(dataFrame.columns.values)[0:-1]
features,target = misc_helper.split_feature_target(dataFrame)
train,test,train_target,test_target = train_test_split(features,target,test_size = 0.2,stratify=target)
train,test = misc_helper.get_scaled_data(train,test)
#Initial Datasets
train = pandas.DataFrame(train,columns=feature_columns)
train.to_csv('datasets/train.csv',index=False)
train_target = pandas.DataFrame(train_target,columns=['label'])
train_target.to_csv('datasets/train_target.csv',index=False)
test = pandas.DataFrame(test,columns=feature_columns)
test.to_csv('datasets/test.csv',index=False)
test_target = pandas.DataFrame(test_target,columns=['label'])
test_target.to_csv('datasets/test_target.csv',index=False)
#
train_target_sets = train_target.copy(deep=True)
test_target_sets = test_target.copy(deep=True)
for i in range(len(setlist)):
train_target_sets['label'][train_target['label'].isin(setlist[i])] = str(i)
train_target_sets.to_csv('datasets/train_target_sets.csv',index=False)
for i in range(len(setlist)):
test_target_sets['label'][test_target['label'].isin(setlist[i])] = str(i)
test_target_sets.to_csv('datasets/test_target_sets.csv',index=False)
#Diving into sets
train_sets_features = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
train_sets_targets = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
test_sets_features = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
test_sets_targets = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
for index,row in train.iterrows():
setIndex = int(train_target_sets['label'][index])
if setIndex < len(train_sets_features):
train_sets_features[setIndex].append(row)
train_sets_targets[setIndex].append(train_target['label'][index])
for index,row in test.iterrows():
setIndex = int(test_target_sets['label'][index])
if setIndex < len(test_sets_features):
test_sets_features[setIndex].append(row)
test_sets_targets[setIndex].append(test_target['label'][index])
for i in range(len(train_sets_features)):
df = pandas.DataFrame(train_sets_features[i],columns=feature_columns)  # api: pandas.DataFrame
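The label-to-set mapping above assigns through chained indexing (`frame['label'][mask] = ...`), which newer pandas flags with `SettingWithCopyWarning`; a small sketch of the equivalent `.loc` form on toy data (the `setlist` contents here are made up for illustration):

```python
import pandas as pd

setlist = [["cat", "dog"], ["car"], ["tree", "bush"]]      # stand-in for the imported setlist
train_target = pd.DataFrame({"label": ["dog", "car", "tree", "cat"]})
train_target_sets = train_target.copy(deep=True)
for i, group in enumerate(setlist):
    train_target_sets.loc[train_target["label"].isin(group), "label"] = str(i)
print(train_target_sets["label"].tolist())                 # ['0', '1', '2', '0']
```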
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.analysis import analysis
from quetzal.engine import engine, linearsolver_utils, nested_logit
from quetzal.io import export
from quetzal.model import model, summarymodel, transportmodel
from syspy.spatial import geometries, spatial
from syspy.syspy_utils import neighbors
from tqdm import tqdm
def read_hdf(filepath):
m = AnalysisModel()
m.read_hdf(filepath)
return m
def read_json(folder, **kwargs):
m = AnalysisModel()
m.read_json(folder, **kwargs)
return m
track_args = model.track_args
log = model.log
class AnalysisModel(summarymodel.SummaryModel):
def _aggregate(self, nb_clusters, cluster_column=None, volume_column='volume'):
"""
Aggregates a model (in order to perform optimization)
* requires: nb_clusters, cluster_series, od_stack, indicator
* builds: cluster_series, aggregated model, reduced indicator
"""
self.agg = self.copy()
self.agg.preparation_clusterize_zones(
nb_clusters, cluster_column, is_od_stack=True,
volume_columns=[volume_column], volume_od_columns=[volume_column]
)
self.cluster_series = self.agg.cluster_series
self.agg.indicator = linearsolver_utils.reduce_indicator(
self.indicator,
self.cluster_series,
self.od_stack,
volume_column=volume_column
)
def _disaggregate(self):
self.pivot_stack_matrix, self.od_stack = linearsolver_utils.extrapolate(
self.agg.pivot_stack_matrix,
self.od_stack,
self.cluster_series
)
def _build_pivot_stack_matrix(self, constrained_links, linprog_kwargs, **kwargs):
"""
Builds the pivot_stack_matrix. Performs the optimization.
* requires: constrained_links, od_stack, indicator
* builds: pivot_stack_matrix
"""
self.pivot_stack_matrix = linearsolver_utils.linearsolver(
self.indicator,
constrained_links,
self.od_stack,
**linprog_kwargs,
**kwargs
)
def _analysis_road_link_path(self, include_road_footpaths=False):
"""
Build road_link_path column of pt_los based on link_path
"""
try:
link_to_road_links = self.links['road_link_list'].to_dict()
except KeyError:
raise KeyError('road_link_list column missing: links must be networkcasted.')
self.pt_los['road_link_path'] = self.pt_los['link_path'].apply(
lambda x: [i for l in map(link_to_road_links.get, x) if l is not None for i in l]
)
if include_road_footpaths:
# Footpath to road_link_path
road_links_dict = self.road_links.reset_index().set_index(['a', 'b'])[self.road_links.index.name].to_dict()
nan_loc = self.pt_los['footpaths'].isnull()
self.pt_los.loc[nan_loc, 'footpaths'] = [[]] * nan_loc.sum()
self.pt_los['road_link_path'] += self.pt_los['footpaths'].apply(
lambda x: [a for a in list(map(lambda l: road_links_dict.get(l), x)) if a is not None]
)
def analysis_linear_solver(
self,
constrained_links,
nb_clusters=20,
cluster_column=None,
link_path_column='link_path',
linprog_kwargs={
'bounds_A': [0.75, 1.5],
'bounds_emissions': [0.8, 1.2],
'bounds_tot_emissions': [0.95, 1.05],
'pas_distance': 200,
'maxiter': 3000,
'tolerance': 1e-5
},
**kwargs,
):
"""
Performs the optimization on a model object once it is built and run,
in order to match the observed volumes.
* requires: od_stack, constrained_links
* builds: aggregated model, pivot_stack_matrix
The goal of linear_solver is to modify the matrix of volumes per OD
by multiplying it by a pivot matrix, so that it matches the observations
collected on certain nodes/links of the network.
Steps:
0. Build the indicator matrix (which flags the presence of the
constrained links in each OD pair).
1. Aggregate the model.
2. Solve the linear optimization problem to build the
pivot_stack_matrix (pivot matrix). More details in
linearsolver_utils.
3. Disaggregate the pivot matrix to return to the base model.
"""
self.indicator = linearsolver_utils.build_indicator(
self.od_stack,
constrained_links,
link_path_column=link_path_column
)
if len(self.zones) < nb_clusters:
self._build_pivot_stack_matrix(constrained_links, linprog_kwargs, **kwargs)
else:
self._aggregate(nb_clusters, cluster_column, **kwargs)
self.agg._build_pivot_stack_matrix(constrained_links, linprog_kwargs, **kwargs)
self._disaggregate()
def analysis_pt_route_type(self, hierarchy):
route_type_dict = self.links['route_type'].to_dict()
def higher_route_type(route_types):
for mode in hierarchy:
if mode in route_types:
return mode
return hierarchy[-1]
self.pt_los['route_types'] = self.pt_los['link_path'].apply(
lambda p: tuple({route_type_dict[l] for l in p})
)
self.pt_los['route_type'] = self.pt_los['route_types'].apply(higher_route_type)
try:
self.pr_los['route_types'] = self.pr_los['link_path'].apply(
lambda p: ('car',) + tuple({route_type_dict[l] for l in p})
)
self.pr_los['route_type'] = self.pr_los['route_types'].apply(higher_route_type)
except (AttributeError, KeyError):
pass
def analysis_car_los(self):
def path_to_ntlegs(path):
try:
return [(path[0], path[1]), (path[-2], path[-1])]
except IndexError:
return []
def node_path_to_link_path(road_node_list, ab_indexed_dict):
tuples = list(zip(road_node_list[:-1], road_node_list[1:]))
road_link_list = [ab_indexed_dict[t] for t in tuples]
return road_link_list
road_links = self.road_links
road_links['index'] = road_links.index
indexed = road_links.set_index(['a', 'b']).sort_index()
ab_indexed_dict = indexed['index'].to_dict()
los = self.car_los
los['node_path'] = los['path'].apply(lambda p: p[1:-1])
los['link_path'] = [node_path_to_link_path(p, ab_indexed_dict) for p in los['node_path']]
los['ntlegs'] = los['path'].apply(path_to_ntlegs)
self.car_los = los
def analysis_pt_los(self, walk_on_road=False):
analysis_nodes = pd.concat([self.nodes, self.road_nodes]) if walk_on_road else self.nodes
self.pt_los = analysis.path_analysis_od_matrix(
od_matrix=self.pt_los,
links=self.links,
nodes=analysis_nodes,
centroids=self.zones,
)
def lighten_car_los(self):
self.car_los = self.car_los.drop(
['node_path', 'link_path', 'ntlegs'],
axis=1, errors='ignore'
)
def lighten_pt_los(self):
to_drop = [
'alighting_links', 'alightings', 'all_walk', 'boarding_links', 'boardings',
'footpaths', 'length_link_path', 'link_path', 'node_path', 'ntlegs',
'time_link_path', 'transfers'
]
self.pt_los = self.pt_los.drop(to_drop, axis=1, errors='ignore')
def lighten_los(self):
try:
self.lighten_pt_los()
except AttributeError:
pass
try:
self.lighten_pr_los()
except AttributeError:
pass
try:
self.lighten_car_los()
except AttributeError:
pass
def lighten(self):
# to be completed
self.lighten_los()
def analysis_car_route_type(self):
self.car_los['route_types'] = [tuple(['car']) for i in self.car_los.index]
self.car_los['route_type'] = 'car'
def analysis_pr_time(self, boarding_time=None):
footpaths = self.footpaths
road_links = self.road_links.copy()
road_to_transit = self.road_to_transit.copy()
road_to_transit['length'] = road_to_transit['distance']
footpaths = pd.concat([road_to_transit, self.footpaths])
access = pd.concat([self.zone_to_road, self.zone_to_transit])
d = access.set_index(['a', 'b'])['time'].to_dict()
self.pr_los['access_time'] = self.pr_los['ntlegs'].apply(
lambda l: sum([d[t] for t in l]))
d = footpaths.set_index(['a', 'b'])['time'].to_dict()
self.pr_los['footpath_time'] = self.pr_los['footpaths'].apply(
lambda l: sum([d.get(t, 0) for t in l]))
d = road_links.set_index(['a', 'b'])['time'].to_dict()
self.pr_los['car_time'] = self.pr_los['footpaths'].apply(
lambda l: sum([d.get(t, 0) for t in l]))
d = self.links['time'].to_dict()
self.pr_los['pt_time'] = self.pr_los['link_path'].apply(
lambda l: sum([d[t] for t in l]))
d = self.links['headway'].to_dict()
self.pr_los['waiting_time'] = self.pr_los['boarding_links'].apply(
lambda l: sum([d[t] / 2 for t in l]))
self.pr_los['boarding_time'] = self.pr_los['boarding_links'].apply(
lambda t: len(t) * boarding_time)
self.pr_los['in_vehicle_time'] = self.pr_los[['pt_time', 'car_time']].T.sum()
self.pr_los['time'] = self.pr_los[
['access_time', 'footpath_time', 'waiting_time', 'boarding_time', 'in_vehicle_time']
].T.sum()
def analysis_pr_length(self):
footpaths = self.footpaths
road_links = self.road_links.copy()
road_to_transit = self.road_to_transit.copy()
road_to_transit['length'] = road_to_transit['distance']
footpaths = pd.concat([road_to_transit, self.footpaths])
access = pd.concat([self.zone_to_road, self.zone_to_transit])  # api: pandas.concat
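The time and length computations above repeatedly turn an edge table into an `(a, b) -> value` dictionary and sum it along a path; a minimal standalone sketch of that pattern on toy data (not the quetzal objects themselves):

```python
import pandas as pd

edges = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "time": [10.0, 12.5, 8.0]})
time_by_ab = edges.set_index(["a", "b"])["time"].to_dict()   # {(1, 2): 10.0, ...}

paths = pd.Series([[(1, 2), (2, 3)], [(3, 4)]])
path_time = paths.apply(lambda legs: sum(time_by_ab.get(t, 0) for t in legs))
print(path_time.tolist())   # [22.5, 8.0]
```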
try:
import pandas as pd
except ImportError:
pd = None
if pd:
import numpy as np
from . import Converter, Options
class PandasDataFrameConverter(Converter):
writes_types = pd.DataFrame
@classmethod
def base_reader(cls, options):
return (
super(PandasDataFrameConverter, cls).base_reader(
Options(options)
.override(ndim=2)
)
)
@classmethod
def read_value(cls, value, options):
index = options.get('index', 1)
header = options.get('header', 1)
dtype = options.get('dtype', None)
copy = options.get('copy', False)
# build dataframe with only columns (no index) but correct header
if header == 1:
columns = pd.Index(value[0])
elif header > 1:
columns = pd.MultiIndex.from_arrays(value[:header])
else:
columns = None
df = pd.DataFrame(value[header:], columns=columns, dtype=dtype, copy=copy)
# handle index by resetting the index to the index first columns
# and renaming the index according to the name in the last row
if index > 0:
# rename uniquely the index columns to some never used name for column
# we do not use the column name directly as it would cause issues if several
# columns have the same name
df.columns = pd.Index(range(len(df.columns)))
df.set_index(list(df.columns)[:index], inplace=True)
df.index.names = pd.Index(value[header - 1][:index] if header else [None]*index)
if header:
df.columns = columns[index:]
else:
df.columns = pd.Index(range(len(df.columns)))
return df
@classmethod
def write_value(cls, value, options):
index = options.get('index', True)
header = options.get('header', True)
index_names = value.index.names
index_names = ['' if i is None else i for i in index_names]
index_levels = len(index_names)
if index:
if value.index.name in value.columns:
# Prevents column name collision when resetting the index
value.index.rename(None, inplace=True)
value = value.reset_index()
if header:
if isinstance(value.columns, pd.MultiIndex):
columns = list(zip(*value.columns.tolist()))
columns = [list(i) for i in columns]
# Move index names right above the index
if index:
for c in columns[:-1]:
c[:index_levels] = [''] * index_levels
columns[-1][:index_levels] = index_names
else:
columns = [value.columns.tolist()]
if index:
columns[0][:index_levels] = index_names
value = columns + value.values.tolist()
else:
value = value.values.tolist()
return value
PandasDataFrameConverter.register(pd.DataFrame)
class PandasSeriesConverter(Converter):
writes_types = pd.Series
@classmethod
def read_value(cls, value, options):
index = options.get('index', 1)
header = options.get('header', True)
dtype = options.get('dtype', None)
copy = options.get('copy', False)
if header:
columns = value[0]
if not isinstance(columns, list):
columns = [columns]
data = value[1:]
else:
columns = None
data = value
df = pd.DataFrame(data, columns=columns, dtype=dtype, copy=copy)
if index:
df.columns = pd.Index(range(len(df.columns)))
df.set_index(list(df.columns)[:index], inplace=True)
df.index.names = pd.Index(value[header - 1][:index] if header else [None] * index)  # api: pandas.Index
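A minimal sketch of the read path implemented above for the common case of a single header row and a single index column, with a plain 2D list standing in for the range value:

```python
import pandas as pd

value = [
    ["date", "price", "volume"],
    ["2021-01-04", 100.0, 2000],
    ["2021-01-05", 101.5, 1800],
]
header, index = 1, 1
df = pd.DataFrame(value[header:], columns=pd.Index(value[0]))
df = df.set_index(list(df.columns)[:index])
print(df)
```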
import time
import numpy as np
import pandas as pd
def add_new_category(x):
"""
Aimed at 'trafficSource.keyword' to tidy things up a little
"""
x = str(x).lower()
if x == 'nan':
return 'nan'
x = ''.join(x.split())
if r'provided' in x:
return 'not_provided'
if r'youtube' in x or r'you' in x or r'yo' in x or r'tub' in x or r'yout' in x or r'y o u' in x:
return 'youtube'
if r'google' in x or r'goo' in x or r'gle' in x:
return 'google'
else:
return 'other'
# Dump cleaned data to parquets for later.
train_df = pd.read_parquet('input/cleaned/train.parquet.gzip')
test_df = pd.read_parquet('input/cleaned/test.parquet.gzip')
# Remove target col.
y_train = train_df['totals.transactionRevenue'].values
train_df = train_df.drop(['totals.transactionRevenue'], axis=1)
# Join datasets for rowise feature engineering.
trn_len = train_df.shape[0]
merged_df = pd.concat([train_df, test_df])
num_cols = ["totals.hits", "totals.pageviews", "visitNumber", "visitStartTime"]
for col in num_cols:
merged_df[col] = merged_df[col].astype(float)
merged_df['diff_visitId_time'] = merged_df['visitId'] - merged_df['visitStartTime']
merged_df['diff_visitId_time'] = (merged_df['diff_visitId_time'] != 0).astype(float)
merged_df['totals.hits'] = merged_df['totals.hits'].astype(float)
# Build Time based features.
merged_df['formated_date'] = pd.to_datetime(merged_df['date'], format='%Y%m%d')
merged_df['month'] = pd.DatetimeIndex(merged_df['formated_date']).month
merged_df['year'] = pd.DatetimeIndex(merged_df['formated_date']).year
merged_df['day'] = pd.DatetimeIndex(merged_df['formated_date']).day
merged_df['quarter'] = pd.DatetimeIndex(merged_df['formated_date']).quarter
merged_df['weekday'] = pd.DatetimeIndex(merged_df['formated_date']).weekday
merged_df['weekofyear'] = pd.DatetimeIndex(merged_df['formated_date']).weekofyear
merged_df['is_month_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_month_start
merged_df['is_month_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_month_end
merged_df['is_quarter_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_quarter_start
merged_df['is_quarter_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_quarter_end
merged_df['is_year_start'] = pd.DatetimeIndex(merged_df['formated_date']).is_year_start
merged_df['is_year_end'] = pd.DatetimeIndex(merged_df['formated_date']).is_year_end
merged_df['month_unique_user_count'] = merged_df.groupby('month')['fullVisitorId'].transform('nunique')
merged_df['day_unique_user_count'] = merged_df.groupby('day')['fullVisitorId'].transform('nunique')
merged_df['weekday_unique_user_count'] = merged_df.groupby('weekday')['fullVisitorId'].transform('nunique')
merged_df['visitStartTime'] = pd.to_datetime(merged_df['visitStartTime'], unit='s')
merged_df['hour'] = pd.DatetimeIndex(merged_df['visitStartTime'])  # api: pandas.DatetimeIndex
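The calendar features above call `pd.DatetimeIndex` once per attribute; an equivalent pattern converts once and reads attributes through the `.dt` accessor (sketch on a toy frame; `weekofyear` is deprecated in recent pandas in favour of `isocalendar().week`):

```python
import pandas as pd

df = pd.DataFrame({"date": [20170801, 20170802]})
df["formated_date"] = pd.to_datetime(df["date"], format="%Y%m%d")
dt = df["formated_date"].dt
df["month"], df["day"], df["weekday"] = dt.month, dt.day, dt.weekday
df["weekofyear"] = dt.isocalendar().week
print(df)
```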
from alphaVantageAPI.alphavantage import AlphaVantage
from unittest import TestCase
from unittest.mock import patch
from pandas import DataFrame, read_csv
from .utils import Path
from .utils import Constant as C
from .utils import load_json, _mock_response
## Python 3.7 + Pandas DeprecationWarning
# /alphaVantageAPI/env/lib/python3.7/site-packages/pandas/core/frame.py:7476:
# DeprecationWarning: Using or importing the ABCs from "collections" instead of from "collections.abc" is deprecated, and in 3.8 it will stop working
#   elif isinstance(data[0], collections.Mapping):
class TestAlphaVantageAPI(TestCase):
@classmethod
def setUpClass(cls):
cls.test_data_path = C.TEST_DATA_PATH
# Set premium to True to avoid API throttling for testing
av = AlphaVantage(api_key=C.API_KEY_TEST, premium=True)
# Minimum parameters
cls.fx_parameters = {"function":"CURRENCY_EXCHANGE_RATE", "from_currency":"USD", "to_currency":"JPY"}
cls.fx_daily_parameters = {"function":"FX_DAILY", "from_currency":"EUR", "to_currency":"USD"}
cls.fx_intraday_parameters = {"function":"FX_INTRADAY", "from_currency":"EUR", "to_currency":"USD"}
cls.fx_monthly_parameters = {"function":"FX_MONTHLY", "from_currency":"EUR", "to_currency":"USD"}
cls.fx_weekly_parameters = {"function":"FX_WEEKLY", "from_currency":"EUR", "to_currency":"USD"}
cls.data_parameters = {"function":"TIME_SERIES_DAILY_ADJUSTED", "symbol":C.API_DATA_TEST}
cls.intraday_parameters = {"function":"TIME_SERIES_INTRADAY", "symbol":C.API_DATA_TEST}
cls.indicator_parameters = {"function":"RSI", "symbol":C.API_DATA_TEST, "interval":"weekly", "series_type":"open", "time_period":10}
cls.digital_parameters = {"function":"DIGITAL_CURRENCY_DAILY", "symbol":C.API_DIGITAL_TEST, "market":"CNY"}
cls.digital_rating_parameters = {"function":"CRYPTO_RATING", "symbols":C.API_DIGITAL_TEST}
cls.global_quote_parameters = {"function":"GLOBAL_QUOTE", "symbols":C.API_DIGITAL_TEST}
cls.overview_parameters = {"function":"OVERVIEW", "symbols":C.API_FUNDA_TEST}
cls.balance_parameters = {"function":"BALANCE_SHEET", "symbols":C.API_FUNDA_TEST}
cls.income_parameters = {"function":"INCOME_STATEMENT", "symbols":C.API_FUNDA_TEST}
cls.cashflow_parameters = {"function":"CASH_FLOW", "symbols":C.API_FUNDA_TEST}
cls.earnings_parameters = {"function": "EARNINGS_CALENDAR"}
cls.ipos_parameters = {"function": "IPO_CALENDAR"}
cls.listing_parameters = {"function": "LISTING_STATUS"}
# json files of sample data
cls.json_fx = load_json(cls.test_data_path / "mock_fx.json")
cls.json_fx_daily = load_json(cls.test_data_path / "mock_fx_daily.json")
cls.json_fx_intraday = load_json(cls.test_data_path / "mock_fx_intraday.json")
cls.json_fx_monthly = load_json(cls.test_data_path / "mock_fx_monthly.json")
cls.json_fx_weekly = load_json(cls.test_data_path / "mock_fx_weekly.json")
cls.json_data = load_json(cls.test_data_path / "mock_data.json")
cls.json_indicator = load_json(cls.test_data_path / "mock_indicator.json")
cls.json_digital = load_json(cls.test_data_path / "mock_digital.json")
cls.json_digital_rating = load_json(cls.test_data_path / "mock_digital_rating.json")
cls.json_global_quote = load_json(cls.test_data_path / "mock_global_quote.json")
cls.json_overview = load_json(cls.test_data_path / "mock_overview.json")
cls.json_balance = load_json(cls.test_data_path / "mock_balance_sheet.json")
cls.json_income = load_json(cls.test_data_path / "mock_income_statement.json")
cls.json_cashflow = load_json(cls.test_data_path / "mock_cash_flow.json")
# csv files of sample data
cls.csv_earnings_cal = read_csv(cls.test_data_path / "mock_earnings_cal.csv")
cls.csv_ipos_cal = read_csv(cls.test_data_path / "mock_ipos_cal.csv")
cls.csv_delisted = read_csv(cls.test_data_path / "mock_delisted_status.csv")
cls.csv_listed = read_csv(cls.test_data_path / "mock_listed_status.csv")
# Pandas DataFrames of sample data
cls.df_fx = av._to_dataframe("CURRENCY_EXCHANGE_RATE", cls.json_fx)
cls.df_fx_daily = av._to_dataframe("FX_DAILY", cls.json_fx_daily)
cls.df_fx_intraday = av._to_dataframe("FX_INTRADAY", cls.json_fx_intraday)
cls.df_fx_monthly = av._to_dataframe("FX_MONTHLY", cls.json_fx_monthly)
cls.df_fx_weekly = av._to_dataframe("FX_WEEKLY", cls.json_fx_weekly)
cls.df_data = av._to_dataframe("TIME_SERIES_DAILY_ADJUSTED", cls.json_data)
cls.df_indicator = av._to_dataframe("RSI", cls.json_indicator)
cls.df_digital = av._to_dataframe("DIGITAL_CURRENCY_DAILY", cls.json_digital)
cls.df_digital_rating = av._to_dataframe("CRYPTO_RATING", cls.json_digital_rating)
cls.df_global_quote = av._to_dataframe("GLOBAL_QUOTE", cls.json_global_quote)
cls.df_overview = av._to_dataframe("OVERVIEW", cls.json_overview)
cls.df_balance = av._to_dataframe("BALANCE_SHEET", cls.json_balance)
cls.df_income = av._to_dataframe("INCOME_STATEMENT", cls.json_income)
cls.df_cashflow = av._to_dataframe("CASH_FLOW", cls.json_cashflow)
cls.df_earnings = DataFrame(cls.csv_earnings_cal)
cls.df_ipos = DataFrame(cls.csv_ipos_cal)
cls.df_delisted = DataFrame(cls.csv_delisted)  # api: pandas.DataFrame
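Note that `read_csv` already returns a `DataFrame`, so the `DataFrame(cls.csv_...)` wrappers above only re-wrap an existing frame; a quick check of that equivalence:

```python
import io
from pandas import DataFrame, read_csv

csv = read_csv(io.StringIO("symbol,name\nIBM,International Business Machines"))
wrapped = DataFrame(csv)
print(isinstance(csv, DataFrame), wrapped.equals(csv))   # True True
```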
"""Unit tests for orbitpy.coveragecalculator.gridcoverage class.
``TestGridCoverage`` class:
* ``test_execute_0``: Test format of output access files.
* ``test_execute_1``: Roll Circular sensor tests
* ``test_execute_2``: Yaw Circular sensor tests
* ``test_execute_3``: Pitch Circular sensor tests
* ``test_execute_4``: Roll Rectangular sensor tests
* ``test_execute_5``: Pitch Rectangular sensor tests
* ``test_execute_6``: Satellite-bus orientation vs sensor orientation tests
* ``test_execute_7``: Test spacecraft with multiple sensors.
* ``test_execute_8``: Test FOV vs FOR coverage. Coverage of FOR >= Coverage of FOV.
* ``test_execute_9``: Test coverage with DOUBLE_ROLL_ONLY maneuver, which will result in 2 ``ViewGeometry`` objects for the field-of-regard.
"""
import json
import os, shutil
import sys
import unittest
import pandas as pd
import random
import warnings
from orbitpy.coveragecalculator import CoverageOutputInfo, GridCoverage
from orbitpy.grid import Grid
from orbitpy.util import Spacecraft
from orbitpy.propagator import PropagatorFactory
sys.path.append('../')
from util.spacecrafts import spc1_json, spc4_json, spc5_json
RE = 6378.137 # radius of Earth in kilometers
class TestGridCoverage(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create new working directory to store output of all the class functions.
cls.dir_path = os.path.dirname(os.path.realpath(__file__))
cls.out_dir = os.path.join(cls.dir_path, 'temp')
if os.path.exists(cls.out_dir):
shutil.rmtree(cls.out_dir)
os.makedirs(cls.out_dir)
# make propagator
factory = PropagatorFactory()
cls.step_size = 1
cls.j2_prop = factory.get_propagator({"@type": 'J2 ANALYTICAL PROPAGATOR', "stepSize": cls.step_size})
def test_from_dict(self):
o = GridCoverage.from_dict({ "grid":{"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2},
"spacecraft": json.loads(spc1_json),
"cartesianStateFilePath":"../../state.csv",
"@id": 12})
self.assertEqual(o._id, 12)
self.assertEqual(o._type, 'GRID COVERAGE')
self.assertEqual(o.grid, Grid.from_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2}))
self.assertEqual(o.spacecraft, Spacecraft.from_json(spc1_json))
self.assertEqual(o.state_cart_file, "../../state.csv")
def test_to_dict(self): #TODO
pass
def test_execute_0(self):
""" Check the produced access file format.
"""
# setup spacecraft with some parameters setup randomly
duration=0.05
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+random.uniform(350,850),
"ecc": 0, "inc": random.uniform(0,180), "raan": random.uniform(0,360),
"aop": random.uniform(0,360), "ta": random.uniform(0,360)}
}
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": random.uniform(5,35) },
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "@id":"bs1", "@type":"Basic Sensor"}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 1})
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access) # the first instrument, mode available in the spacecraft is considered for the coverage calculation.
# check the outputs
cov_calc_type = pd.read_csv(out_file_access, nrows=1, header=None).astype(str) # 1st row contains the coverage calculation type
cov_calc_type = str(cov_calc_type[0][0])
self.assertEqual(cov_calc_type, 'GRID COVERAGE')
epoch_JDUT1 = pd.read_csv(out_file_access, skiprows = [0], nrows=1, header=None).astype(str) # 2nd row contains the epoch
epoch_JDUT1 = float(epoch_JDUT1[0][0].split()[3])
self.assertEqual(epoch_JDUT1, 2458265.0)
_step_size = pd.read_csv(out_file_access, skiprows = [0,1], nrows=1, header=None).astype(str) # 3rd row contains the stepsize
_step_size = float(_step_size[0][0].split()[4])
self.assertAlmostEqual(_step_size, self.step_size)
_duration = pd.read_csv(out_file_access, skiprows = [0,1,2], nrows=1, header=None).astype(str) # 4th row contains the mission duration
_duration = float(_duration[0][0].split()[4])
self.assertAlmostEqual(_duration, duration)
column_headers = pd.read_csv(out_file_access, skiprows = [0,1,2,3], nrows=1, header=None).astype(str) # 5th row contains the columns headers
self.assertEqual(column_headers.iloc[0][0],"time index")
self.assertEqual(column_headers.iloc[0][1],"GP index")
self.assertEqual(column_headers.iloc[0][2],"lat [deg]")
self.assertEqual(column_headers.iloc[0][3],"lon [deg]")
# check that the grid indices are interpreted correctly
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
access_data = access_data.round(3)
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(lat==access_data['lat [deg]'].tolist())
self.assertTrue(lon==access_data['lon [deg]'].tolist())
else:
warnings.warn('No data was generated in test_execute_0(.). Run the test again.')
def test_execute_1(self):
""" Orient the sensor with roll, and an equatorial orbit and check that the ground-points captured are on either
side of hemisphere only. (Conical Sensor)
"""
############ Common attributes for both positive and negative roll tests ############
duration = 0.1
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 20,
"aop": 0, "ta": 120}
}
############ positive roll ############
# setup spacecraft with some parameters setup randomly
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_accessX.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
out_info = cov.execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": sat.get_instrument(None)._id,
"modeId": sat.get_instrument(None).get_mode_id()[0],
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x > 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_1(.) positive roll test. Run the test again.')
############ negative roll ############
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_accessY.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x < 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_1(.) negative roll test. Run the test again.')
def test_execute_2(self):
""" Orient the sensor with varying yaw but same pitch and roll, and test that the captured ground-points remain the same
(Conical Sensor).
"""
####### Common attributes for both simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 0,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
pitch = 15
roll = 10.5
######## Simulation 1 #######
yaw = random.uniform(0,360)
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
out_info = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": sat.get_instrument(None)._id,
"modeId": sat.get_instrument(None).get_mode_id()[0],
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 ########
yaw = random.uniform(0,360)
instrument_dict = {"mode":[{"@id":"m1", "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": roll, "zRotation": yaw}}],
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"sen1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access.csv'
# run the coverage calculator
out_info = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
self.assertEqual(out_info, CoverageOutputInfo.from_dict({ "coverageType": "GRID COVERAGE",
"spacecraftId": sat._id,
"instruId": "sen1",
"modeId": "m1",
"usedFieldOfRegard": False,
"filterMidIntervalAccess": False,
"gridId": grid._id,
"stateCartFile": state_cart_file,
"accessFile": out_file_access,
"startDate": 2458265.00000,
"duration": duration, "@id":None}))
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
if not access_data1.empty:
(lat1, lon1) = grid.get_lat_lon_from_index(access_data1['GP index'].tolist())
(lat2, lon2) = grid.get_lat_lon_from_index(access_data2['GP index'].tolist())
self.assertTrue(lat1==lat2)
else:
warnings.warn('No data was generated in test_execute_2(.). Run the test again.')
def test_execute_3(self):
""" Orient the sensor with pitch and test that the times the ground-points are captured lag or lead (depending on direction of pitch)
as compared to the coverage from a zero pitch sensor. (Conical Sensor)
Fixed inputs used.
"""
####### Common attributes for all the simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 45, "raan": 245,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
grid.write_to_file(self.out_dir+'/grid.csv')
######## Simulation 1 #######
pitch = 0
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access1.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 #######
pitch = 25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access2.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 3 #######
pitch = -25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access3.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
access_data3 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## compare the results of both the simulations ########
# the first GP index in the forward-pitch case is detected earlier than in the zero-pitch case, and both are detected earlier than in the backward-pitch case
self.assertEqual(access_data3["GP index"][0], 1436)
self.assertEqual(access_data3["time index"][0], 51)
self.assertEqual(access_data1["GP index"][0], 1436)
self.assertEqual(access_data1["time index"][0], 91)
self.assertEqual(access_data2["GP index"][34], 1436)
self.assertEqual(access_data2["time index"][34], 123)
def test_execute_4(self):
""" Orient the sensor with roll, and an equatorial orbit and check that the ground-points captured are on either
side of hemisphere only. (Rectangular Sensor)
"""
############ Common attributes for both positive and negative roll tests ############
duration = 0.1
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":25, "latLower":-25, "lonUpper":180, "lonLower":-180, "gridRes": 2})
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 0, "raan": 20,
"aop": 0, "ta": 120}
}
############ positive roll ############
# setup spacecraft with some parameters setup randomly
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":12.5},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
self.j2_prop.execute(spacecraft=sat, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_accessX.csv'
# run the coverage calculator
cov = GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file)
cov.execute(out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x > 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_4(.) positive roll test. Run the test again.')
############ negative roll ############
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-12.5},
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_accessY.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, mode_id=None, use_field_of_regard=False, out_file_access=out_file_access)
# check the outputs
access_data = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
if not access_data.empty:
(lat, lon) = grid.get_lat_lon_from_index(access_data['GP index'].tolist())
self.assertTrue(all(x < 0 for x in lat))
else:
warnings.warn('No data was generated in test_execute_4(.) negative roll test. Run the test again.')
def test_execute_5(self):
""" Orient the sensor with pitch and test that the times the ground-points are captured lag or lead (depending on direction of pitch)
as compared to the coverage from a zero pitch sensor. (Rectangular Sensor)
Fixed inputs used.
"""
####### Common attributes for all the simulations #######
duration = 0.1
orbit_dict = {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":26, "hour":12, "minute":0, "second":0}, # JD: 2458265.00000
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": RE+500,
"ecc": 0.001, "inc": 45, "raan": 245,
"aop": 0, "ta": 0}
}
spacecraftBus_dict = {"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}}
# generate grid object
grid = Grid.from_autogrid_dict({"@type": "autogrid", "@id": 1, "latUpper":90, "latLower":-90, "lonUpper":180, "lonLower":-180, "gridRes": 5})
grid.write_to_file(self.out_dir+'/grid.csv')
######## Simulation 1 #######
pitch = 0
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# execute propagator
#factory = PropagatorFactory()
#prop = factory.get_propagator({"@type": 'J2 ANALYTICAL PROPAGATOR', "stepSize": 1})
#prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
self.j2_prop.execute(spacecraft=sat, start_date=None, out_file_cart=state_cart_file, duration=duration)
# set output file path
out_file_access = self.out_dir+'/test_cov_access1.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(out_file_access=out_file_access)
access_data1 = pd.read_csv(out_file_access, skiprows = [0,1,2,3]) # 5th row header, 6th row onwards contains the data
######## Simulation 2 #######
pitch = 25
instrument_dict = {"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "XYZ", "xRotation": pitch, "yRotation": 0, "zRotation": 0},
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight": 15, "angleWidth": 25 },
"@id":"bs1", "@type":"Basic Sensor"}
sat = Spacecraft.from_dict({"orbitState":orbit_dict, "instrument":instrument_dict, "spacecraftBus":spacecraftBus_dict})
state_cart_file = self.out_dir+'/test_cov_cart_states.csv'
# no need to rerun propagator, since propagation does not depend on sensor
# set output file path
out_file_access = self.out_dir+'/test_cov_access2.csv'
# run the coverage calculator
GridCoverage(grid=grid, spacecraft=sat, state_cart_file=state_cart_file).execute(instru_id=None, out_file_access=out_file_access)
access_data2 = pd.read_csv(out_file_access, skiprows = [0,1,2,3])  # api: pandas.read_csv
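A minimal sketch of the access-file parsing pattern used throughout these tests: the first four rows carry metadata (coverage type, epoch, step size, duration), the fifth row is the column header, and the data follows (the file below is assumed to have been produced by `GridCoverage.execute`):

```python
import pandas as pd

out_file_access = "test_cov_access.csv"   # produced earlier by GridCoverage.execute(...)
epoch_row = pd.read_csv(out_file_access, skiprows=[0], nrows=1, header=None).astype(str)
epoch_jdut1 = float(epoch_row[0][0].split()[3])           # e.g. 2458265.0
access_data = pd.read_csv(out_file_access, skiprows=[0, 1, 2, 3])
print(epoch_jdut1, list(access_data.columns))
```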
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 11:13:38 2016
@author: adityanagarajan
"""
import pandas as pd
import os
import numpy as np
import time
import multiprocessing
def unwrap_self_f(arg, **kwarg):
"""Taken from
http://www.rueckstiess.net/research/snippets/show/ca1d7d90
"""
return parse_AHRF_ascii.create_ahrf_frame(*arg, **kwarg)
class parse_AHRF_ascii(object):
"""This class extracts information from AHRF ascii
Attributes:
----------------------
file_path: the path of the ascii file to parse
meta_data_path: path of the .sas file
meta_data: data frame containing column name and location of variable in ascii
num_cores: number of cores/workers to use while parsing the file
"""
def __init__(self, num_cores=4, ascii_file_path='../data/ahrf2016.asc',
sas_file_path='../data/DOC/ahrf2015-16.sas'):
self.file_path = ascii_file_path
self.meta_data_path = sas_file_path
self.num_cores = num_cores
self.meta_data = self.load_meta_data()
self.ahrf_columns = []
def load_meta_data(self):
if os.path.exists('DOC/meta_data.csv'):
print('meta data file exists loading...')
meta_data = pd.read_csv('DOC/meta_data.csv')
else:
print('meta data file does not exists, creating and saving to DOC/meta_data.csv')
meta_data = self.parse_meta_data()
return meta_data
def parse_meta_data(self):
"""This function extracts the meta data to parse the AHRF
"""
meta_data_frame = pd.DataFrame(columns=['FieldId', 'Position', 'FieldLength', 'FieldName'])
ctr = 0
with open(self.meta_data_path, 'r') as md:
for line in md.readlines():
temp = line.split()
# Check for position indicator field
if len(temp) > 1 and temp[0][0] == '@':
if temp[2] == '$':
field_length = float(temp[3].strip('.'))
else:
field_length = float(temp[2].strip('.'))
field = [temp[1], int(temp[0].strip('@')), field_length]
meta_data_frame.loc[ctr, :3] = field
ctr += 1
# check for field name definitions and fill the meta_data_frame
if len(temp) > 1 and temp[0][0] == 'f':
meta_data_frame.loc[meta_data_frame.FieldId == temp[0], 'FieldName'] = ''.join(temp[2:]).strip('"')
if not os.path.exists('DOC/'):
os.mkdir('DOC/')
meta_data_frame.to_csv('DOC/meta_data.csv', index=False)
return meta_data_frame
def parse_ahrf_file(self, columns):
"""Single core implementation much slower than the multi core version
input : columns to extract
output: data frame with the column and the fields size = (3230,len(columns))
"""
self.ahrf_columns = columns
start_time = time.time()
with open(self.file_path, 'rb') as ahrf_file:
ahrf_lines = ahrf_file.readlines()
ahrf_data = self.create_ahrf_frame(ahrf_lines)
# Ensure that the total number of records is 3230 as per the
# technical documentation
assert ahrf_data.shape[0] == 3230
if not os.path.exists('DATA/'):
os.mkdir('DATA/')
ahrf_data.to_csv('DATA/ahrf_data.csv', index=False)
end_time = time.time()
print('Total time taken %.4f s' % (end_time - start_time))
return ahrf_data
def _divide_data_set(self):
"""Divides the AHRF into blocks for each worker to process
"""
divide = 3230 // self.num_cores  # integer division keeps the slice bounds as ints
index_list = [slice(x * divide, (x + 1) * divide) for x in range(self.num_cores - 1)]
index_list.extend([slice((self.num_cores - 1) * divide, None)])
print(index_list)
return index_list
def create_ahrf_frame(self, ahrf_lines):
ahrf_data = pd.DataFrame(columns=self.ahrf_columns)
ctr = 0
for line in ahrf_lines:
for c_name in self.ahrf_columns:
start_pos = self.meta_data[self.meta_data['FieldName'] == c_name].Position.values[0] - 1
temp_end_pos = self.meta_data[self.meta_data['FieldName'] == c_name].FieldLength.values[0]
end_pos = start_pos + int(round(temp_end_pos))
# check for decimal multiplier
if round(temp_end_pos % 1.0, 1) > 0.0:
multi = 0.1 ** (round(temp_end_pos % 1.0, 1) * 10)
if line[start_pos:end_pos].strip() != '.':
ahrf_data.loc[ctr, c_name] = int(line[start_pos:end_pos]) * multi
else:
ahrf_data.loc[ctr, c_name] = line[start_pos:end_pos]
else:
ahrf_data.loc[ctr, c_name] = line[start_pos:end_pos]
ctr += 1
return ahrf_data
def parse_ahrf_file_multicore(self, ahrf_columns=[]):
"""parses the county level ascii AHRF and returns information
as a dataframe.
input : columns to extract (look at meta_data.csv)
output: data frame with the column and the fields size = (3230,len(columns))
"""
self.ahrf_columns = ahrf_columns
print('Loading variables to a DataFrame...')
start_time = time.time()
p = multiprocessing.Pool(self.num_cores)
with open(self.file_path, 'rb') as ahrf_file:
ahrf_lines = ahrf_file.readlines()
slices = self._divide_data_set()
ahrf_blocks = [ahrf_lines[sl] for sl in slices]
frames = p.map(unwrap_self_f, zip([self] * len(ahrf_blocks), ahrf_blocks))
ahrf_data = pd.concat(frames, ignore_index=True)  # api: pandas.concat
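A minimal standalone sketch of the split / `Pool.map` / `pd.concat` pattern used here, with a toy worker standing in for `create_ahrf_frame`:

```python
import multiprocessing
import pandas as pd

def parse_block(lines):
    # Toy stand-in for create_ahrf_frame: one record per line.
    return pd.DataFrame({"raw": [line.strip() for line in lines]})

if __name__ == "__main__":
    lines = ["record %d" % i for i in range(100)]
    blocks = [lines[i::4] for i in range(4)]               # four roughly equal blocks
    with multiprocessing.Pool(4) as pool:
        frames = pool.map(parse_block, blocks)
    ahrf_data = pd.concat(frames, ignore_index=True)
    print(len(ahrf_data))                                   # 100
```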
"""
Created on Thursday Mar 26 2020
<NAME>
based on
https://www.kaggle.com/bardor/covid-19-growing-rate
https://github.com/CSSEGISandData/COVID-19
https://github.com/imdevskp
https://www.kaggle.com/yamqwe/covid-19-status-israel
https://www.kaggle.com/vanshjatana/machine-learning-on-coronavirus
https://www.lewuathe.com/covid-19-dynamics-with-sir-model.html
"""
import sys
from datetime import date, timedelta
import time
import os
import numpy as np
import pandas as pd
import logging
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from Utils import *
from scipy.signal import argrelextrema
# Run following algos for learning and prediction
do_SIR = True
do_prophet = False
do_ARIMA = False
do_LSTM = False
do_reg = False
if do_prophet:
from fbprophet import Prophet
from fbprophet.plot import plot_plotly, add_changepoints_to_plot
if do_ARIMA:
from statsmodels.tsa.arima_model import ARIMA
from pandas.plotting import autocorrelation_plot
from pmdarima import auto_arima
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error
from statsmodels.tools.eval_measures import rmse
import statsmodels.api as sm
if do_LSTM:
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler, RobustScaler, Normalizer, StandardScaler
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
if do_reg:
from sklearn.neural_network import MLPRegressor
# seed ###################
seed = 1234
np.random.seed(seed)
##############################
class suppress_stdout_stderr(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
for fd in self.null_fds + self.save_fds:
os.close(fd)
def calc_factor(db):
# Factor to boost the calculation if the values are big
if db.Confirmed.values[-1] > 1e8:
factor = 100000.0
elif db.Confirmed.values[-1] > 1e7:
factor = 10000.0
elif db.Confirmed.values[-1] > 1e6:
factor = 1000.0
elif db.Confirmed.values[-1] > 1e5:
factor = 100.0
elif db.Confirmed.values[-1] > 1e4:
factor = 10.0
else:
factor = 1.0
print('Boost factor %d' % factor)
return factor
def local_extrema(in_data, do_smooth=True, window_len=15):
if do_smooth:
# moving average
w = np.ones(window_len, 'd')
data = np.convolve(w/w.sum(), in_data, mode='valid')
else:
data = in_data.values
# for local maxima
loc_maxima = argrelextrema(data, np.greater)[0]
# for local minima
loc_minima = argrelextrema(data, np.less)[0]
return loc_maxima, loc_minima
def extend_index(date, new_size):
values = date.values
current = values[-1]
while len(values) < new_size:
current = current + np.timedelta64(1, 'D')
values = np.append(values, current)
return values
def extended_data(db, inputs, dates, prefix='Real'):
size = len(dates)
df = pd.DataFrame(index=np.datetime_as_string(dates, unit='D'))
for cnt in range(len(inputs)):
k = inputs[cnt]
df[prefix + k] = np.concatenate((db[k].values, [None] * (size - len(db[k].values))))
return df
def SIR_algo(data, predict_range=450, s_0=None, threshConfrirm=1, threshDays=None, active_ratio=0.11, debug_mode=None):
# interactive site http://www.public.asu.edu/~hnesse/classes/sir.html
# beta - parameter controlling how much the disease can be transmitted through exposure.
# gamma - parameter expressing how much of the disease can be recovered from in a specific period
# delta - "killing" parameter, expressing how fatal the disease is
# R0 - basic reproduction number, the average number of people infected by one infected person, beta/gamma
# days - the average number of days to recover from infection, 1/gamma
# epsilon - the D/R ratio describing whether an overload of the health care system is approaching (delta/gamma)
# delta_0 - learning cost of the system
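# A minimal standalone sketch of the S-I-R-D system that loss() and predict() below
# integrate with solve_ivp (illustrative parameter values, not part of this pipeline):
#
#   from scipy.integrate import solve_ivp
#   def sird(t, y, beta=0.3, gamma=0.1, delta=0.01):
#       S, I, R, D = y
#       return [-beta * S * I, beta * S * I - gamma * I - delta * I, gamma * I, delta * I]
#   sol = solve_ivp(sird, [0, 160], [0.99, 0.01, 0.0, 0.0], t_eval=np.arange(0, 160, 1))
#   # R0 = beta / gamma = 3.0, mean infectious period = 1 / gamma = 10 days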
def loss(point, active, recovered, death, s_0, i_0, r_0, d_0, alpha):
# size = len(data)
size = len(active)
beta, gamma, delta = point
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
D = y[3]
# return [-beta * S * I, beta * S * I - gamma * I, gamma * I]
return [-beta * S * I, beta * S * I - gamma * I - delta * D, gamma * I, delta * I]
solution = solve_ivp(SIR, [0, size], [s_0, i_0, r_0, d_0], t_eval=np.arange(0, size, 1), vectorized=True)
l1 = np.sqrt(np.mean((solution.y[1] - active) ** 2))
l2 = np.sqrt(np.mean((solution.y[2] - recovered) ** 2))
l3 = np.sqrt(np.mean((solution.y[3] - death) ** 2))
return alpha[0] * l1 + np.max([0, 1 - alpha[0] - alpha[1] ]) * l2 + alpha[1] * l3
def predict(dataDate, beta, gamma, delta, active, recovered, death, s_0, i_0, r_0, d_0):
dates = extend_index(dataDate, predict_range)
size = len(dates)
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
D = y[3]
# return [-beta * S * I, beta * S * I - gamma * I, gamma * I]
return [-beta * S * I, beta * S * I - gamma * I - delta * D, gamma * I, delta * I]
prediction = solve_ivp(SIR, [0, size], [s_0, i_0, r_0, d_0], t_eval=np.arange(0, size, 1))
pr_size = prediction.y.shape[1]
if pr_size != size:
new_size = pr_size - len(active.values)
dates = dates[:pr_size]
else:
new_size = size - len(active.values)
extended_active = np.concatenate((active.values, [None] * new_size))
extended_recovered = np.concatenate((recovered.values, [None] * new_size))
extended_death = np.concatenate((death.values, [None] * new_size))
return dates, extended_active, extended_recovered, extended_death, prediction
data = (data.loc[data.loc[:, 'Confirmed'] > threshConfrirm, :]).reset_index()
if threshDays:
data = data.loc[:threshDays, :]
cur_day = data.Date.max().strftime('%d%m%y')
run_daily = False
# Factor to boost the calculation if the values are big
factor = calc_factor(data)
Dsir = 0
out_text = ''
begin_idx = 0
epsilon = []
delta_0 = []
max_id = data['Active'].idxmax()
# idx_min_after_max = data['Active'][idx_max:].idxmin() + 1
len_data = len(data['Active'])
window_len = 15
loc_max, loc_min = local_extrema(data['Active'], window_len=window_len)
if len(loc_max) > len(loc_min):
loc_min = np.append(loc_min, len_data + 1)
elif len(loc_min) > len(loc_max):
loc_max = np.append(loc_max, loc_max[-1])
idx_max = loc_max[abs(loc_max - loc_min) > 10]
idx_min_after_max = loc_min[abs(loc_max - loc_min) > 10]
if len(idx_max) > 1:
idx_max = np.unique(np.append((idx_max + (window_len - 1) / 2 - 1).astype(int), max_id))
idx_min_after_max = np.unique(np.append((idx_min_after_max + (window_len - 1) / 2 - 1).astype(int), len_data + 1))
idx_min_after_max = np.append(idx_min_after_max[0], idx_min_after_max[1:][np.diff(idx_min_after_max) > 10])
wave = len(idx_min_after_max)
        print('There are ' + str(wave) + ' waves!')
else:
wave = 1
idx_min_after_max = [len_data + 1]
for cnt in range(wave):
active_ratio = data['Active'][begin_idx:idx_min_after_max[cnt]].values[-1] / data['Confirmed'][begin_idx:idx_min_after_max[cnt]].values[-1]
recovered = (data['Recovered'][begin_idx:idx_min_after_max[cnt]] / factor).reset_index().Recovered
death = (data['Deaths'][begin_idx:idx_min_after_max[cnt]] / factor).reset_index().Deaths
active = (data['Active'][begin_idx:idx_min_after_max[cnt]] / factor).reset_index().Active
confirmed = (data['Confirmed'][begin_idx:idx_min_after_max[cnt]]).reset_index().Confirmed
dataDate = data['Date'][begin_idx:idx_min_after_max[cnt]]
try:
country = data.Country.values[0]
except:
country = 'world'
# cumulative
i_0 = active.values[0]
r_0 = recovered.values[0]
d_0 = death.values[0]
if s_0 is None:
s_0 = (confirmed.values[-1] / factor)
else:
s_0 = (s_0 / factor)
if run_daily:
# daily
daily_recovered = (data['NewRecovered'][begin_idx:idx_min_after_max[cnt]]).reset_index().NewRecovered.clip(0)
daily_death = (data['NewDeaths'][begin_idx:idx_min_after_max[cnt]]).reset_index().NewDeaths.clip(0)
daily_active = (data['NewActive'][begin_idx:idx_min_after_max[cnt]]).reset_index().NewActive.clip(0)
daily_confirmed = (data['NewConfirmed'][begin_idx:idx_min_after_max[cnt]]).reset_index().NewConfirmed.clip(0)
di_0 = daily_active.values[0]
dr_0 = daily_recovered.values[0]
dd_0 = daily_death.values[0]
ds_0 = daily_confirmed.values[0]
try:
daily_optimal = minimize(loss, [0.001, 0.001, 0.001], args=(daily_active, daily_recovered, daily_death,
ds_0, di_0, dr_0, dd_0, [0.45, 0.05]),
method='L-BFGS-B', bounds=[(0.00000001, 1), (0.00000001, 1), (0.00000001, 1)],
options={'eps': 1e-5, 'maxls': 40, 'disp': debug_mode})
print(daily_optimal)
                # R0 - basic reproduction number, the average number of people infected by one infectious person, beta/gamma
R0 = daily_optimal.x[0]/(daily_optimal.x[1] + daily_optimal.x[2])
except Exception as exc:
print(exc)
R0 = None
else:
R0 = None
alpha = [0.11, np.min([0.75, np.max([0.44, round(active_ratio, 3)])])]
        print('Susceptible, WeightActive, WeightDeath')
print([s_0, alpha])
try:
optimal = minimize(loss, [0.001, 0.001, 0.001], args=(active, recovered, death, s_0, i_0, r_0, d_0, alpha),
method='L-BFGS-B', bounds=[(0.00000001, 0.8), (0.00000001, 0.8), (0.00000001, 0.6)],
options={'maxls': 40, 'disp': debug_mode})
print(optimal)
if optimal.nit < 10 or ((round(1 / optimal.x[1]) < 13 or (1 / optimal.x[1]) > predict_range)
and active_ratio > 0.075) or optimal.fun > 500:
raise Exception('the parameters are not reliable')
except Exception as exc:
print(exc)
try:
optimal = minimize(loss, [0.001, 0.001, 0.001], args=(active, recovered, death, s_0, i_0, r_0, d_0, alpha),
method='L-BFGS-B', bounds=[(0.00000001, 1), (0.00000001, 1), (0.00000001, 0.6)],
options={'eps': 1e-7, 'maxls': 40, 'disp': debug_mode})
print(optimal)
if optimal.nit < 10 or ((round(1 / optimal.x[1]) < 14 or (1 / optimal.x[1]) > predict_range + 60)
and active_ratio > 0.075) or optimal.fun > 600:
raise Exception('the parameters are not reliable')
except Exception as exc:
print(exc)
optimal = minimize(loss, [0.01, 0.01, 0.01], args=(active, recovered, death, s_0, i_0, r_0, d_0, alpha),
method='L-BFGS-B', bounds=[(0.00000001, 1), (0.00000001, 1), (0.00000001, 0.6)],
options={'eps': 1e-5, 'maxls': 40, 'disp': debug_mode})
print(optimal)
if optimal.nit < 10 or ((round(1 / optimal.x[1]) < 15 or (1 / optimal.x[1]) > predict_range + 90)
and active_ratio > 0.075) or optimal.fun > 700:
raise Exception('the parameters are not reliable')
beta, gamma, delta = optimal.x
dates, extended_active, extended_recovered, extended_death, prediction = \
predict(dataDate, beta, gamma, delta, active, recovered, death, s_0, i_0, r_0, d_0)
df = pd.DataFrame(
{'Active Real': extended_active, 'Recovered Real': extended_recovered, 'Deaths Real': extended_death,
# 'Susceptible': (prediction.y[0]).astype(int),
'Active Predicted': (prediction.y[1]).astype(int),
'Recovered Predicted': (prediction.y[2]).astype(int),
'Deaths Predicted': (prediction.y[3]).astype(int)}, index=np.datetime_as_string(dates, unit='D'))
df = df.mul(factor)
df = df[df['Active Predicted'] >= 1]
Dsir = Dsir + int((1 / gamma))
dday = (data['Date'][idx_min_after_max[cnt]-2] + timedelta(1/gamma)).strftime('%d/%m/%y')
        # R0 - basic reproduction number, the average number of people infected by one infectious person, beta/gamma
if R0 is None:
R0 = beta / (gamma + delta)
# epsilon- the D/R ratio describing whether overload of the health care system is approaching
epsilon.append(delta / gamma)
# delta_0 - learning cost of the system
delta_0.append(epsilon[cnt] * r_0 * factor - d_0 * factor)
print('country=%s, wave=%d, beta=%.8f, gamma=%.8f, delta=%.8f, R_0=%.8f, d_0=%8.2f, epsilon=%.8f, days_to_recovery=%.1f'
% (country, cnt+1, beta, gamma, delta, R0, delta_0[cnt], epsilon[cnt], (1 / gamma)))
if cnt == 0:
full_data = df
out_text = country + ' ' + str(data.Date.max().strftime('%d/%m/%y')) \
+ ': Since the ' + str(threshConfrirm) + ' Confirmed Case.'
if wave > 1:
full_data = df[: idx_min_after_max[cnt]]
# begin_idx = idx_min_after_max + data['Active'][idx_min_after_max:].values.nonzero()[0][0]
# idx_min_after_max = len_data + 1
begin_idx = idx_min_after_max[cnt] + 1
s_0 = None
country_folder = os.path.join(os.getcwd(), time.strftime("%d%m%Y"), base_country)
if not os.path.exists(country_folder):
os.makedirs(country_folder, exist_ok=True)
else:
df_text = country + ' ' + str(data.Date.max().strftime('%d/%m/%y')) \
+ ': Since the ' + str(threshConfrirm) + ' Confirmed Case in wave ' + str(cnt + 1) \
+ '. Days to recovery=' \
+ str(Dsir) + ' - ' + str(dday) \
+ '<br>\N{GREEK SMALL LETTER BETA}= ' + str(round(beta, 7)) \
+ ', \u03B3= ' + str(round(gamma, 7)) + ', \u03B4= ' + str(round(delta, 7)) \
+ ', r\N{SUBSCRIPT ZERO}= ' + str(round((beta / gamma), 7)) \
+ ', \u03B4\N{SUBSCRIPT ZERO}= ' + str(round((delta_0[cnt]), 2)) \
+ ', \u03B5= ' + str(round((epsilon[cnt]), 7))
fig, ax = plt.subplots(figsize=(14, 9))
ax.set_title(df_text.replace('<br>', '\n'), loc='left')
df.plot(ax=ax)
save_string = cur_day + '_SIR_Prediction_' + country + 'wave ' + str(cnt + 1) + ' only' + '.png'
fig.savefig(os.path.join(country_folder, save_string))
if cnt == wave - 1:
full_data = pd.concat([full_data, df], axis=0, sort=False)
else:
full_data = pd.concat([full_data, df[: (idx_min_after_max[cnt] - idx_min_after_max[cnt-1])]], axis=0, sort=False)
begin_idx = idx_min_after_max[cnt] + 1
s_0 = None
out_text = out_text \
+ '<br>Wave ' + str(cnt + 1) + ': Days to recovery=' + str(Dsir) + ' - ' + str(dday) \
+ ', \N{GREEK SMALL LETTER BETA}= ' + str(round(beta, 7)) \
+ ', \u03B3= ' + str(round(gamma, 7)) + ', \u03B4= ' + str(round(delta, 7)) \
+ ', r\N{SUBSCRIPT ZERO}= ' + str(round((beta / gamma), 7)) \
+ ', \u03B4\N{SUBSCRIPT ZERO}= ' + str(round((delta_0[cnt]), 2)) \
+ ', \u03B5= ' + str(round((epsilon[cnt]), 7))
fig, ax = plt.subplots(figsize=(14, 9))
ax.set_title(out_text.replace('<br>', '\n'), loc='left')
full_data.plot(ax=ax)
plt.tight_layout()
save_string = cur_day + '_SIR_Prediction_' + country + ' waves ' + str(cnt+1) + '.png'
if wave > 1:
fig.savefig(os.path.join(country_folder, save_string))
if wave == cnt + 1:
fig.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
return full_data, out_text, Dsir
##################################################################################################
def prophet_modeling_and_predicting(base_db, column_name, predict_range=365, first_n=45, last_n=30, threshConfrirm=1,
threshDays=None, logistic=False, debug_mode=None):
# Prophet Algorithm
# Implements a procedure for forecasting time series data based on an additive model where non-linear trends are fit
# with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong
# seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend
# and typically handles outliers well.
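    # Bare-bones sketch of the Prophet calls used below (illustration only, mirrors this function):
    #     m = Prophet(growth='linear')
    #     m.fit(pd.DataFrame({'ds': dates, 'y': values}))   # two-column frame: timestamps and target
    #     forecast = m.predict(m.make_future_dataframe(periods=30))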
data = (base_db.loc[base_db.loc[:, 'Confirmed'] > threshConfrirm, :]).reset_index()
if threshDays:
data = data.loc[:threshDays, :]
pr_data = data.loc[:, ['Date', column_name]].copy()
pr_data.columns = ['ds', 'y']
if logistic:
growth = 'logistic'
pr_data['cap'] = 2*pr_data.y.max()
else:
growth = 'linear'
# Turn off fbprophet stdout logger
logging.getLogger('Prophet').setLevel(logging.ERROR)
# pr_data.y = pr_data.y.astype('float')
# Modeling
m = Prophet(growth=growth, yearly_seasonality=False, weekly_seasonality=True, daily_seasonality=True)
with suppress_stdout_stderr():
m.fit(pr_data)
future = m.make_future_dataframe(periods=predict_range)
if logistic:
future['cap'] = 2 * pr_data.y.max()
forecast_test = m.predict(future)
# Predicting
test = forecast_test.loc[:, ['ds', 'trend']]
# test = test[test['trend'] > 0]
test = test.head(first_n)
forecast_test = forecast_test.head(first_n)
if last_n < first_n:
test = test.tail(last_n)
forecast_test = forecast_test.tail(last_n)
# Graphical Representation of Predicted Screening
fig_test = plot_plotly(m, forecast_test, xlabel='Date', ylabel=column_name, trend=True)
# fig_test.show()
    # # py.iplot(fig_test) # only for Jupyter
# f_test = m.plot(forecast_test, xlabel='Date', ylabel=column_name + ' Count')
# figure_test = m.plot_components(forecast_test)
test.columns = ['Date', column_name]
return test, forecast_test, fig_test
##################################################################################################
def arima_modeling_and_predicting(base_db, column_name, predict_range=450, threshConfrirm=1, threshDays=None,
debug_mode=None):
    # https://machinelearningmastery.com/arima-for-time-series-forecasting-with-python/
    # <NAME> - Autoregressive Integrated Moving Average Model
    # p - auto-regressive aspect. This parameter says
    # it's likely to rain tomorrow if it has been raining for the last 5 days
    # d - integrated part. This parameter says
    # it's likely to rain the same amount tomorrow if the difference in rain over the last 5 days has been small
    # q - moving average part. This parameter sets the error of the model as a linear combination of the error values
    # observed at previous time points in the series
    # The Akaike information criterion (AIC) is an estimator of the relative quality of statistical models for a given
    # set of data. Lower is better.
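    # Concretely (standard ARIMA notation, not project-specific code): with d=1 the model
    # works on the differenced series y'_t = y_t - y_{t-1}, regresses y'_t on its last p
    # values (AR part) and on the last q forecast errors (MA part).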
data = (base_db.loc[base_db.loc[:, 'Confirmed'] > threshConfrirm, :]).reset_index()
# Factor to boost the calculation if the values are big
factor = calc_factor(base_db)
if threshDays:
data = data.loc[:threshDays, :]
arima_data = data.loc[:, ['Date', column_name]].copy()
arima_data.columns = ['Date', 'Count']
arima_data['Count'] = arima_data['Count'] / factor
arima_data['Date'] = pd.to_datetime(arima_data['Date'])
dates = extend_index(arima_data.Date, predict_range)
size = len(dates)
len_data = len(arima_data['Count'].values)
period = size - len_data
if debug_mode is not None:
plt.figure()
        # Running autocorrelation_plot, we can see where there is a positive correlation (within the first lags)
        # and where it is perhaps significant (above the confidence line) for the first p lags.
autocorrelation_plot(arima_data['Count'])
do_print = True
else:
do_print = False
stepwise_fit = auto_arima(arima_data['Count'], d=2, D=2, trace=do_print, # trace print log
error_action='ignore', # we don't want to know if an order does not work
suppress_warnings=True, # we don't want convergence warnings
stepwise=True) # set to stepwise
if do_print:
# To print the summary
print(stepwise_fit.summary())
print('Arima order :' + str(stepwise_fit.order))
order = tuple(np.array(stepwise_fit.order).clip(1, 3))
model = ARIMA(arima_data['Count'].values, order=order)
# Model and prediction
# if stepwise_fit.order[0] == 0 or stepwise_fit.order[2] == 0:
# model = ARIMA(arima_data['Count'].values, order=(1, 2, 1))
# else:
# model = ARIMA(arima_data['Count'].values, order=stepwise_fit.order)
fit_model = model.fit(trend='c', full_output=True, disp=False)
if do_print:
print(fit_model.summary())
fig, ax = plt.subplots(2, 2)
# Graphical Representation for Prediction
fit_model.plot_predict(ax=ax[0, 0])
ax[0, 0].set_title('Forecast vs Actual for ' + column_name)
# Plot residual errors
residuals = pd.DataFrame(fit_model.resid)
        # If a trend is still visible here, the residual errors contain information not captured by the model.
        residuals.plot(title='Residual Error', ax=ax[1, 0])
        # The density plot of the residual errors should look roughly Gaussian, though it may not be centered on zero.
residuals.plot(kind='kde', title='Density', ax=ax[1, 1])
    # Forecast the next days (out-of-sample forecast for `period` steps using the fitted model)
    forecast = fit_model.forecast(steps=period)
    pred_y = forecast[0].tolist()
# Predictions of y values based on "model", namely fitted values
# try:
# yhat = stepwise_fit.predict_in_sample(start=0, end=len_data-1)
# except Exception as e:
# print(e)
yhat = stepwise_fit.predict_in_sample()
predictions = stepwise_fit.predict(period)
# Calculate root mean squared error
root_mse = rmse(arima_data['Count'], yhat)
# Calculate mean squared error
mse = mean_squared_error(arima_data['Count'], yhat)
print('rmse=%d, mse=%d' % (root_mse, mse))
if do_print:
pd.DataFrame(pred_y).plot(title='Prediction, rmse=' + str(int(root_mse)), ax=ax[0, 1])
test = pd.concat([pd.DataFrame(yhat, columns=[column_name]), pd.DataFrame(predictions, columns=[column_name])],
ignore_index=True)
test = test.mul(factor)
test.index = np.datetime_as_string(dates, unit='D')
return test, root_mse
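# Hypothetical call (the DataFrame and column names are assumptions based on the columns
# this function reads); it returns the in-sample fit concatenated with the forecast, plus the RMSE:
#     forecast_df, err = arima_modeling_and_predicting(country_df, 'Confirmed', predict_range=120)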
###########################################################################################################
def LSTM_modeling_and_predicting(base_db, column_name, predict_range=450, threshConfrirm=1, threshDays=None,
debug_mode=None):
dataset = (base_db.loc[base_db.loc[:, 'Confirmed'] > threshConfrirm, :]).reset_index()
if threshDays:
dataset = dataset.loc[:threshDays, :]
data = dataset.loc[:, ['Date', column_name]].copy()
data['Date'] = | pd.to_datetime(data['Date']) | pandas.to_datetime |
import cv2
import face_recognition
import json
import numpy as np
import pandas as pd
def myChangeFace(BASE_DIR,timestamp):
    '''
    Face-swap module
    @refer: https://blog.csdn.net/qq_41562735/article/details/104978448?spm=1001.2014.3001.5501
    @param:
        BASE_DIR: absolute base path where the server stores files
        timestamp: timestamp used to name the generated image
    @return:
        path of the generated image, relative to the front end
    '''
face1_path = BASE_DIR+"\\posts\\changeface\\image0.jpg"
face2_path = BASE_DIR+"\\posts\\changeface\\image1.jpg"
image_save = BASE_DIR+"\\results\\changeface_"+timestamp+".jpg"
changeFaceMain(image_save,face1_path,face2_path)
return json.dumps({'result_list':['\\results\\changeface_'+timestamp+'.jpg']},ensure_ascii=False)
def ladmasktuple(img):
    faces_loaction = face_recognition.face_locations(img, number_of_times_to_upsample=0, model='cnn')
    face_feature = face_recognition.face_landmarks(img, face_locations=faces_loaction)
    face_feature1 = pd.DataFrame(face_feature)
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featuretools as ft
from featuretools import EntitySet, Timedelta, calculate_feature_matrix, dfs
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature
)
from featuretools.primitives import (
Count,
Max,
Min,
Percentile,
Sum,
TransformPrimitive
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas
)
from featuretools.utils.gen_utils import Library, import_or_none
ks = import_or_none('databricks.koalas')
def test_scatter_warning(caplog):
logger = logging.getLogger('featuretools')
match = "EntitySet was only scattered to {} out of {} workers"
warning_message = match.format(1, 2)
logger.propagate = True
scatter_warning(1, 2)
logger.propagate = False
assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed dataframe result not ordered')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
verbose=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
error_text = 'features must be a non-empty list of features'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix('features', es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([1, 2, 3], es, cutoff_time=cutoff_time)
error_text = "cutoff_time times must be datetime type: try casting via "\
"pd\\.to_datetime\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=17)
error_text = 'cutoff_time must be a single value or DataFrame'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=times)
cutoff_times_dup = pd.DataFrame({'time': [datetime(2018, 3, 1),
datetime(2018, 3, 1)],
es['log'].index: [1, 1]})
error_text = 'Duplicated rows in cutoff time dataframe.'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
entityset=es,
cutoff_time=cutoff_times_dup)
cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]] # 3 ids not ordered by cutoff time
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_reordered,
verbose=True)
assert all(feature_matrix.index == cutoff_reordered["id"].values)
# fails with Dask and Koalas entitysets, cutoff time not reordered; cannot verify out of order
# - can't tell if wrong/different all are false so can't check positional
def test_cfm_warns_dask_cutoff_time(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times,
es['log'].index: instances})
cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
property_feature = ft.Feature(es['log']['value']) > 10
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
verbose=True)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_compose_approximate(es, lt):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('dask does not support approximate')
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
approximate='1s',
verbose=True)
assert(type(feature_matrix) == pd.core.frame.DataFrame)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_dask_compose(dask_es, lt):
property_feature = ft.Feature(dask_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
dask_es,
cutoff_time=lt,
verbose=True)
feature_matrix = feature_matrix.compute()
assert (feature_matrix[property_feature.get_name()] == feature_matrix['label_func']).values.all()
# tests approximate, skip for dask/koalas
def test_cfm_approximate_correct_ordering():
trips = {
'trip_id': [i for i in range(1000)],
'flight_time': [datetime(1998, 4, 2) for i in range(350)] + [datetime(1997, 4, 3) for i in range(650)],
'flight_id': [randint(1, 25) for i in range(1000)],
'trip_duration': [randint(1, 999) for i in range(1000)]
}
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.entity_from_dataframe("trips",
dataframe=df,
index="trip_id",
time_index='flight_time')
es.normalize_entity(base_entity_id="trips",
new_entity_id="flights",
index="flight_id",
make_time_index=True)
features = dfs(entityset=es, target_entity='trips', features_only=True)
flight_features = [feature for feature in features
if isinstance(feature, DirectFeature) and
isinstance(feature.base_features[0],
AggregationFeature)]
property_feature = IdentityFeature(es['trips']['trip_id'])
cutoff_time = pd.DataFrame.from_dict({'instance_id': df['trip_id'],
'time': df['flight_time']})
time_feature = IdentityFeature(es['trips']['flight_time'])
feature_matrix = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix.index.names = ['instance', 'time']
assert(np.all(feature_matrix.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix[['trip_id', 'flight_time']].values))
feature_matrix_2 = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
approximate=Timedelta(2, 'd'))
feature_matrix_2.index.names = ['instance', 'time']
assert(np.all(feature_matrix_2.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix_2[['trip_id', 'flight_time']].values))
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# uses approximate, skip for dask/koalas entitysets
def test_cfm_no_cutoff_time_index(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat4 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat4, pd_es['sessions'])
cutoff_time = pd.DataFrame({
'time': [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(12, 's'),
cutoff_time=cutoff_time)
assert feature_matrix.index.name == 'id'
assert feature_matrix.index.values.tolist() == [0, 2]
assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
cutoff_time = pd.DataFrame({
'time': [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix_2 = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix_2.index.name == 'id'
assert feature_matrix_2.index.tolist() == [0, 2]
assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with koalas entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed results not ordered, missing duplicates')
times = [datetime(2011, 4, 1), datetime(2011, 5, 1),
datetime(2011, 4, 1), datetime(2011, 5, 1)]
instances = [1, 1, 2, 2]
property_feature = ft.Feature(es['log']['value']) > 10
cutoff_time = pd.DataFrame({'id': instances, 'time': times},
index=[1, 1, 1, 1])
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
chunk_size=1)
assert (feature_matrix.shape[0] == cutoff_time.shape[0])
# TODO: fails with Dask, Koalas
def test_saveprogress(es, tmpdir):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('saveprogress fails with distributed entitysets')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = ft.Feature(es['log']['value']) > 10
save_progress = str(tmpdir)
fm_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
save_progress=save_progress)
_, _, files = next(os.walk(save_progress))
files = [os.path.join(save_progress, file) for file in files]
# there are 17 datetime files created above
assert len(files) == 17
list_df = []
for file_ in files:
df = pd.read_csv(file_, index_col="id", header=0)
list_df.append(df)
merged_df = pd.concat(list_df)
merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
fm_no_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
labels = [10, 5, 0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_binning():
cutoff_time = pd.DataFrame({
'time': [
datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1)
],
'instance_id': [1, 2, 3]
})
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, 'h'))
labels = [datetime(2011, 4, 9, 12),
datetime(2011, 4, 10, 8),
datetime(2011, 4, 10, 12)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, 'h'))
labels = [datetime(2011, 4, 8, 22),
datetime(2011, 4, 9, 23),
datetime(2011, 4, 9, 23)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
error_text = "Unit is relative"
with pytest.raises(ValueError, match=error_text):
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(1, 'mo'))
def test_training_window_fails_dask(dask_es):
property_feature = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['customers'],
primitive=Count)
error_text = "Using training_window is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([property_feature],
dask_es,
training_window='2 hours')
def test_cutoff_time_columns_order(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
id_col_names = ['instance_id', es['customers'].index]
time_col_names = ['time', es['customers'].time_index]
for id_col in id_col_names:
for time_col in time_col_names:
cutoff_time = pd.DataFrame({'dummy_col_1': [1, 2, 3],
id_col: [0, 1, 2],
'dummy_col_2': [True, False, False],
time_col: times})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
labels = [10, 5, 0]
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({es['customers'].index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column' \
' with the same name as the target entity index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
cutoff_time = pd.DataFrame({es['customers'].time_index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "time" and a column' \
' with the same name as the target entity time index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_training_window(pd_es):
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
    # make sure we have features with a direct reference to a higher-level agg,
    # so we get multiple "filter eids" in get_pandas_data_slice
    # and go through the loop to pull data with a training_window param more than once
dagg = DirectFeature(top_level_agg, pd_es['customers'])
# for now, warns if last_time_index not present
times = [datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
warn_text = "Using training_window but last_time_index is not set on entity customers"
with pytest.warns(UserWarning, match=warn_text):
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours')
pd_es.add_last_time_indexes()
error_text = 'Training window cannot be in observations'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
pd_es,
cutoff_time=cutoff_time,
training_window=Timedelta(2, 'observations'))
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True)
prop_values = [4, 5, 1]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False)
prop_values = [5, 5, 2]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case3. include_cutoff_time = False with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
training_window='9 minutes',
include_cutoff_time=False)
prop_values = [0, 4, 0]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case4. include_cutoff_time = True with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
training_window='2 days',
include_cutoff_time=True)
prop_values = [0, 10, 1]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
pd_es.add_last_time_indexes()
count_log = ft.Feature(
base=pd_es['log']['id'],
parent_entity=pd_es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:40:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 9])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 9])
def test_include_cutoff_time_without_training_window(es):
es.add_last_time_indexes()
count_log = ft.Feature(
base=es['log']['id'],
parent_entity=es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:31:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [1, 6])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [0, 5])
# Case3. include_cutoff_time = True with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [6])
# Case4. include_cutoff_time = False with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_time = pd.DataFrame({'time': [datetime(2011, 4, 9, 10, 31, 19)], 'instance_id': [0]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat2, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=False)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# excluded due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [5]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=True)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# included due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [6]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
# customer with no sessions
row = {
'id': [3],
'age': [73],
u'région_id': ['United States'],
'cohort': [1],
'cancel_reason': ["Lost interest"],
'loves_ice_cream': [True],
'favorite_quote': ["Don't look back. Something might be gaining on you."],
'signup_date': [datetime(2011, 4, 10)],
'upgrade_date': [datetime(2011, 4, 12)],
'cancel_date': [datetime(2011, 5, 13)],
'date_of_birth': [datetime(1938, 2, 1)],
'engagement_level': [2],
}
to_add_df = pd.DataFrame(row)
to_add_df.index = range(3, 4)
# have to convert category to int in order to concat
old_df = pd_es['customers'].df
old_df.index = old_df.index.astype("int")
old_df["id"] = old_df["id"].astype(int)
df = pd.concat([old_df, to_add_df], sort=True)
# convert back after
df.index = df.index.astype("category")
df["id"] = df["id"].astype("category")
pd_es['customers'].update_data(df=df, recalculate_last_time_indexes=False)
pd_es.add_last_time_indexes()
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dagg = DirectFeature(top_level_agg, pd_es['customers'])
instance_ids = [0, 1, 2, 3]
times = [datetime(2011, 4, 9, 12, 31), datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1), datetime(2011, 4, 10, 1, 59, 59)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': instance_ids})
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True,
)
prop_values = [4, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False,
)
prop_values = [5, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ koalas
def test_approximate_fails_dask(dask_es):
agg_feat = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['sessions'],
primitive=Count)
error_text = "Using approximate is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([agg_feat],
dask_es,
approximate=Timedelta(1, 'week'))
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix.shape[0] == 2
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
pd_es = pd_diamond_es
path = backward_path(pd_es, ['regions', 'customers', 'transactions'])
agg_feat = ft.AggregationFeature(pd_es['transactions']['id'],
parent_entity=pd_es['regions'],
relationship_path=path,
primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
instance_ids=[0, 2],
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
p = ft.Feature(pd_es['log']['value'], primitive=Percentile)
agg_feat = ft.Feature(p, parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
log_df = pd_es['log'].df
instances = [0, 2]
cutoffs = [pd.Timestamp('2011-04-09 10:31:19'), pd.Timestamp('2011-04-09 11:00:00')]
approxes = [pd.Timestamp('2011-04-09 10:31:10'), pd.Timestamp('2011-04-09 11:00:00')]
true_vals = []
true_vals_approx = []
for instance, cutoff, approx in zip(instances, cutoffs, approxes):
log_data_cutoff = log_df[log_df['datetime'] < cutoff]
log_data_cutoff['percentile'] = log_data_cutoff['value'].rank(pct=True)
true_agg = log_data_cutoff.loc[log_data_cutoff['session_id'] == instance, 'percentile'].fillna(0).sum()
true_vals.append(round(true_agg, 3))
log_data_approx = log_df[log_df['datetime'] < approx]
log_data_approx['percentile'] = log_data_approx['value'].rank(pct=True)
true_agg_approx = log_data_approx.loc[log_data_approx['session_id'].isin([0, 1, 2]), 'percentile'].fillna(0).sum()
true_vals_approx.append(round(true_agg_approx, 3))
lapprox = [round(x, 3) for x in feature_matrix[dfeat.get_name()].tolist()]
test_list = [round(x, 3) for x in feature_matrix[agg_feat.get_name()].tolist()]
assert lapprox == true_vals_approx
assert test_list == true_vals
def test_uses_full_entity_feat_of_approximate(pd_es):
agg_feat = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
agg_feat3 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Max)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
dfeat2 = DirectFeature(agg_feat3, pd_es['sessions'])
p = ft.Feature(dfeat, primitive=Percentile)
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# only dfeat2 should be approximated
# because Percentile needs all values
feature_matrix_only_dfeat2 = calculate_feature_matrix(
[dfeat2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == [50, 50]
feature_matrix_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == feature_matrix_approx[dfeat2.get_name()].tolist()
feature_matrix_small_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 'ms'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix_no_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
for f in [p, dfeat, agg_feat]:
for fm1, fm2 in combinations([feature_matrix_approx,
feature_matrix_small_approx,
feature_matrix_no_approx], 2):
assert fm1[f.get_name()].tolist() == fm2[f.get_name()].tolist()
def test_approximate_dfeat_of_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(ft.Feature(agg_feat2, pd_es["sessions"]), pd_es['log'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
def test_empty_path_approximate_full(pd_es):
pd_es['sessions'].df['customer_id'] = pd.Series([np.nan, np.nan, np.nan, 1, 1, 2], dtype="category")
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[dfeat.get_name()].tolist()
assert (vals1[0] == 0)
assert (vals1[1] == 0)
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
# todo: do we need to test this situation?
# def test_empty_path_approximate_partial(pd_es):
# pd_es = copy.deepcopy(pd_es)
# pd_es['sessions'].df['customer_id'] = pd.Categorical([0, 0, np.nan, 1, 1, 2])
# agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
# agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
# times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
# cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
# pd_es,
# approximate=Timedelta(10, 's'),
# cutoff_time=cutoff_time)
# vals1 = feature_matrix[dfeat.get_name()].tolist()
# assert vals1[0] == 7
# assert np.isnan(vals1[1])
# assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approx_base_feature_is_also_first_class_feature(pd_es):
log_to_products = DirectFeature(pd_es['products']['rating'], pd_es['log'])
# This should still be computed properly
agg_feat = ft.Feature(log_to_products, parent_entity=pd_es['sessions'], primitive=Min)
customer_agg_feat = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# This is to be approximated
sess_to_cust = DirectFeature(customer_agg_feat, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([sess_to_cust, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[sess_to_cust.get_name()].tolist()
assert vals1 == [8.5, 7]
vals2 = feature_matrix[agg_feat.get_name()].tolist()
assert vals2 == [4, 1.5]
def test_approximate_time_split_returns_the_same_result(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:07:30'),
pd.Timestamp('2011-04-09 10:07:40')],
'instance_id': [0, 0]})
feature_matrix_at_once = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
divided_matrices = []
separate_cutoff = [cutoff_df.iloc[0:1], cutoff_df.iloc[1:]]
    # Make sure the indexes are different.
    # Note that this step is unnecessary and is done only to showcase the issue here.
separate_cutoff[0].index = [0]
separate_cutoff[1].index = [1]
for ct in separate_cutoff:
fm = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=ct)
divided_matrices.append(fm)
feature_matrix_from_split = pd.concat(divided_matrices)
assert feature_matrix_from_split.shape == feature_matrix_at_once.shape
for i1, i2 in zip(feature_matrix_at_once.index, feature_matrix_from_split.index):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
for c in feature_matrix_from_split:
for i1, i2 in zip(feature_matrix_at_once[c], feature_matrix_from_split[c]):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
def test_approximate_returns_correct_empty_default_values(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 11:00:00'),
pd.Timestamp('2011-04-09 11:00:00')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [0, 10]
# def test_approximate_deep_recurse(pd_es):
# pd_es = pd_es
# agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
# dfeat1 = DirectFeature(agg_feat, pd_es['sessions'])
# agg_feat2 = Sum(dfeat1, pd_es['customers'])
# dfeat2 = DirectFeature(agg_feat2, pd_es['sessions'])
# agg_feat3 = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['products'], primitive=Count)
# dfeat3 = DirectFeature(agg_feat3, pd_es['log'])
# agg_feat4 = Sum(dfeat3, pd_es['sessions'])
# feature_matrix = calculate_feature_matrix([dfeat2, agg_feat4],
# pd_es,
# instance_ids=[0, 2],
# approximate=Timedelta(10, 's'),
# cutoff_time=[datetime(2011, 4, 9, 10, 31, 19),
# datetime(2011, 4, 9, 11, 0, 0)])
# # dfeat2 and agg_feat4 should both be approximated
def test_approximate_child_aggs_handled_correctly(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['customers'], primitive=Sum)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
fm_2 = calculate_feature_matrix([dfeat, agg_feat_2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [2, 3]
assert fm_2[agg_feat_2.get_name()].tolist() == [0, 5]
def test_cutoff_time_naming(es):
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
cutoff_df_index_name = cutoff_df.rename(columns={"instance_id": "id"})
cutoff_df_wrong_index_name = cutoff_df.rename(columns={"instance_id": "wrong_id"})
cutoff_df_wrong_time_name = cutoff_df.rename(columns={"time": "cutoff_time"})
fm1 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
fm1 = to_pandas(fm1, index='id', sort_index=True)
fm2 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_index_name)
fm2 = to_pandas(fm2, index='id', sort_index=True)
assert all((fm1 == fm2.values).values)
error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity index or a column named "instance_id"'
with pytest.raises(AttributeError, match=error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_index_name)
time_error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity time_index or a column named "time"'
with pytest.raises(AttributeError, match=time_error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_time_name)
# TODO: order doesn't match, but output matches
def test_cutoff_time_extra_columns(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
# check column was added to end of matrix
assert 'label' == fm.columns[-1]
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat],
pd_es,
cutoff_time=cutoff_df,
approximate="2 days")
# check column was added to end of matrix
assert 'label' in fm.columns
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_same_name(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'régions.COUNT(customers)': [False, False, True]},
columns=['time', 'instance_id', 'régions.COUNT(customers)'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
assert (fm['régions.COUNT(customers)'].values == cutoff_df['régions.COUNT(customers)'].values).all()
def test_cutoff_time_extra_columns_same_name_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'régions.COUNT(customers)': [False, False, True]},
columns=['time', 'instance_id', 'régions.COUNT(customers)'])
fm = calculate_feature_matrix([dfeat],
pd_es,
cutoff_time=cutoff_df,
approximate="2 days")
assert (fm['régions.COUNT(customers)'].values == cutoff_df['régions.COUNT(customers)'].values).all()
def test_instances_after_cutoff_time_removed(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
cutoff_time = datetime(2011, 4, 8)
fm = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
fm = to_pandas(fm, index='id', sort_index=True)
actual_ids = [id for (id, _) in fm.index] if isinstance(fm.index, pd.MultiIndex) else fm.index
# Customer with id 1 should be removed
assert set(actual_ids) == set([2, 0])
# TODO: Dask and Koalas do not keep instance_id after cutoff
def test_instances_with_id_kept_after_cutoff(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered, missing extra instances')
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
cutoff_time = datetime(2011, 4, 8)
fm = calculate_feature_matrix([property_feature],
es,
instance_ids=[0, 1, 2],
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
# Customer #1 is after cutoff, but since it is included in instance_ids it
# should be kept.
actual_ids = [id for (id, _) in fm.index] if isinstance(fm.index, pd.MultiIndex) else fm.index
assert set(actual_ids) == set([0, 1, 2])
# TODO: Fails with Dask
# TODO: Fails with Koalas
def test_cfm_returns_original_time_indexes(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered, indexes are lost due to not multiindexing')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0]})
fm = calculate_feature_matrix([dfeat],
es, cutoff_time=cutoff_df,
cutoff_time_in_index=True)
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
def test_cfm_returns_original_time_indexes_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['sessions']['id'], parent_entity=pd_es['customers'], primitive=Count)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0]})
# approximate, in different windows, no unapproximated aggs
fm = calculate_feature_matrix([dfeat], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="1 m")
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in different windows, unapproximated aggs
fm = calculate_feature_matrix([dfeat, agg_feat_2], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="1 m")
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in same window, no unapproximated aggs
fm2 = calculate_feature_matrix([dfeat], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="2 d")
instance_level_vals = fm2.index.get_level_values(0).values
time_level_vals = fm2.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in same window, unapproximated aggs
fm3 = calculate_feature_matrix([dfeat, agg_feat_2], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="2 d")
instance_level_vals = fm3.index.get_level_values(0).values
time_level_vals = fm3.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
def test_dask_kwargs(pd_es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_dask_persisted_es(pd_es, capsys):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
captured = capsys.readouterr()
assert "Using EntitySet persisted on the cluster as dataset " in captured[0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
class TestCreateClientAndCluster(object):
def test_user_cluster_as_string(self, monkeypatch):
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
# cluster in dask_kwargs case
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={'cluster': 'tcp://127.0.0.1:54321'},
entityset_size=1)
assert cluster == 'tcp://127.0.0.1:54321'
def test_cluster_creation(self, monkeypatch):
total_memory = psutil.virtual_memory().total
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
# jobs < tasks case
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={},
entityset_size=1)
num_workers = min(cpus, 2)
memory_limit = int(total_memory / float(num_workers))
assert cluster == (min(cpus, 2), 1, None, memory_limit)
# jobs > tasks case
match = r'.*workers requested, but only .* workers created'
with pytest.warns(UserWarning, match=match) as record:
client, cluster = create_client_and_cluster(n_jobs=1000,
dask_kwargs={'diagnostics_port': 8789},
entityset_size=1)
assert len(record) == 1
num_workers = cpus
memory_limit = int(total_memory / float(num_workers))
assert cluster == (num_workers, 1, 8789, memory_limit)
# dask_kwargs sets memory limit
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={'diagnostics_port': 8789,
'memory_limit': 1000},
entityset_size=1)
num_workers = min(cpus, 2)
assert cluster == (num_workers, 1, 8789, 1000)
def test_not_enough_memory(self, monkeypatch):
total_memory = psutil.virtual_memory().total
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
# errors if not enough memory for each worker to store the entityset
with pytest.raises(ValueError, match=''):
create_client_and_cluster(n_jobs=1,
dask_kwargs={},
entityset_size=total_memory * 2)
# does not error even if worker memory is less than 2x entityset size
create_client_and_cluster(n_jobs=1,
dask_kwargs={},
entityset_size=total_memory * .75)
def test_parallel_failure_raises_correct_error(pd_es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
error_text = 'Need at least one worker'
with pytest.raises(AssertionError, match=error_text):
calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
n_jobs=0,
approximate='1 hour')
def test_warning_not_enough_chunks(pd_es, capsys):
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster(nworkers=3) as (scheduler, [a, b, c]):
dkwargs = {'cluster': scheduler['address']}
calculate_feature_matrix([property_feature],
entityset=pd_es,
chunk_size=.5,
verbose=True,
dask_kwargs=dkwargs)
captured = capsys.readouterr()
pattern = r'Fewer chunks \([0-9]+\), than workers \([0-9]+\) consider reducing the chunk size'
assert re.search(pattern, captured.out) is not None
def test_n_jobs():
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
assert n_jobs_to_workers(1) == 1
assert n_jobs_to_workers(-1) == cpus
assert n_jobs_to_workers(cpus) == cpus
assert n_jobs_to_workers((cpus + 1) * -1) == 1
if cpus > 1:
assert n_jobs_to_workers(-2) == cpus - 1
error_text = 'Need at least one worker'
with pytest.raises(AssertionError, match=error_text):
n_jobs_to_workers(0)
# TODO: add dask version of int_es
def test_integer_time_index(int_es):
times = list(range(8, 18)) + list(range(19, 26))
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_df = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(int_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
time_level_vals = feature_matrix.index.get_level_values(1).values
sorted_df = cutoff_df.sort_values(['time', 'instance_id'], kind='mergesort')
assert (time_level_vals == sorted_df['time'].values).all()
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_integer_time_index_single_cutoff_value(int_es):
labels = [False] * 3 + [True] * 2 + [False] * 4
property_feature = IdentityFeature(int_es['log']['value']) > 10
cutoff_times = [16, pd.Series([16])[0], 16.0, pd.Series([16.0])[0]]
for cutoff_time in cutoff_times:
feature_matrix = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
time_level_vals = feature_matrix.index.get_level_values(1).values
assert (time_level_vals == [16] * 9).all()
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
# TODO: add dask version of int_es
def test_integer_time_index_datetime_cutoffs(int_es):
times = [datetime.now()] * 17
cutoff_df = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(int_es['log']['value']) > 10
error_text = "cutoff_time times must be numeric: try casting via pd\\.to_numeric\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
# TODO: add Dask version of int_es
def test_integer_time_index_passes_extra_columns(int_es):
times = list(range(8, 18)) + list(range(19, 23)) + [25, 24, 23]
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(int_es['log']['value']) > 10
fm = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
assert (fm[property_feature.get_name()] == fm['labels']).all()
# TODO: add Dask version of int_es
def test_integer_time_index_mixed_cutoff(int_es):
times_dt = list(range(8, 17)) + [datetime(2011, 1, 1), 19, 20, 21, 22, 25, 24, 23]
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times_dt,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(int_es['log']['value']) > 10
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_str = list(range(8, 17)) + ["foobar", 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_str
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_date_str = list(range(8, 17)) + ['2018-04-02', 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_date_str
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
    times_int_str = list(range(8, 17)) + ['17', 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_int_str
    # an integer-like string among integer times is still treated as mixed types and should raise a TypeError
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
def test_datetime_index_mixed_cutoff(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[17] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(es['log']['value']) > 10
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
times[9] = "foobar"
cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
cutoff_df['time'].iloc[9] = '2018-04-02 18:50:45.453216'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
times[9] = '17'
cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
def test_string_time_values_in_cutoff_time(es):
times = ['2011-04-09 10:31:27', '2011-04-09 10:30:18']
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 0]})
agg_feature = ft.Feature(es['log']['value'], parent_entity=es['customers'], primitive=Sum)
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([agg_feature], es, cutoff_time=cutoff_time)
# TODO: Dask version fails (feature matrix is empty)
# TODO: Koalas version fails (koalas groupby agg doesn't support custom functions)
def test_no_data_for_cutoff_time(mock_customer):
if not all(isinstance(entity.df, pd.DataFrame) for entity in mock_customer.entities):
pytest.xfail("Dask fails because returned feature matrix is empty; Koalas doesn't support custom agg functions")
es = mock_customer
cutoff_times = pd.DataFrame({"customer_id": [4],
"time": pd.Timestamp('2011-04-08 20:08:13')})
trans_per_session = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["sessions"], primitive=Count)
trans_per_customer = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["customers"], primitive=Count)
features = [trans_per_customer, ft.Feature(trans_per_session, parent_entity=es["customers"], primitive=Max)]
fm = calculate_feature_matrix(features, entityset=es, cutoff_time=cutoff_times)
# due to default values for each primitive
    # count will be 0, but max will be nan
np.testing.assert_array_equal(fm.values, [[0, np.nan]])
# adding missing instances not supported in Dask or Koalas
def test_instances_not_in_data(pd_es):
last_instance = max(pd_es['log'].df.index.values)
instances = list(range(last_instance + 1, last_instance + 11))
identity_feature = IdentityFeature(pd_es['log']['value'])
property_feature = identity_feature > 10
agg_feat = AggregationFeature(pd_es['log']['value'],
parent_entity=pd_es["sessions"],
primitive=Max)
direct_feature = DirectFeature(agg_feat, pd_es["log"])
features = [identity_feature, property_feature, direct_feature]
fm = calculate_feature_matrix(features, entityset=pd_es, instance_ids=instances)
assert all(fm.index.values == instances)
for column in fm.columns:
assert fm[column].isnull().all()
fm = calculate_feature_matrix(features,
entityset=pd_es,
instance_ids=instances,
approximate="730 days")
assert all(fm.index.values == instances)
for column in fm.columns:
assert fm[column].isnull().all()
def test_some_instances_not_in_data(pd_es):
a_time = datetime(2011, 4, 10, 10, 41, 9) # only valid data
b_time = datetime(2011, 4, 10, 11, 10, 5) # some missing data
c_time = datetime(2011, 4, 10, 12, 0, 0) # all missing data
times = [a_time, b_time, a_time, a_time, b_time, b_time] + [c_time] * 4
cutoff_time = pd.DataFrame({"instance_id": list(range(12, 22)),
"time": times})
identity_feature = IdentityFeature(pd_es['log']['value'])
property_feature = identity_feature > 10
agg_feat = AggregationFeature(pd_es['log']['value'],
parent_entity=pd_es["sessions"],
primitive=Max)
direct_feature = DirectFeature(agg_feat, pd_es["log"])
features = [identity_feature, property_feature, direct_feature]
fm = calculate_feature_matrix(features,
entityset=pd_es,
cutoff_time=cutoff_time)
ifeat_answer = [0, 7, 14, np.nan] + [np.nan] * 6
prop_answer = [0, 0, 1, np.nan, 0] + [np.nan] * 5
dfeat_answer = [14, 14, 14, np.nan] + [np.nan] * 6
assert all(fm.index.values == cutoff_time["instance_id"].values)
for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
np.testing.assert_array_equal(fm[x], y)
fm = calculate_feature_matrix(features,
entityset=pd_es,
cutoff_time=cutoff_time,
approximate="5 seconds")
dfeat_answer[0] = 7 # approximate calculated before 14 appears
dfeat_answer[2] = 7 # approximate calculated before 14 appears
prop_answer[3] = 0 # no_unapproximated_aggs code ignores cutoff time
assert all(fm.index.values == cutoff_time["instance_id"].values)
for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
np.testing.assert_array_equal(fm[x], y)
def test_missing_instances_with_categorical_index(pd_es):
instance_ids = [0, 1, 3, 2]
features = ft.dfs(entityset=pd_es, target_entity='customers', features_only=True)
fm = calculate_feature_matrix(entityset=pd_es,
features=features,
instance_ids=instance_ids)
assert all(fm.index.values == instance_ids)
assert isinstance(fm.index, pd.CategoricalIndex)
def test_handle_chunk_size():
total_size = 100
# user provides no chunk size
assert _handle_chunk_size(None, total_size) is None
# user provides fractional size
assert _handle_chunk_size(.1, total_size) == total_size * .1
assert _handle_chunk_size(.001, total_size) == 1 # rounds up
assert _handle_chunk_size(.345, total_size) == 35 # rounds up
# user provides absolute size
assert _handle_chunk_size(1, total_size) == 1
assert _handle_chunk_size(100, total_size) == 100
assert isinstance(_handle_chunk_size(100.0, total_size), int)
# test invalid cases
with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
_handle_chunk_size(0, total_size)
with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
_handle_chunk_size(-1, total_size)
def test_chunk_dataframe_groups():
df = pd.DataFrame({
"group": [1, 1, 1, 1, 2, 2, 3]
})
grouped = df.groupby("group")
chunked_grouped = _chunk_dataframe_groups(grouped, 2)
# test group larger than chunk size gets split up
first = next(chunked_grouped)
assert first[0] == 1 and first[1].shape[0] == 2
second = next(chunked_grouped)
assert second[0] == 1 and second[1].shape[0] == 2
# test that equal to and less than chunk size stays together
third = next(chunked_grouped)
assert third[0] == 2 and third[1].shape[0] == 2
fourth = next(chunked_grouped)
assert fourth[0] == 3 and fourth[1].shape[0] == 1
def test_calls_progress_callback(mock_customer):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
es = mock_customer
# make sure to calculate features that have different paths to same base feature
trans_per_session = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["sessions"], primitive=Count)
trans_per_customer = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["customers"], primitive=Count)
features = [trans_per_session, ft.Feature(trans_per_customer, entity=es["sessions"])]
calculate_feature_matrix(features, entityset=es, progress_callback=mock_progress_callback)
# second to last entry is the last update from feature calculation
assert np.isclose(mock_progress_callback.progress_history[-2], FEATURE_CALCULATION_PERCENTAGE * 100)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
# test with cutoff time dataframe
mock_progress_callback = MockProgressCallback()
cutoff_time = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [pd.to_datetime("2014-01-01 01:00:00"),
pd.to_datetime("2014-01-01 02:00:00"),
| pd.to_datetime("2014-01-01 03:00:00") | pandas.to_datetime |
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import overhang.tree as tree
import overhang.reaction_node as node
import logging
from overhang.dnastorage_utils.system.dnafile import *
import os
import sys
import shutil
import math
import numpy as np
import overhang.plot_utils.plot_utils as plt_util
import time
import pickle as pi
import gc
import pandas as pd
import scipy
tlogger=logging.getLogger('dna.overhang.tools.tree_analysis')
tlogger.addHandler(logging.NullHandler())
def _sweep_overhangs_1_bit(self,data_buffer,workloadID,debug_fname=None): #workloadID is a tuple indicating the complete name of the workload
output_filename=debug_fname
overhang_list=[3,5,9,17,65]
#overhang_list=[9,17,65]
    strand_length_bytes=509 #509 data bytes plus the 3 index bytes below give 512 bytes per strand
index_bytes=3
root_prefix=os.path.normpath(self._out_dir[workloadID[0]]["root"])+'/'
#set up result dictionary for the file we are analyzing
if workloadID[0] not in self._1_bit_results:
self._1_bit_results[workloadID[0]]={}
if workloadID[1] not in self._1_bit_results[workloadID[0]]:
self._1_bit_results[workloadID[0]][workloadID[1]]={}
else:
if workloadID[1] not in self._1_bit_results[workloadID[0]]:
self._1_bit_results[workloadID[0]][workloadID[1]]={}
self._1_bit_results[workloadID[0]][workloadID[1]]["opt_reaction_count"]=[] #array of integers
self._1_bit_results[workloadID[0]][workloadID[1]]["transform_reaction_count"]=[]
self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_reaction_count"]=[] #array of integers
self._1_bit_results[workloadID[0]][workloadID[1]]["overhang_array"]=overhang_list
self._1_bit_results[workloadID[0]][workloadID[1]]["no_opt_reaction_count"]=[] #array of integers
self._1_bit_results[workloadID[0]][workloadID[1]]["rotate_reaction_count"]=[]
self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_height_map"]=np.zeros((len(overhang_list),int(math.ceil(math.log((strand_length_bytes+index_bytes)*8,2)))),dtype=np.uint) #build np array to be used as heat map
self._1_bit_results[workloadID[0]][workloadID[1]]["opt_height_map"]=np.zeros((len(overhang_list),int(math.ceil(math.log((strand_length_bytes+index_bytes)*8,2)))),dtype=np.uint)
for overhang_index,overhang_count in enumerate(overhang_list):
print("{} {}".format(workloadID,overhang_count))
dna_file=OverhangBitStringWriteDNAFile(primer5=self._primer5, formatid=self._format_ID, primer3=self._primer3, out_fd=output_filename,fsmd_abbrev='OH_BITSTRING_XXX',\
bits_per_block=1,strand_length=strand_length_bytes*8,num_overhangs=overhang_count)
dna_file.write(data_buffer)
dna_file.header_flush()
strand_list=dna_file.get_strands() #get strands after encoding
start_time=time.time()
transform_tree=tree.construct_tree_transform_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=None) #+4 for the number of bytes used for indexing
self._1_bit_results[workloadID[0]][workloadID[1]]["transform_reaction_count"].append(transform_tree.order())
#print transform_tree.order()
del transform_tree
print("---- transform tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
optimized_tree=tree.construct_tree_baseopt_lite_w(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=self._1_bit_results[workloadID[0]][workloadID[1]]["opt_height_map"][overhang_index][:]) #+4 for the number of bytes used for indexing
self._1_bit_results[workloadID[0]][workloadID[1]]["opt_reaction_count"].append(optimized_tree.order())
#print optimized_tree.order()
opt_hash=optimized_tree.strand_hash_table #grab the hash table
del optimized_tree
print("---- optimized tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
rotate_tree=tree.construct_tree_rotate_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=None,opt_dictionary=opt_hash) #+4 for the number of bytes used for indexing
self._1_bit_results[workloadID[0]][workloadID[1]]["rotate_reaction_count"].append(rotate_tree.order())
#print transform_tree.order()
del rotate_tree
print("---- rotate tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
unoptimized_tree=tree.construct_tree_unoptimized_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8)
self._1_bit_results[workloadID[0]][workloadID[1]]["no_opt_reaction_count"].append(unoptimized_tree.order())
del unoptimized_tree
gc.collect()
print("---- no optimized tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
ideal_tree=tree.construct_tree_ideal_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_height_map"][overhang_index][:])
self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_reaction_count"].append(ideal_tree.order())
del ideal_tree
gc.collect()
print("---- ideal tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
tlogger.debug('Finished building trees for '+output_filename)
    #collect the number of nodes in the constructed graphs; this equals the number of reactions that have to be performed
sys.stdout.flush()
#checkpoint results by pickling data for each file
picklefile=open(root_prefix+'1_bit_results','wb')
pi.dump(self._1_bit_results,picklefile)
picklefile.close()#store the ultimate results file
def analyze_1_bit(self):
#analyze all workloads across different overhangs and data-in-block sizes
for category in self._workloadDict:
for work_file in self._workloadDict[category]:
data_buffer=self._workloadDict[category][work_file]
output_filename=category+'_'+work_file+'.output'
self._sweep_overhangs_1_bit(data_buffer,(category,work_file),output_filename)
#draw figures and dump results to csv
def draw_1_bit(self):
assert len(self._1_bit_results)>0
#figure font settings
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font',**font)
#draw results from sweeping the number of overhangs for 1 bit building blocks
#create line graphs for optimized and unoptimized reaction counts
for category in self._1_bit_results:
#self._out_dir and self._1_bit_results should both have the same keys and key structure
root_prefix=os.path.normpath(self._out_dir[category]["root"])+'/'
#create plots and axes for each category of data
opt_react_norm_fig,opt_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5),constrained_layout=True)
no_opt_react_norm_fig,no_opt_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
opt_to_no_opt_normalized_fig, opt_to_no_opt_normalized_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2), constrained_layout=True)
ideal_to_no_opt_fig, ideal_to_no_opt_axes=plt.subplots(nrows=1,ncols=1,figsize=(9,2.5),constrained_layout=True)
opt_react_raw_fig,opt_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(3,2.5), constrained_layout=True)
no_opt_react_raw_fig,no_opt_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
ideal_react_norm_fig,ideal_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5), constrained_layout=True)
ideal_react_raw_fig,ideal_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
#transform optimization graphs
transform_react_norm_fig,transform_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5), constrained_layout=True)
transform_react_raw_fig,transform_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
transform_to_no_opt_fig, transform_to_no_opt_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5),constrained_layout=True)
#rotate optimization graphs
rotate_react_norm_fig,rotate_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5), constrained_layout=True)
rotate_react_raw_fig,rotate_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
rotate_to_no_opt_fig, rotate_to_no_opt_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5),constrained_layout=True)
#dump files
opt_react_raw_dump=open(root_prefix+'1_bit_opt_raw_'+category+'.csv','w+')
no_opt_react_raw_dump=open(root_prefix+'1_bit_no_opt_raw_'+category+'.csv','w+')
opt_react_norm_dump=open(root_prefix+'1_bit_opt_react_norm_'+category+'.csv','w+')
ideal_react_norm_dump=open(root_prefix+'1_bit_ideal_react_norm_'+category+'.csv','w+')
no_opt_react_norm_dump=open(root_prefix+'1_bit_no_opt_react_norm_'+category+'.csv','w+')
opt_to_no_opt_dump=open(root_prefix+'1_bit_opt_to_no_opt_'+category+'.csv','w+')
ideal_to_no_opt_dump=open(root_prefix+'1_bit_ideal_to_no_opt_'+category+'.csv','w+')
ideal_react_raw_dump=open(root_prefix+'1_bit_ideal_raw_'+category+'.csv','w+')
#transform dump files
transform_to_no_opt_dump=open(root_prefix+'1_bit_transform_to_no_opt_'+category+'.csv','w+')
transform_react_raw_dump=open(root_prefix+'1_bit_transform_raw_'+category+'.csv','w+')
transform_react_norm_dump=open(root_prefix+'1_bit_transform_react_norm_'+category+'.csv','w+')
#rotate dump files
rotate_to_no_opt_dump=open(root_prefix+'1_bit_rotate_to_no_opt_'+category+'.csv','w+')
rotate_react_raw_dump=open(root_prefix+'1_bit_rotate_raw_'+category+'.csv','w+')
rotate_react_norm_dump=open(root_prefix+'1_bit_rotate_react_norm_'+category+'.csv','w+')
#arrays to hold data to be plotted
file_name_array=[]
opt_react_norm_data_array=[]
no_opt_react_norm_data_array=[]
opt_to_no_opt_data_array=[]
ideal_react_norm_data_array=[]
ideal_to_no_opt_data_array=[]
opt_react_raw_data_array=[]
ideal_react_raw_data_array=[]
no_opt_react_raw_data_array=[]
#transform arrays
transform_react_norm_data_array=[]
transform_to_no_opt_data_array=[]
transform_react_raw_data_array=[]
        #rotate arrays
rotate_react_norm_data_array=[]
rotate_to_no_opt_data_array=[]
rotate_react_raw_data_array=[]
#gather together data for each category and normalize results when necessary
for _file in self._1_bit_results[category]:
file_prefix=os.path.normpath(self._out_dir[category][_file])+'/'
opt_heat_map_dump=open(file_prefix+'opt_heat_map'+category+"_"+_file+'.csv','w+')
ideal_heat_map_dump=open(file_prefix+'ideal_heat_map'+category+'_'+_file+'.csv','w+')
#heat maps
ideal_heat_fig,ideal_heat_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
opt_heat_fig,opt_heat_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5),constrained_layout=True)
resultsDict=self._1_bit_results[category][_file]
#get data for heat maps
ideal_heat_data=resultsDict["ideal_height_map"]
opt_heat_data=resultsDict["opt_height_map"]
#get data for line and group them together into arrays
overhang_array=resultsDict["overhang_array"]
opt_react_raw_data_array.append(resultsDict["opt_reaction_count"])
no_opt_react_raw_data_array.append(resultsDict["no_opt_reaction_count"])
ideal_react_raw_data_array.append(resultsDict["ideal_reaction_count"])
opt_react_norm_data=[float(_)/float(resultsDict["opt_reaction_count"][0]) for _ in resultsDict["opt_reaction_count"]]
ideal_react_norm_data=[float(_)/float(resultsDict["ideal_reaction_count"][0]) for _ in resultsDict["ideal_reaction_count"]]
no_opt_react_norm_data=[float(_)/float(resultsDict["no_opt_reaction_count"][0]) for _ in resultsDict["no_opt_reaction_count"]]
opt_to_no_opt_data=[float(opt)/float(no_opt) for opt,no_opt in zip(resultsDict["opt_reaction_count"],resultsDict["no_opt_reaction_count"])]
ideal_to_no_opt_data=[float(ideal)/float(no_opt) for ideal,no_opt in zip(resultsDict["ideal_reaction_count"],resultsDict["no_opt_reaction_count"])]
#transform calcs
transform_react_raw_data_array.append(resultsDict["transform_reaction_count"])
transform_react_norm_data=[float(_)/float(resultsDict["transform_reaction_count"][0]) for _ in resultsDict["transform_reaction_count"]]
transform_to_no_opt_data=[float(ideal)/float(no_opt) for ideal,no_opt in zip(resultsDict["transform_reaction_count"],resultsDict["no_opt_reaction_count"])]
#rotate calcs
rotate_react_raw_data_array.append(resultsDict["rotate_reaction_count"])
rotate_react_norm_data=[float(_)/float(resultsDict["rotate_reaction_count"][0]) for _ in resultsDict["rotate_reaction_count"]]
rotate_to_no_opt_data=[float(ideal)/float(no_opt) for ideal,no_opt in zip(resultsDict["rotate_reaction_count"],resultsDict["no_opt_reaction_count"])]
if "." in _file:
file_name_array.append(_file.split('.')[0])
else:
file_name_array.append(_file)
opt_react_norm_data_array.append(opt_react_norm_data)
ideal_react_norm_data_array.append(ideal_react_norm_data)
no_opt_react_norm_data_array.append(no_opt_react_norm_data)
opt_to_no_opt_data_array.append(opt_to_no_opt_data)
ideal_to_no_opt_data_array.append(ideal_to_no_opt_data)
#append transform data
transform_to_no_opt_data_array.append(transform_to_no_opt_data)
transform_react_norm_data_array.append(transform_react_norm_data)
#append rotate data
rotate_to_no_opt_data_array.append(rotate_to_no_opt_data)
rotate_react_norm_data_array.append(rotate_react_norm_data)
#plot the heat maps, 1 for each file and 1 for ideal/opt (may want to compress opt and ideal into one: 1 array or 2 subplots)
heat_fig_label=category+" "+_file+" "
heat_xLabel="height in tree"
heat_yLabel="number of overhangs"
cbar_label="match count"
plt_util.plot_heatmap(ideal_heat_data,ideal_heat_axes,overhang_array,range(1,len(ideal_heat_data[0][:])+1),heat_fig_label+" ideal",heat_xLabel,heat_yLabel,cbar_label,dumpFile=ideal_heat_map_dump, fontsize=4)
plt_util.plot_heatmap(opt_heat_data,opt_heat_axes,overhang_array,range(1,len(ideal_heat_data[0][:])+1),heat_fig_label+" opt",heat_xLabel,heat_yLabel,cbar_label,dumpFile=opt_heat_map_dump,fontsize=4)
#save a heat map for each file studied
opt_heat_fig.savefig(file_prefix+'opt_heat_map_'+category+"_"+_file+'.eps',format='eps')
ideal_heat_fig.savefig(file_prefix+'ideal_heat_map_'+category+"_"+_file+'.eps',format='eps')
opt_heat_map_dump.close()
ideal_heat_map_dump.close()
markerSet=(None,None,'o','^','x','D',None,None,None,'H','+','X')
linestyleSet=('-','-','-','-','--','-','-','-','-','--','--','--')
markeverySet=[1]*12
#draw line charts
#optimized reactions raw graph
plt_util.plot_components_wrapper(overhang_array,opt_react_raw_axes,opt_react_raw_data_array,category+" (opt raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=opt_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_react_raw_axes.get_position()
opt_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
opt_react_raw_axes.get_legend().remove()
opt_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
#ideal reactions raw graph
plt_util.plot_components_wrapper(overhang_array,ideal_react_raw_axes,ideal_react_raw_data_array,category+" (ideal raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=ideal_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_react_raw_axes.get_position()
ideal_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
ideal_react_raw_axes.get_legend().remove()
ideal_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
        #transform raw graph
plt_util.plot_components_wrapper(overhang_array,transform_react_raw_axes,transform_react_raw_data_array,category+" (transform raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=transform_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = transform_react_raw_axes.get_position()
transform_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
transform_react_raw_axes.get_legend().remove()
transform_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
        #rotate raw graph
plt_util.plot_components_wrapper(overhang_array,rotate_react_raw_axes,rotate_react_raw_data_array,category+" (rotate raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=rotate_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = rotate_react_raw_axes.get_position()
rotate_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
rotate_react_raw_axes.get_legend().remove()
rotate_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
#No optimized reactions raw graph
plt_util.plot_components_wrapper(overhang_array,no_opt_react_raw_axes,no_opt_react_raw_data_array,category+" (no-opt raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=no_opt_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = no_opt_react_raw_axes.get_position()
no_opt_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
no_opt_react_raw_axes.get_legend().remove()
no_opt_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.4,0.7),ncol=1)
#opt self normalized graph
plt_util.plot_components_wrapper(overhang_array,opt_react_norm_axes,opt_react_norm_data_array,category+" (opt normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=opt_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_react_norm_axes.get_position()
opt_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
opt_react_norm_axes.get_legend().remove()
opt_react_norm_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.5,0.68),ncol=1)
#no opt self normalized graph
plt_util.plot_components_wrapper(overhang_array,no_opt_react_norm_axes,no_opt_react_norm_data_array,category+" (no opt normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=no_opt_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = no_opt_react_norm_axes.get_position()
no_opt_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
no_opt_react_norm_axes.get_legend().remove()
no_opt_react_norm_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.61,0.43),ncol=1)
#ideal self normalized graph
plt_util.plot_components_wrapper(overhang_array,ideal_react_norm_axes,ideal_react_norm_data_array,category+" (ideal normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=ideal_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = ideal_react_norm_axes.get_position()
ideal_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
ideal_react_norm_axes.get_legend().remove()
#ideal_react_norm_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
#transform self normalized
plt_util.plot_components_wrapper(overhang_array,transform_react_norm_axes,transform_react_norm_data_array,category+" (transform normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=transform_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = transform_react_norm_axes.get_position()
transform_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
transform_react_norm_axes.get_legend().remove()
#ideal_react_norm_fig.legend(fontsize=4,loc='center',bb
#rotate self normalized
plt_util.plot_components_wrapper(overhang_array,rotate_react_norm_axes,rotate_react_norm_data_array,category+" (rotate normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=rotate_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = rotate_react_norm_axes.get_position()
rotate_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
rotate_react_norm_axes.get_legend().remove()
#ideal_react_norm_fig.legend(fontsize=4,loc='center',bb
#opt reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,opt_to_no_opt_normalized_axes,opt_to_no_opt_data_array,category + " (opt/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=opt_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_to_no_opt_normalized_axes.get_position()
opt_to_no_opt_normalized_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
opt_to_no_opt_normalized_axes.get_legend().remove()
#opt_to_no_opt_normalized_fig.legend(fontsize=4.3,loc='center',bbox_to_anchor=(0.3,0.68),ncol=1)
opt_to_no_opt_normalized_fig.legend(fontsize=4.3,loc='center',bbox_to_anchor=(0.61,0.43),ncol=1)
#ideal reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,ideal_to_no_opt_axes,ideal_to_no_opt_data_array,category + " (ideal/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=ideal_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = ideal_to_no_opt_axes.get_position()
ideal_to_no_opt_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
ideal_to_no_opt_axes.get_legend().remove()
#ideal_to_no_opt_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.729,0.7),ncol=1)
#transform reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,transform_to_no_opt_axes,transform_to_no_opt_data_array,category + " (transform/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=transform_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = transform_to_no_opt_axes.get_position()
transform_to_no_opt_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
transform_to_no_opt_axes.get_legend().remove()
#ideal_to_no_opt_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.729,0.7),ncol=1)
#rotate reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,rotate_to_no_opt_axes,rotate_to_no_opt_data_array,category + " (rotate/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=rotate_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = rotate_to_no_opt_axes.get_position()
rotate_to_no_opt_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
rotate_to_no_opt_axes.get_legend().remove()
#ideal_to_no_opt_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.729,0.7),ncol=1)
#close dump files
opt_react_norm_dump.close()
no_opt_react_norm_dump.close()
opt_to_no_opt_dump.close()
ideal_to_no_opt_dump.close()
opt_react_raw_dump.close()
no_opt_react_raw_dump.close()
ideal_react_raw_dump.close()
ideal_react_norm_dump.close()
#close transform dumps
transform_react_raw_dump.close()
transform_react_norm_dump.close()
transform_to_no_opt_dump.close()
#close rotate dumps
rotate_react_raw_dump.close()
rotate_react_norm_dump.close()
rotate_to_no_opt_dump.close()
#ideal to no opt bar graph
df= | pd.read_csv(root_prefix+'1_bit_ideal_to_no_opt_'+category+'.csv',index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 17:16:12 2019
@author: Meagatron
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import math
import itertools
from dtw import dtw
import timeit
from helper_functions import normalize,alphabetize_ts,hamming_distance
"""------------- Initialization ------------- """
start = timeit.default_timer()
data = pd.read_csv('test_data2.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
y_alphabet_size=4
word_lenth=3
window_size=round( len(x1) *10 /100 )
skip_offset=round(window_size/2)
ham_distance=1
epsilon = 1e-6
def segment_ts():
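    # Descriptive note: slide a window of `window_size` samples across x1
    # (advancing by skip_offset-1 each iteration), normalise each window,
    # split it into `word_lenth` equal chunks, alphabetise each chunk and
    # concatenate the letters into one SAX word per window.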
ts_len=len(x1)
mod = ts_len%window_size
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = x1[curr_count:(curr_count+window_size)]
sub_section=normalize(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
curr_letter=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter=alphabetize_ts(chunk)
curr_word+=str(curr_letter)
complete_indices.append(curr_count)
num+=chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count=curr_count+skip_offset-1
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize,indices,df_sax=segment_ts()
""" Complete Words """
def complete_word():
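    # Descriptive note: map each full-length SAX word to the list of window
    # start indices at which it occurs.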
    complete_indices=indices
    """ Similar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
#alphabetize1,indices1,df_sax=segment_ts()
def Compare_Shape():
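    # Descriptive note: for every SAX word, collect the other words that sit at
    # exactly `ham_distance` Hamming distance from it and pool their window
    # indices, so windows with near-identical shapes end up grouped together.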
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
compare_strings,compare_list=Compare_Shape()
def dtw_test2():
df_dtw_prep=df_sax
dtw_df= | pd.DataFrame() | pandas.DataFrame |
from numpy.core.fromnumeric import shape
import pytest
import pandas as pd
import datetime
from fast_trade.build_data_frame import (
build_data_frame,
detect_time_unit,
load_basic_df_from_csv,
apply_transformers_to_dataframe,
apply_charting_to_df,
prepare_df,
process_res_df,
)
def test_detect_time_unit_s():
mock_timestring = 1595115901734
result = detect_time_unit(mock_timestring)
assert result == "ms"
def test_detect_time_unit_ms():
mock_timestring = 1595115901
result = detect_time_unit(mock_timestring)
assert result == "s"
def test_load_basic_df_from_csv_str_1():
mock_data_path = "./test/ohlcv_data.csv.txt"
result_df = load_basic_df_from_csv(mock_data_path)
header = list(result_df.head())
assert "close" in header
assert "open" in header
assert "high" in header
assert "low" in header
assert "volume" in header
assert result_df.index.name == "date"
def test_load_basic_df_from_csv_list_1():
mock_data_path = "./test/ohlcv_data.csv.txt"
result_df = load_basic_df_from_csv(mock_data_path)
expected_line = [0.01404, 0.01, 0.025, 0.01, 3117.0]
assert list(result_df.iloc[1]) == expected_line
def test_load_basic_df_from_csv_str_error_1():
mock_data_path = "./test/SomeFakeNews.csv.txt"
with pytest.raises(Exception, match=r"File not found:*"):
load_basic_df_from_csv(mock_data_path)
def test_apply_transformers_to_dataframe_1_ind():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_transformers = [
{"transformer": "sma", "name": "example_transformer_name", "args": [3]}
]
result_df = apply_transformers_to_dataframe(mock_df, mock_transformers)
header = list(result_df.head())
assert "example_transformer_name" in header
assert "FAKE_transformer_name" not in header
def test_apply_transformers_to_dataframe_no_args():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_transformers = [{"transformer": "rsi", "name": "rsi", "args": []}]
result_df = apply_transformers_to_dataframe(mock_df, mock_transformers)
assert "rsi" in list(result_df.columns)
def test_apply_transformers_to_dataframe_no_args_multi_col():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_transformers = [{"transformer": "wto", "name": "wto", "args": []}]
result_df = apply_transformers_to_dataframe(mock_df, mock_transformers)
assert "wto_wt1" in list(result_df.columns)
assert "wto_wt2" in list(result_df.columns)
def test_apply_charting_to_df_1():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt", index_col="date")
# mock_df.set_index(["date"], inplace=True)
mock_df.index = pd.to_datetime(mock_df.index, unit="s")
mock_chart_period = "2Min"
mock_start_time = "2018-04-17"
mock_stop_time = ""
result_df = apply_charting_to_df(
mock_df, mock_chart_period, mock_start_time, mock_stop_time
)
assert (result_df.iloc[2].name - result_df.iloc[1].name).total_seconds() == 120
assert (result_df.iloc[4].name - result_df.iloc[1].name).total_seconds() == 360
def test_apply_charting_to_df_2():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_df.set_index(["date"], inplace=True)
mock_df.index = pd.to_datetime(mock_df.index, unit="s")
mock_chart_period = "1Min"
mock_start_time = "2018-04-17 04:00:00"
mock_stop_time = "2018-04-17 04:10:00"
past_stop_time = datetime.datetime.strptime(
"2018-04-17 04:11:00", "%Y-%m-%d %H:%M:%S"
)
result_df = apply_charting_to_df(
mock_df, mock_chart_period, mock_start_time, mock_stop_time
)
assert result_df.iloc[-1].name < past_stop_time
def test_apply_charting_to_df_3():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_df.set_index(["date"], inplace=True)
mock_df.index = pd.to_datetime(mock_df.index, unit="s")
mock_chart_period = "1Min"
mock_start_time = ""
mock_stop_time = "2018-04-17 04:10:00"
# past_stop_time = "2018-04-17 04:11:00"
# result_df = apply_charting_to_df(
# mock_df, mock_chart_period, mock_start_time, mock_stop_time
# )
past_stop_time = datetime.datetime.strptime(
"2018-04-17 04:11:00", "%Y-%m-%d %H:%M:%S"
)
result_df = apply_charting_to_df(
mock_df, mock_chart_period, mock_start_time, mock_stop_time
)
assert result_df.index[0] < past_stop_time
def test_apply_charting_to_df_stop_time_int():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_df.index = pd.to_datetime(mock_df.date, unit="s")
mock_chart_period = "1Min"
mock_start_time = ""
mock_stop_time = 1523938200
past_stop_time = datetime.datetime.strptime(
"2018-04-17 04:11:00", "%Y-%m-%d %H:%M:%S"
)
result_df = apply_charting_to_df(
mock_df, mock_chart_period, mock_start_time, mock_stop_time
)
assert result_df.index[0] < past_stop_time
def test_apply_charting_to_df_start_time_int():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_df.index = pd.to_datetime(mock_df.date, unit="s")
mock_chart_period = "1Min"
mock_start_time = 1523938200
mock_stop_time = ""
past_stop_time = datetime.datetime.strptime(
"2018-04-17 04:11:00", "%Y-%m-%d %H:%M:%S"
)
result_df = apply_charting_to_df(
mock_df, mock_chart_period, mock_start_time, mock_stop_time
)
assert result_df.index[0] < past_stop_time
def test_process_res_df():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt", parse_dates=True)
mock_df.index = pd.to_datetime(mock_df.date, unit="s")
mock_ind = {"name": "ind_1"}
val1 = [0, 1, 2, 3, 4, 5, 6, 7, 8]
val2 = [8, 7, 6, 5, 4, 3, 2, 1, 0]
mock_trans_res = pd.DataFrame(
data={"Val 1": val1, "Val 2": val2},
index=mock_df.index,
)
res = process_res_df(mock_df, mock_ind, mock_trans_res)
assert list(res.ind_1_val_1.values) == val1
assert list(res.ind_1_val_2.values) == val2
def test_apply_transformers_to_dataframe():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt", parse_dates=True)
mock_df.index = pd.to_datetime(mock_df.date, unit="s")
def test_prepare_df():
mock_df = | pd.read_csv("./test/ohlcv_data.csv.txt", parse_dates=True) | pandas.read_csv |
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from thoipapy.utils import convert_truelike_to_bool, convert_falselike_to_bool
import thoipapy
def fig_plot_BOcurve_mult_train_datasets(s):
"""Plot the BO-curve for multiple training datasets.
Takes the datasets listed in settings under "train_datasets" and "test_datasets"
and plots the BO-curve of each combination in a single figure.
The Area Under the BO Curve for a sample size of 0 to 10 (AUBOC) is shown in the legend.
Currently plots both the new and old performance method.
NEW METHOD
----------
Performance = overlap between experiment and predicted MINUS the overlap expected in random selections
OLD METHOD
----------
Performance = overlap between experiment and predicted DIVIDED BY the overlap expected in random selections
Parameters
----------
s : dict
Settings dictionary for figures.
"""
# plt.rcParams.update({'font.size': 7})
test_set_list, train_set_list = thoipapy.utils.get_test_and_train_set_lists(s)
test_dataset_str = "-".join([str(n) for n in test_set_list])
train_dataset_str = "-".join([str(n) for n in train_set_list])
mult_testname = "testsets({})_trainsets({})".format(test_dataset_str, train_dataset_str)
sys.stdout.write(mult_testname)
mult_THOIPA_dir = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "summaries", mult_testname)
thoipapy.utils.make_sure_path_exists(mult_THOIPA_dir)
plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname)
plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname, sheet_name="df_o_over_r", suffix="_BO_curve_old_method")
def plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname, sheet_name="df_o_minus_r", suffix="_BO_curve"):
""" Separate function allowing a toggle of the OLD or NEW performance methods
Parameters
----------
s : dict
Settings dictionary for figures.
train_set_list : list
List of training datasets in selection
E.g. ["set02", "set04"]
test_set_list : list
List of test datasets in selection
E.g. ["set03", "set31"]
mult_THOIPA_dir : str
Path to folder containing results for multiple THOIPA comparisons.
mult_testname : str
String denoting this combination of test and training datasets
E.g. testsets(2)_trainsets(2)
sheet_name : str
Excel sheet_name
This is the toggle deciding whether the OLD or NEW performance measure is used
Default = new method ("df_o_minus_r"), where the overlap MINUS random_overlap is used
suffix : str
Suffix for figure
E.g. "" or "_old_method_o_over_r"
"""
BO_curve_png = os.path.join(mult_THOIPA_dir, "{}{}.png".format(mult_testname, suffix))
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
for train_set in train_set_list:
trainsetname = "set{:02d}".format(int(train_set))
for test_set in test_set_list:
testsetname = "set{:02d}".format(int(test_set))
# /media/mark/sindy/m_data/THOIPA_data/results/Bo_Curve/Testset03_Trainset01.THOIPA.validation/bocurve_data.xlsx
bocurve_data_xlsx = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "bocurve_data.xlsx")
df = pd.read_excel(bocurve_data_xlsx, sheet_name=sheet_name, index_col=0)
df["mean_"] = df.mean(axis=1)
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df["mean_"].iloc[:s["n_residues_AUBOC_validation"]]
# use the composite trapezoidal rule to get the area under the curve
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.trapz.html
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
df["mean_"].plot(ax=ax, label="Test{}_Train{}(AUBOC={:0.1f})".format(testsetname, trainsetname, AUBOC))
ax.set_xlabel("sample size")
ax.set_ylabel("performance\n(observed overlap - random overlap)")
ax.set_xticks(range(1, df.shape[0] + 1))
ax.set_xticklabels(df.index)
ax.legend()
fig.tight_layout()
fig.savefig(BO_curve_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(BO_curve_png))
sys.stdout.write("\nfig_plot_BO_curve_mult_train_datasets finished ({})".format(BO_curve_png))
def compare_selected_predictors(s, logging):
"""Plot the BO-curve for multiple prediction methods
Takes the datasets listed in settings under the "selected_predictors" tab
(e.g. ["Testset03_Trainset04.THOIPA","Testset03.LIPS"])
and plots the BO-curves in a single figure.
The Area Under the BO Curve for a sample size of 0 to 10 (AUBOC) is shown in the legend.
Currently plots both the new and old performance method.
Performance is measured with the NEW METHOD:
Performance = overlap between experiment and predicted MINUS the overlap expected in random selections
Parameters
----------
s : dict
Settings dictionary for figures.
"""
# if s["set_number"] != s["test_datasets"]:
# raise Exception("set_number and test_datasets are not identical in settings file. This is recommended for test/train validation.")
# plt.rcParams.update({'font.size': 7})
logging.info("\n--------------- starting compare_selected_predictors ---------------\n")
BO_curve_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_BO_curve.png"
AUBOC_bar_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_AUBOC_barchart.png"
ROC_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_ROC.png"
thoipapy.utils.make_sure_path_exists(BO_curve_png, isfile=True)
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
predictors_df = pd.read_excel(s["settings_path"], sheet_name="selected_predictors")
predictors_df["include"] = predictors_df["include"].apply(convert_truelike_to_bool, convert_nontrue=False)
predictors_df["include"] = predictors_df["include"].apply(convert_falselike_to_bool)
predictors_df = predictors_df.loc[predictors_df.include == True]
predictor_list = predictors_df.predictor.tolist()
area_under_curve_dict = {}
# create an empty dataframe to keep the pycharm IDE happy
df = pd.DataFrame()
for predictor_name in predictor_list:
bocurve_data_xlsx: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/data/{s['setname']}_thoipa_loo_bo_curve_data.xlsx"
if not os.path.isfile(bocurve_data_xlsx):
raise FileNotFoundError("bocurve_data_xlsx does not exist ({}). Try running run_testset_trainset_validation".format(bocurve_data_xlsx))
df = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
df["mean_"] = df.mean(axis=1)
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df["mean_"].iloc[:s["n_residues_AUBOC_validation"]]
# use the composite trapezoidal rule to get the area under the curve
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.trapz.html
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
area_under_curve_dict[predictor_name] = AUBOC
df["mean_"].plot(ax=ax, label="{}(AUBOC={:0.1f})".format(predictor_name, AUBOC))
ax.set_xlabel("sample size")
ax.set_ylabel("performance\n(observed overlap - random overlap)")
ax.set_xticks(range(1, df.shape[0] + 1))
ax.set_xticklabels(df.index)
ax.legend()
fig.tight_layout()
fig.savefig(BO_curve_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(BO_curve_png))
plt.close("all")
AUBOC_ser = pd.Series(area_under_curve_dict).sort_index()
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
AUBOC_ser.plot(ax=ax, kind="bar")
ax.set_ylabel("performance (AUBOC)")
fig.tight_layout()
fig.savefig(AUBOC_bar_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(AUBOC_bar_png))
plt.close("all")
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
for predictor_name in predictor_list:
# "D:\data_thoipapy\results\compare_testset_trainset\data\Testset03_Trainset04.THOIPA\Testset03_Trainset04.THOIPA.ROC_data.pkl"
# ROC_pkl = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", predictor_name, "data", "{}.ROC_data.pkl".format(predictor_name))
testsetname = "set{:02d}".format(int(s['test_datasets']))
ROC_pkl = Path(s["data_dir"]) / "results" / testsetname / f"blindvalidation/{predictor_name}/ROC_data.pkl"
if os.path.isfile(ROC_pkl):
with open(ROC_pkl, "rb") as f:
ROC_out_dict = pickle.load(f)
ax.plot(ROC_out_dict["false_positive_rate_mean"], ROC_out_dict["true_positive_rate_mean"], label='{} ({:0.2f})'.format(predictor_name, ROC_out_dict["mean_roc_auc"]), lw=1.5)
else:
sys.stdout.write("\nPICKLE WITH ROC DATA NOT FOUND : {}".format(ROC_pkl))
continue
ax.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='random')
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel("False positive rate")
ax.set_ylabel("True positive rate")
ax.legend(loc="lower right")
fig.tight_layout()
fig.savefig(ROC_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(ROC_png))
sys.stdout.write("\nBO_curve_png ({})\n".format(BO_curve_png))
logging.info("\n--------------- finished compare_selected_predictors ---------------\n")
def combine_BOcurve_files_hardlinked(s):
Train04_Test01_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset04_Testset01.bocurve.csv"
df41 = pd.read_csv(Train04_Test01_BoCurve_file, index_col=0)
df41_ratio = df41[df41.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df41[df41.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df41_ratio_df = df41_ratio.to_frame(name="Tr4Te1Ratio")
df41_LIPS_ratio = df41[df41.parameters == "LIPS_observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df41[df41.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df41_LIPS_ratio_df = df41_LIPS_ratio.to_frame(name="Tr4Te1LIPSRatio")
Train04_Test02_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset04_Testset02.bocurve.csv"
df42 = pd.read_csv(Train04_Test02_BoCurve_file, index_col=0)
df42_ratio = df42[df42.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df42[df42.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df42_ratio_df = df42_ratio.to_frame(name="Tra4Tes2Ratio")
df42_LIPS_ratio = df42[df42.parameters == "LIPS_observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df42[df42.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df42_LIPS_ratio_df = df42_LIPS_ratio.to_frame(name="Tr4Te2LIPSRatio")
Train04_Test03_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset04_Testset03.bocurve.csv"
df43 = pd.read_csv(Train04_Test03_BoCurve_file, index_col=0)
df43_ratio = df43[df43.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df43[df43.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df43_ratio_df = df43_ratio.to_frame(name="Tra4Tes3Ratio")
df43_LIPS_ratio = df43[df43.parameters == "LIPS_observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df43[df43.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df43_LIPS_ratio_df = df43_LIPS_ratio.to_frame(name="Tr4Te3LIPSRatio")
Train02_Test01_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset02_Testset01.bocurve.csv"
df21 = pd.read_csv(Train02_Test01_BoCurve_file, index_col=0)
df21_ratio = df21[df21.parameters == "observed_overlap"].drop("parameters", axis=1).mean(axis=1) / df21[df21.parameters == "random_overlap"].drop("parameters", axis=1).mean(axis=1)
df21_ratio_df = df21_ratio.to_frame(name="Tra2Te1Ratio")
Train02_Test02_BoCurve_file = r"D:\THOIPA_data\results\Bo_Curve\Trainset02_Testset02.bocurve.csv"
df22 = pd.read_csv(Train02_Test02_BoCurve_file, index_col=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
import numpy as np
import pandas as pd
import utils
import time
from collections import deque, defaultdict
from scipy.spatial.distance import cosine
from scipy import stats
import math
seed = SEED
cur_stage = CUR_STAGE
mode = cur_mode
#used_recall_source = 'i2i_w02-b2b-i2i2i'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b'
used_recall_source = cur_used_recall_source
sum_mode = 'nosum'
used_recall_source = used_recall_source+'-'+sum_mode
print( f'Recall source used: {used_recall_source}')
def feat_item_sum_mean_sim_weight_loc_weight_time_weight_rank_weight(data):
df = data.copy()
df = df[ ['user','item','sim_weight','loc_weight','time_weight','rank_weight','index'] ]
feat = df[ ['index','user','item'] ]
df = df.groupby( ['user','item'] )[ ['sim_weight','loc_weight','time_weight','rank_weight'] ].agg( ['sum','mean'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['sim_weight','loc_weight','time_weight','rank_weight'] for j in ['sum','mean'] ]
df.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
def feat_sum_sim_loc_time_weight(data):
df = data.copy()
df = df[ ['index','sim_weight','loc_weight','time_weight'] ]
feat = df[ ['index'] ]
feat['sum_sim_loc_time_weight'] = df['sim_weight'] + df['loc_weight'] + df['time_weight']
feat = feat[ ['sum_sim_loc_time_weight'] ]
return feat
def feat_road_item_text_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_text_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_cossim'] ]
return feat
def feat_road_item_text_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text )
return a
else:
return np.nan
feat['road_item_text_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_eulasim'] ]
return feat
def feat_road_item_text_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text, ord=1 )
return a
else:
return np.nan
feat['road_item_text_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_mansim'] ]
return feat
def feat_road_item_image_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
c = np.dot( item1_image, item2_image )
a = np.linalg.norm( item1_image )
b = np.linalg.norm( item2_image )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_image_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_cossim'] ]
return feat
def feat_road_item_image_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image )
return a
else:
return np.nan
feat['road_item_image_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_eulasim'] ]
return feat
def feat_road_item_image_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]  # image vector (v[0] is the text vector), consistent with the other image-sim feats
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image, ord=1 )
return a
else:
return np.nan
feat['road_item_image_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_mansim'] ]
return feat
def feat_i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
i2i_sim_seq = {}
st0 = time.time()
tot = 0
for user, items in user_item_dict.items():
times = user_time_dict[user]
if tot % 500 == 0:
print( f'tot: {len(user_item_dict)}, now: {tot}' )
tot += 1
for loc1, item in enumerate(items):
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
if (item,relate_item) not in new_keys:
continue
t1 = times[loc1]
t2 = times[loc2]
i2i_sim_seq.setdefault((item,relate_item), [])
i2i_sim_seq[ (item,relate_item) ].append( (loc1, loc2, t1, t2, len(items) ) )
st1 = time.time()
print(st1-st0)
return i2i_sim_seq
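# Illustrative sketch (toy example, not executed by the pipeline) of the structure built
# by feat_i2i_seq. Only pairs that actually occur as (road_item, item) candidates in the
# recall table (the new_keys filter) are kept. For a user whose history is [A, B, A]
# with times [0.10, 0.11, 0.13], each co-occurrence of the pair (A, B) is stored as a
# (loc1, loc2, t1, t2, history_length) tuple:
#   i2i_sim_seq[(A, B)] == [(0, 1, 0.10, 0.11, 3), (2, 1, 0.13, 0.11, 3)]
# The downstream feat_i2i_* functions re-weight these raw records in different ways.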
def feat_i2i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:50]:
res[i[0]]=i[1]
sim_item_p2[key] = res
i2i2i_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if item2 == item1:
continue
for item3 in sim_item_p2[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2i_sim_seq.setdefault((item1,item3), [])
i2i2i_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], sim_item_p2[item2][item3],
sim_item_p1[item1][item2], sim_item_p1[item2][item3] ) )
return i2i2i_sim_seq
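# Illustrative sketch (toy notation) of the two-hop ("i2i2i") evidence collected above.
# For a candidate pair (item1, item3) present in the recall table, every bridge item2
# found in the truncated top-50 neighbour lists contributes one tuple:
#   i2i2i_sim_seq[(item1, item3)] == [
#       (item2, p2_sim(item1, item2), p2_sim(item2, item3),
#               p1_sim(item1, item2), p1_sim(item2, item3)),
#       ...
#   ]
# where p2 is the count-dampened similarity (cij / (cnt_i * cnt_j) ** 0.2) and p1 the
# fully normalised one (cij / (cnt_i * cnt_j)).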
def feat_i2i2b_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:100]:
res[i[0]]=i[1]
sim_item_p2[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:100]:
blend_score[i][j] = cij
i2i2b_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if (item2 == item1) or (item2 not in blend_score.keys()):
continue
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2b_sim_seq.setdefault((item1,item3), [])
i2i2b_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], blend_score[item2][item3],
sim_item_p1[item1][item2], blend_score[item2][item3] ) )
return i2i2b_sim_seq
def feat_i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
for key in new_keys:
if np.isnan( result[key] ):
continue
result[key] = result[key] / ((item_cnt[key[0]] * item_cnt[key[1]]) ** 0.2)
print('Finished getting result')
feat['i2i_sim'] = feat['new_keys'].map(result)
#import pdb
#pdb.set_trace()
#i2i_seq_feat = pd.concat( [feat,i2i_seq_feat], axis=1 )
#i2i_seq_feat['itemAB'] = i2i_seq_feat['road_item'].astype('str') + '-' + i2i_seq_feat['item'].astype('str')
feat = feat[ ['i2i_sim'] ]
return feat
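# Minimal sketch (toy inputs, not used by the pipeline) of the per-record weight that
# feat_i2i_sim accumulates. Assuming timestamps increase with position in the click
# sequence, both branches above reduce to the symmetric form below; the summed score is
# then damped by (item_cnt[i] * item_cnt[j]) ** 0.2.
def _i2i_record_weight_sketch(loc1, loc2, t1, t2, record_len):
    time_weight = max(0.2, 1 - abs(t1 - t2) * 100)
    loc_weight = max(0.2, 0.9 ** (abs(loc1 - loc2) - 1))
    return loc_weight * time_weight / math.log(1 + record_len)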
def feat_i2i_sim_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += loc_weight
feat['i2i_sim_abs_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_abs_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
loc_diff = loc1-loc2
loc_weight = (loc_base**loc_diff)
if abs(loc_weight) <= 0.2:
if loc_weight > 0:
loc_weight = 0.2
else:
loc_weight = -0.2
result[key] += loc_weight
feat['i2i_sim_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_abs_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
result[key] += time_weight
feat['i2i_sim_abs_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_sim_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - (t1 - t2) * 100)
if abs(time_weight)<=0.2:
if time_weight > 0:
time_weight = 0.2
else:
time_weight = -0.2
result[key] += time_weight
feat['i2i_sim_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - abs(t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = abs(loc2-loc1)
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
if loc1-loc2>0:
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
else:
result[key] -= 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_mean_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += ( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len) ) / len(records)
feat['i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_sum_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] + item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_sum_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_sum_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_multi_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] * item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_multi_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_multi_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_b2b_sim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
blend_sim = utils.load_sim(item_blend_sim_path)
b2b_sim = {}
for item in blend_sim:
i = item[0]
b2b_sim.setdefault(i,{})
for j,cij in item[1][:100]:
b2b_sim[i][j] = cij
vals = feat[ ['road_item','item'] ].values
result = []
for val in vals:
item1 = val[0]
item2 = val[1]
if item1 in b2b_sim.keys():
if item2 in b2b_sim[item1].keys():
result.append( b2b_sim[ item1 ][ item2 ] )
else:
result.append( np.nan )
else:
result.append( np.nan )
feat['b2b_sim'] = result
feat = feat[ ['b2b_sim'] ]
return feat
def feat_itemqa_loc_diff(data):
df = data.copy()
feat = df[ ['index','query_item_loc','road_item_loc'] ]
feat['itemqa_loc_diff'] = feat['road_item_loc'] - feat['query_item_loc']
def func(s):
if s<0:
return -s
return s
feat['abs_itemqa_loc_diff'] = feat['itemqa_loc_diff'].apply(func)
feat = feat[ ['itemqa_loc_diff','abs_itemqa_loc_diff'] ]
return feat
def feat_sim_three_weight(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean'] = feat['com_item_loc_weights_sum'] / feat['com_item_cnt']
feat['com_item_time_weights_mean'] = feat['com_item_time_weights_sum'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum','com_item_time_weights_sum','com_item_record_weights_sum',
'com_item_loc_weights_mean','com_item_time_weights_mean','com_item_record_weights_mean' ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
feat['user_item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
for col in ['i2i_score','blend_score','i2i2i_score']:
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user_item',col+'_sum'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user_item',col+'_mean'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_different_type_road_score_sum_mean_new(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
recall_source_names = ['i2i_w02','b2b','i2i2i','i2i_w10','i2i2b']
recall_source_names = [ i+'_score' for i in recall_source_names ]
for idx,col in enumerate(recall_source_names):
feat[col] = feat['sim_weight']
feat.loc[ feat['recall_type']!=idx, col ] = np.nan
for col in recall_source_names:
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user','item',col+'_sum'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user','item',col+'_mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat_list = recall_source_names + [ col+'_sum' for col in recall_source_names ] + [ col+'_mean' for col in recall_source_names ]
feat = feat[ feat_list ]
return feat
def feat_sim_base(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
road_item_cnt = np.zeros( num, dtype=float )
t_item_cnt = np.zeros( num, dtype=float )
com_item_cij = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
road_item_cnt[i] = item_cnt[ road_item[i] ]
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_cij[i] = sim_item[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
road_item_cnt[i] = np.nan
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
if t_item[i] in item_set:
t_item_cnt[i] = item_cnt[ t_item[i] ]
else:
t_item_cnt[i] = np.nan
feat['road_item_cnt'] = road_item_cnt
feat['item_cnt'] = t_item_cnt
feat['com_item_cij'] = com_item_cij
feat['com_item_cnt'] = t_com_item_cnt
feat = feat[ ['road_item_cnt','item_cnt','com_item_cij','com_item_cnt' ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_u2i_abs_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if loc_weight<=0.1:
loc_weight = 0.1
result.append(loc_weight)
feat['u2i_abs_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if abs(loc_weight)<=0.1:
loc_weight = 0.1
if loc2 < loc1:
loc_weight = -loc_weight
result.append(loc_weight)
feat['u2i_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_abs_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if time_weight<=0.1:
time_weight = 0.1
result.append(time_weight)
feat['u2i_abs_time_weights'] = result
cols = [ 'u2i_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_u2i_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if abs(time_weight)<=0.1:
time_weight = 0.1
if t1 > t2:
time_weight = -time_weight
result.append(time_weight)
feat['u2i_time_weights'] = result
cols = [ 'u2i_time_weights' ]
feat = feat[ cols ]
return feat
def feat_automl_cate_count(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'road_item','item','road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
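# Minimal sketch (toy frame, illustrative only) of the count-encoding pattern used by
# the feat_automl_* functions: mapping a column through its own value_counts gives, per
# row, how often that row's value appears in the whole candidate table.
def _count_encoding_sketch():
    toy = pd.DataFrame({'item': ['a', 'b', 'a', 'a']})
    toy['item_count'] = toy['item'].map(toy['item'].value_counts())
    return toy  # item_count == [3, 1, 3, 3]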
def feat_automl_user_cate_count(data):
df = data.copy()
feat = df[ ['index','user','road_item','item'] ]
feat['user-road_item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str')
feat['user-item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
feat['user-road_item-item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'user-road_item','user-item','user-road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_u2i_road_item_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time']]
feat = feat.groupby(['user','road_item_loc']).first().reset_index()
feat_group = feat.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_group['road_item_time'].diff(1)
feat2 = feat_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_time_diff_history'
feat2.name = 'u2i_road_item_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2],axis=1),how='left',on=['user','road_item_loc'])
cols = [ 'u2i_road_item_time_diff_history', 'u2i_road_item_time_diff_future' ]
feat = feat[ cols ]
return feat
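# Minimal sketch (toy data, not part of the pipeline) of the groupby-diff trick used
# above: diff(1) is the gap to the same user's previous click ("history"), diff(-1) the
# negated gap to the next one ("future").
def _u2i_time_diff_sketch():
    toy = pd.DataFrame({'user': [1, 1, 1],
                        'road_item_loc': [0, 1, 2],
                        'road_item_time': [0.10, 0.13, 0.17]})
    g = toy.sort_values(['user', 'road_item_loc']).set_index(['user', 'road_item_loc']).groupby('user')
    hist = g['road_item_time'].diff(1)   # [NaN, ~0.03, ~0.04]
    futr = g['road_item_time'].diff(-1)  # [~-0.03, ~-0.04, NaN]
    return hist, futr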
def feat_road_item_text_dot(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
return c
else:
return np.nan
feat['road_item_text_dot'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_dot'] ]
return feat
def feat_road_item_text_norm2(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func1(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return a*b
else:
return np.nan
def func2(ss):
item1 = ss
if ( item1 in item_text ):
item1_text = item_text[item1]
a = np.linalg.norm( item1_text )
return a
else:
return np.nan
feat['road_item_text_product_norm2'] = df[ ['road_item','item'] ].apply(func1, axis=1)
feat['road_item_text_norm2'] = df['road_item'].apply(func2)
feat['item_text_norm2'] = df['item'].apply(func2)
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
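# The two text features above expose the dot product and the norm product; dividing them gives the
# cosine similarity. Illustrative sketch reusing those functions (not part of the original feature set):
def feat_road_item_text_cossim(data):
    feat = pd.concat([feat_road_item_text_dot(data),
                      feat_road_item_text_norm2(data)], axis=1)
    feat['road_item_text_cossim'] = feat['road_item_text_dot'] / feat['road_item_text_product_norm2']
    return feat[['road_item_text_cossim']]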
def feat_automl_cate_count_all_1(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
feat[cate1+'_count_'] = feat[cate1].map( feat[cate1].value_counts() )
cols.append( cate1+'_count_' )
print(f'feat {cate1} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_2(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
name2 = f'{cate1}_{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count_' )
print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_3(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
for c in range(b+1,n):
cate3 = categories[c]
name3 = f'{cate1}_{cate2}_{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count_' )
print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
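# The three *_cate_count_all_* features above differ only in the interaction order (1, 2 or 3 categories).
# A single parameterized sketch (illustrative only) that covers all of them:
def feat_automl_cate_count_all_k(data, k):
    from itertools import combinations
    df = data.copy()
    categories = ['user', 'item', 'road_item', 'road_item_loc', 'query_item_loc', 'recall_type']
    feat = df[['index'] + categories]
    feat['loc_diff'] = df['query_item_loc'] - df['road_item_loc']
    categories += ['loc_diff']
    cols = []
    for combo in combinations(categories, k):
        name = '_'.join(combo)
        cnt = feat.groupby(list(combo)).size()
        cnt.name = f'{name}_count_'
        feat = feat.merge(cnt, how='left', on=list(combo))
        cols.append(cnt.name)
    return feat[cols]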
def feat_time_window_cate_count(data):
if mode=='valid':
all_train_data = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
all_train_data = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_with_time = all_train_data[["item_id", "time"]].sort_values(["item_id", "time"])
item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
utils.dump_pickle(item2time, item2time_path.format(mode))
item2times = utils.load_pickle(item2time_path.format(mode))
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
]]
def feat_time_window_cate_count(data):
# Before building this feature, run item2time.py once to precompute the item -> click-times mapping.
try:
item2times = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("Run item2time.py once before building this feature")
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
feat["item_cnt_around_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.07))
feat["item_cnt_before_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.07))
feat["item_cnt_after_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.07))
feat["item_cnt_around_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.1))
feat["item_cnt_before_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.1))
feat["item_cnt_after_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.1))
feat["item_cnt_around_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.15))
feat["item_cnt_before_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.15))
feat["item_cnt_after_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.15))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
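# Minimal sketch of the precomputation expected from item2time.py referenced in the try/except above;
# it mirrors the inline version in the previous definition, with the output path left as an assumption.
def build_item2time(all_train_data, out_path):
    # list of click timestamps per item, pickled for reuse by the time-window features
    item_with_time = all_train_data[["item_id", "time"]].sort_values(["item_id", "time"])
    item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
    utils.dump_pickle(item2time, out_path)
    return item2time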
# Within the recall set, count how many times this item was recalled in a limited time window around qtime.
# item2times is built from the recall data here; the rest of the logic is unchanged.
def item_recall_cnt_around_qtime(data):
item2times = data.sort_values(["item", "time"]).groupby("item")["time"].agg(list).to_dict()  # sort per item so the early break below stays valid
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
new_cols = []
new_col_name = "item_recall_cnt_{}_time_{}"
for delta in [0.01, 0.02, 0.05, 0.07, 0.1, 0.15]:
print('running delta: ', delta)
for mode in ["all", "left", "right"]:
new_col = new_col_name.format(mode, delta)
new_cols.append(new_col)
feat[new_col] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode=mode, delta=delta))
return feat[new_cols]
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item','road_item-item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
print(f'feat {cate1} {cate2} {cate3} done')
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_topk_by_loc = {}
result_history_loc_diff1_cnt = {}
result_future_loc_diff1_cnt = {}
result_history_loc_diff1_time_mean = {}
result_future_loc_diff1_time_mean = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
result_history_loc_diff1_cnt[key] = 0.0
result_future_loc_diff1_cnt[key] = 0.0
result_history_loc_diff1_time_mean[key] = 0
result_future_loc_diff1_time_mean[key] = 0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result_history_loc_diff1_cnt[key] += 1
result_history_loc_diff1_time_mean[key] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result_future_loc_diff1_cnt[key] += 1
result_future_loc_diff1_time_mean[key] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)))
result_history_loc_diff1_time_mean[key] /=(result_history_loc_diff1_cnt[key]+1e-5)
result_future_loc_diff1_time_mean[key] /=(result_future_loc_diff1_cnt[key]+1e-5)
result_one = sorted(result[key],key=lambda x:x[0])
result_one_len = len(result_one)
result_topk_by_loc[key] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
feat['history_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_history_loc_diff1_time_mean).fillna(0)
feat['future_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_future_loc_diff1_time_mean).fillna(0)
feat['history_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_history_loc_diff1_cnt).fillna(0)
feat['future_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_future_loc_diff1_cnt).fillna(0)
feat_top = []
for key,value in result_topk_by_loc.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_median = {}
result_mean = {}
result_topk = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result[key],reverse=True)
result_one_len = len(result_one)
result_median[key] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result_mean[key] = sum(result[key])/len(result[key])
result_topk[key] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
feat['i2i_cijs_median'] = feat['new_keys'].map(result_median)
feat['i2i_cijs_mean'] = feat['new_keys'].map(result_mean)
feat_top = []
for key,value in result_topk.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_different_type_road_score_sum_mean_by_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#,'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].sum().reset_index()
df[col+'_by_item_sum'] = df[col]
df = df[ ['item',col+'_by_item_sum'] ]
feat = pd.merge( feat, df, on='item', how='left')
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].mean().reset_index()
df[col+'_by_item_mean'] = df[col]
df = df[ ['item',col+'_by_item_mean'] ]
feat = pd.merge( feat, df, on='item', how='left')
feat = feat[[f'{i}_by_item_{j}' for i in cols for j in ['sum','mean']]]
return feat
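# Design note: the per-column sum/mean merges above can also be written with groupby().transform,
# which avoids the intermediate frames, e.g. (equivalent sketch):
#   feat[col + '_by_item_sum'] = feat.groupby('item')[col].transform('sum')
#   feat[col + '_by_item_mean'] = feat.groupby('item')[col].transform('mean')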
def feat_different_type_road_score_mean_by_road_item(data):
df = data.copy()
feat = df[ ['user','road_item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['road_item',col,'index'] ]
df = df.groupby('road_item')[col].mean().reset_index()
df[col+'_by_road_item_mean'] = df[col]
df = df[ ['road_item',col+'_by_road_item_mean'] ]
feat = pd.merge( feat, df, on='road_item', how='left')
feat = feat[[f'{i}_by_road_item_mean' for i in cols]]
return feat
def feat_different_type_road_score_mean_by_loc_diff(data):
df = data.copy()
feat = df[ ['user','index','sim_weight','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['loc_diff',col,'index'] ]
df = df.groupby('loc_diff')[col].mean().reset_index()
df[col+'_by_loc_diff_mean'] = df[col]
df = df[ ['loc_diff',col+'_by_loc_diff_mean'] ]
feat = pd.merge( feat, df, on='loc_diff', how='left')
feat = feat[[f'{i}_by_loc_diff_mean' for i in cols]]
return feat
def feat_different_type_road_score_sum_mean_by_recall_type_and_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].sum().reset_index()
df[col+'_by_item-recall_type_sum'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_sum'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].mean().reset_index()
df[col+'_by_item-recall_type_mean'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_mean'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
feat = feat[[f'{i}_by_item-recall_type_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_base_info_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
#all_train_stage_data = pd.concat( all_train_stage_data.iloc[0:1000], all_train_stage_data.iloc[-10000:] )
df_train_stage = all_train_stage_data
df = data.copy()
feat = df[ ['index','road_item','item','stage'] ]
stage2sim_item = {}
stage2item_cnt = {}
stage2com_item_cnt = {}
for sta in range(cur_stage+1):
df_train = df_train_stage[ df_train_stage['stage']==sta ]
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
stage2sim_item[sta] = sim_item
stage2item_cnt[sta] = item_cnt
stage2com_item_cnt[sta] = com_item_cnt
sta_list = []
itemb_list = []
sum_sim_list = []
count_sim_list = []
mean_sim_list = []
nunique_itema_count_list = []
for sta in range(cur_stage+1):
for key1 in stage2sim_item[sta].keys():
val = 0
count = 0
for key2 in stage2sim_item[sta][key1].keys():
val += stage2sim_item[sta][key1][key2]
count += stage2com_item_cnt[sta][key1][key2]
sta_list.append( sta )
itemb_list.append( key1 )
sum_sim_list.append( val )
count_sim_list.append( count )
mean_sim_list.append( val/count )
nunique_itema_count_list.append( len( stage2sim_item[sta][key1].keys() ) )
data1 = pd.DataFrame( {'stage':sta_list, 'item':itemb_list, 'sum_sim_in_stage':sum_sim_list, 'count_sim_in_stage':count_sim_list,
'mean_sim_in_stage':mean_sim_list, 'nunique_itema_count_in_stage':nunique_itema_count_list } )
'''
sta_list = []
item_list = []
cnt_list = []
for sta in range(cur_stage+1):
for key1 in stage2item_cnt[sta].keys():
sta_list.append(sta)
item_list.append(key1)
cnt_list.append( stage2item_cnt[sta][key1] )
data2 = pd.DataFrame( {'stage':sta_list, 'road_item':item_list, 'stage_road_item_cnt':cnt_list } )
data3 = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'stage_item_cnt':cnt_list } )
'''
#feat = pd.merge( feat,data1, how='left',on=['stage','road_item','item'] )
#feat = pd.merge( feat,data2, how='left',on=['stage','road_item'] )
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat = feat[ ['sum_sim_in_stage','count_sim_in_stage','mean_sim_in_stage','nunique_itema_count_in_stage'] ]
return feat
def feat_item_time_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','stage','time'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','item_id'] )['time'].agg( ['max','min','mean'] ).reset_index()
data1.columns = [ 'stage','item','time_max_in_stage','time_min_in_stage','time_mean_in_stage' ]
data1['time_dura_in_stage'] = data1['time_max_in_stage'] - data1['time_min_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat['time_diff_min_in_stage'] = feat['time'] - feat['time_min_in_stage']
feat['time_diff_max_in_stage'] = feat['time_max_in_stage'] - feat['time']
cols = [ 'time_dura_in_stage','time_max_in_stage','time_min_in_stage','time_mean_in_stage','time_diff_min_in_stage','time_diff_max_in_stage' ]
feat = feat[ cols ]
return feat
def feat_user_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data1.name = 'user_count_in_stage'
data1 = data1.reset_index()
data1 = data1.rename( columns={'user_id':'user'} )
data2 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].nunique()
data2.name = 'item_nunique_in_stage'
data2 = data2.reset_index()
data2 = data2.rename( columns={'item_id':'item'} )
data3 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].count()
data3.name = 'item_count_in_stage'
data3 = data3.reset_index()
data3 = data3.rename( columns={'item_id':'item'} )
data3[ 'item_ratio_in_stage' ] = data3[ 'item_count_in_stage' ] / data2['item_nunique_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','user'] )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 30 20:25:08 2019
@author: alexandradarmon
"""
### RUN TIME SERIES
import pandas as pd
from punctuation.recognition.training_testing_split import (
get_nn_indexes
)
from punctuation.feature_operations.distances import d_KL
from punctuation.recognition.recognition_algorithms import (
launch_nearest_neighbour,
launch_neural_net
)
from punctuation.config import options
from punctuation.utils.utils import (
load_corpus,
int_or_nan
)
from punctuation.time_series.time_functions import (
get_temporal,
plot_histogram_years,
plot_freq_overtime,
plot_col_overtime
)
import pandas as pd
import numpy as np
import matplotlib.style
import matplotlib as mpl
mpl.style.use('seaborn-paper')
df = load_corpus()
df_temporal = get_temporal(df=df)
plot_histogram_years(df_temporal, show_middleyear=False,
to_show=True, print_legend=False)
plot_histogram_years(df_temporal,show_middleyear=True,
to_show=True, print_legend=False)
list_freq_pun_col = list(range(options.nb_signs))
freq_pun_col_1 = [1,4,5]
freq_pun_col_2 = [0,7]
freq_pun_col_3 = [2,3,6,8,9]
for f in [freq_pun_col_1,freq_pun_col_2,freq_pun_col_3]:
plot_freq_overtime(df_temporal, f,
col_date='author_middle_age',
min_date=1700, max_date=1950,
to_show=True, print_legend=True)
plot_freq_overtime(df_temporal, list_freq_pun_col,
col_date='author_middle_age',
min_date=1700, max_date=1950,
to_show=True, print_legend=False)
wells = pd.read_csv('data/Marya_Wells.csv').sort_values('Date')
wells = pd.merge(wells, df_temporal, how='inner', on='title')
wells['Date_bin'] = wells['Date']
plot_freq_overtime(wells, list_freq_pun_col,
col_date='Date',
min_date=min(wells['Date']),
max_date=1922,
print_legend=False, show_ci=True)
fleming = pd.read_csv('data/Alex_Fleming.csv').sort_values('Date')
fleming = pd.merge(fleming, df_temporal, how='left', on='title')
fleming['Date_bin'] = fleming['Date']
plot_freq_overtime(fleming, list_freq_pun_col,
col_date='Date',
min_date=min(fleming['Date']),
max_date=max(fleming['Date']),
print_legend=False, show_ci=True)
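# The Wells and Fleming blocks above (and the Shakespeare block below) repeat the same steps;
# an equivalent helper would be (illustrative only, assuming the same CSV columns):
def plot_author_freq_overtime(csv_path, corpus_df, how='inner', max_date=None):
    author = pd.read_csv(csv_path).sort_values('Date')
    author = pd.merge(author, corpus_df, how=how, on='title')
    author['Date_bin'] = author['Date']
    plot_freq_overtime(author, list_freq_pun_col,
                       col_date='Date',
                       min_date=min(author['Date']),
                       max_date=max_date if max_date is not None else max(author['Date']),
                       print_legend=False, show_ci=True)
    return author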
shakespeare = pd.read_csv('data/Alex_Shakespeare.csv')
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 14:54:41 2020
@author: aschauer
"""
import socket
import pandas as pd
from pathlib import Path
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float
# store locally if on my machine (database loses connection to H: drive)
if socket.gethostname() == 'SLFPC954':
CV_RESULTS_DIR = Path(r'D:\\HS_gap_filling_paper\results\cross_validation')
CV_MODELED_SERIES_DIR = CV_RESULTS_DIR / 'modeled_series'
else:
PROJECT_DIR = Path(__file__).parent.parent
CV_RESULTS_DIR = PROJECT_DIR / 'results' / 'cross_validation'
CV_MODELED_SERIES_DIR = CV_RESULTS_DIR / 'modeled_series'
for d in [CV_RESULTS_DIR, CV_MODELED_SERIES_DIR]:
if not d.exists():
d.mkdir(parents=True, exist_ok=True)
DBFILE = CV_RESULTS_DIR / 'cv_scores_database.db'
Base = declarative_base()
class ModeledGap(Base):
__tablename__ = "modeled_gaps"
gap_stn = Column(String, primary_key=True)
fill_method = Column(String, primary_key=True)
station_grid = Column(String, primary_key=True)
gap_type = Column(String, primary_key=True)
gap_winter = Column(Integer, primary_key=True)
gap_start = Column(String, primary_key=True)
gap_end = Column(String, primary_key=True)
train_start = Column(String, primary_key=True)
train_end = Column(String, primary_key=True)
gap_stn_altitude = Column(Integer)
HS_true_file = Column(String) # file reference to pickled Series
HS_pred_file = Column(String) # file reference to pickled Series
HSavg_true = Column(Float)
HSavg_pred = Column(Float)
dHS1_true = Column(Float)
dHS1_pred = Column(Float)
HSmax_true = Column(Float)
HSmax_pred = Column(Float)
RMSE = Column(Float)
RMSE_nonzero = Column(Float)
RMSE_nonzero_true = Column(Float)
RMSE_nonzero_pred = Column(Float)
MAAPE = Column(Float)
MAAPE_nonzero = Column(Float)
MAAPE_nonzero_true = Column(Float)
MAAPE_nonzero_pred = Column(Float)
bias = Column(Float)
HSavg_diff = Column(Float)
HSavg_abs_diff = Column(Float)
HSavg_relative_diff = Column(Float)
HSavg_relative_abs_diff = Column(Float)
dHS1_diff = Column(Float)
dHS1_abs_diff = Column(Float)
dHS1_relative_diff = Column(Float)
dHS1_relative_abs_diff = Column(Float)
HSmax_diff = Column(Float)
HSmax_abs_diff = Column(Float)
HSmax_relative_diff = Column(Float)
HSmax_relative_abs_diff = Column(Float)
r2_score = Column(Float)
r2_score_nonzero = Column(Float)
r2_score_nonzero_true = Column(Float)
r2_score_nonzero_pred = Column(Float)
def create_file_references(self):
gap_base = f"{self.gap_stn}_{self.gap_type}_{self.gap_start}-{self.gap_end}"
model_base = f"{self.fill_method}_{self.station_grid}_{self.train_start}-{self.train_end}"
setattr(self, 'HS_true_file',
str(CV_MODELED_SERIES_DIR / f"_y_true_{gap_base}.pkl"))
setattr(self, 'HS_pred_file',
str(CV_MODELED_SERIES_DIR / f"_y_pred_{gap_base}_{model_base}.pkl"))
engine = sa.create_engine(f'sqlite:///{DBFILE}', echo=False)
Base.metadata.create_all(engine)
def make_session():
session_factory = sa.orm.sessionmaker()
session_factory.configure(bind=engine)
session = session_factory()
return session
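# Illustrative usage of the ORM model and session factory above (not part of the original module);
# the keyword arguments are assumed to match the ModeledGap columns.
def store_gap_result(**fields):
    session = make_session()
    gap = ModeledGap(**fields)
    gap.create_file_references()   # derive HS_true_file / HS_pred_file paths
    session.merge(gap)             # merge acts as an upsert on the composite primary key
    session.commit()
    session.close()
    return gap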
def get_cv_results_as_df():
return pd.read_sql('modeled_gaps', engine)
def get_predictions_from_one_gap_as_df(gap_stn,
gap_winter,
fill_methods=None,
station_grids=None):
"""
Query predictions and true data for one gap at one gap station and concatenate the
result into a single dataframe.
Parameters
----------
gap_stn : str
gap_winter : int
fill_methods : list or tuple, optional
The default is None and queries all methods.
station_grids : list or tuple, optional
The default is None and queries all station grids.
Returns
-------
out_df : pandas.DataFrame
True and modeled HS series for the requested gap, concatenated column-wise.
"""
query = f"""select *
from modeled_gaps
where gap_winter=?
and gap_stn=?"""
res = pd.read_sql(query, engine, params=[gap_winter, gap_stn])
if fill_methods is not None:
res = res.loc[res.fill_method.isin(fill_methods)]
if station_grids is not None:
res = res.loc[res.station_grid.isin(station_grids)]
# observed (true) series for the gap station:
predictions = []
hs_true_file = res['HS_true_file'].unique().tolist()
predictions.append(pd.read_pickle(hs_true_file[0]))
for station_grid, data in res.groupby('station_grid'):
hs_pred_files = data['HS_pred_file'].tolist()
df = pd.concat([pd.read_pickle(file) for file in hs_pred_files], axis=1)
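        # assumed ending: the remainder of this function is not shown above; a minimal sketch that
        # matches the docstring — label each grid's predictions and concatenate with the true series
        df.columns = [f"{m}_{station_grid}" for m in data['fill_method']]
        predictions.append(df)
    out_df = pd.concat(predictions, axis=1)
    return out_df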
"""
October 2020
Updated: August 2021
Software version: Python 3.7
This code calculates building material demand and embodied greenhouse gas emissions in 26 global regions between 2020 and 2060. For the original code & latest updates, see: https://github.com/oucxiaoyang/GloBUME
The building material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA
The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM
@author: <NAME>; <EMAIL>
<NAME>; <EMAIL>
<NAME>; <EMAIL>
contributions from: <NAME>
*NOTE: Insert location of the GloBUME-main folder in 'dir_path' (line 28) to run the code.
"""
#%% GENERAL SETTING & STATEMENTS
import pandas as pd
import numpy as np
import os
import ctypes
import math
# set current directory
dir_path = ""
os.chdir(dir_path)
# Set general constants
regions = 26 #26 IMAGE regions
res_building_types = 4 #4 residential building types: detached, semi-detached, appartments & high-rise
area = 2 #2 areas: rural & urban
materials = 7 #7 materials: Steel, brick, Concrete, Wood, Copper, Aluminium, Glass
inflation = 1.2423 # gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm
# Set Flags for sensitivity analysis
flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data
flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec)
flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal)
flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median)
#%%Load files & arrange tables ----------------------------------------------------
if flag_Mean == 0:
file_addition = ''
elif flag_Mean == 1:
file_addition = '_mean'
elif flag_Mean == 2:
file_addition = '_high'
elif flag_Mean == 3:
file_addition = '_low'
else:
file_addition = '_median'
# Load Population, Floor area, and Service value added (SVA) Database csv-files
pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region)
rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region)
housing_type = pd.read_csv('files_population\Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area)
floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area)
floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27
avg_m2_cap = pd.read_csv('files_floor_area\Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type)
sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0])
sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$
# load material density data csv-files
building_materials = pd.read_csv('files_material_density\Building_materials' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
materials_commercial = pd.read_csv('files_material_density\materials_commercial' + file_addition + '.csv') # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
# Load fitted regression parameters for comercial floor area estimate
if flag_alpha == 0:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters.csv', index_col = [0])
else:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters_alpha.csv', index_col = [0])
# Ensure full time series for pop & rurpop (interpolation, some years are missing)
rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate()
pop2 = pop.reindex(list(range(1970,2061,1))).interpolate()
# Remove 1st year, to ensure same Table size as floorspace data (from 1971)
pop2 = pop2.iloc[1:]
rurpop2 = rurpop2.iloc[1:]
# pre-calculate urban population
urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop)
# Restructure the tables to regions as columns; for floorspace
floorspace_rur = floorspace.pivot(index = "t", columns = "Region", values = "Rural")
floorspace_urb = floorspace.pivot(index = "t", columns = "Region", values = "Urban")
# Restructuring for square meters (m2/cap)
avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row
avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row
# Restructuring for the Housing types (% of population living in them)
housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels
housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row
housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels
housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row
#%% COMMERCIAL building space demand (stock) calculated from Gomperz curve (fitted, using separate regression model)
# Select gompertz curve paramaters for the total commercial m2 demand (stock)
alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601
beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431
gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415
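# Gompertz saturation curve used below (per-capita commercial floorspace as a function of service value added):
#   m2_cap(SVA) = a * exp(-b * exp(-(c / 1000) * SVA_pc))
# with the exponential-decay alternative (flag_ExpDec == 1): m2_cap(SVA) = max(0.542, a - b * exp(-(c / 1000) * SVA_pc))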
# find the total commercial m2 stock (in Millions of m2)
commercial_m2_cap = pd.DataFrame(index = range(1971,2061), columns = range(1,27))
for year in range(1971,2061):
for region in range(1,27):
if flag_ExpDec == 0:
commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
else:
commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
# Subdivide the total across Offices, Retail+, Govt+ & Hotels+
commercial_m2_cap_office = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Offices
commercial_m2_cap_retail = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Retail & Warehouses
commercial_m2_cap_hotels = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Hotels & Restaurants
commercial_m2_cap_govern = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Hospitals, Education, Government & Transportation
minimum_com_office = 25
minimum_com_retail = 25
minimum_com_hotels = 25
minimum_com_govern = 25
for year in range(1971,2061):
for region in range(1,27):
# get the square meter per capita floorspace for 4 commercial applications
office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year]))
retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year]))
hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year]))
govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year]))
#calculate minimum values for later use in historic tail(Region 20: China @ 134 $/cap SVA)
minimum_com_office = office if office < minimum_com_office else minimum_com_office
minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail
minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels
minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern
# Then use the ratio's to subdivide the total commercial floorspace into 4 categories
commercial_sum = office + retail + hotels + govern
commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum)
commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum)
commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum)
commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum)
#%% Add historic tail (1720-1970) + 100 yr initial -----------------------------------------------------------
# load historic population development
hist_pop = pd.read_csv('files_initial_stock\hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University)
# Determine the historical average global trend in floorspace/cap & the regional rural population share based on the last 10 years of IMAGE data
floorspace_urb_trend_by_region = [0 for j in range(0,26)]
floorspace_rur_trend_by_region = [0 for j in range(0,26)]
rurpop_trend_by_region = [0 for j in range(0,26)]
commercial_m2_cap_office_trend = [0 for j in range(0,26)]
commercial_m2_cap_retail_trend = [0 for j in range(0,26)]
commercial_m2_cap_hotels_trend = [0 for j in range(0,26)]
commercial_m2_cap_govern_trend = [0 for j in range(0,26)]
# For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data
for region in range(1,27):
floorspace_urb_trend_by_year = [0 for i in range(0,10)]
floorspace_rur_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)]
# Get the growth by year (for the first 10 years)
for year in range(1970,1980):
floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2]
floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2]
commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2]
commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2]
commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2]
commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2]
rurpop_trend_by_region[region-1] = ((1 - (rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100
floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10
floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10
commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10
commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10
commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10
commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10
# Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr
floorspace_urb_trend_global = (1 - (sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum
floorspace_rur_trend_global = (1 - (sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum
commercial_m2_cap_office_trend_global = (1 - (sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_retail_trend_global = (1 - (sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_hotels_trend_global = (1 - (sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_govern_trend_global = (1 - (sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum
# define historic floorspace (1820-1970) in m2/cap
floorspace_urb_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = floorspace_urb.columns)
floorspace_rur_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = floorspace_rur.columns)
rurpop_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = rurpop.columns)
pop_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = pop2.columns)
commercial_m2_cap_office_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_govern.columns)
# Find minumum or maximum values in the original IMAGE data (Just for residential, commercial minimum values have been calculated above)
minimum_urb_fs = floorspace_urb.values.min() # Region 20: China
minimum_rur_fs = floorspace_rur.values.min() # Region 20: China
maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa
# Calculate the actual values used between 1820 & 1970, given the trends & the min/max values
for region in range(1,regions+1):
for year in range(1820,1971):
# MAX of 1) the MINimum value & 2) the calculated value
floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
# MIN of 1) the MAXimum value & 2) the calculated value
rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100 + rurpop_trend_by_region[region - 1])/100)**(1970 - year)) # average annual INcrease by region
# just add the tail to the population (no min/max & trend is pre-calculated in hist_pop)
pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970]
urbpop_1820_1970 = 1 - rurpop_1820_1970
# To avoid full model setup in 1820 (all required stock gets built in yr 1) we assume another tail that linearly increases to the 1820 value over a 100 year time period, so 1720 = 0
floorspace_urb_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = floorspace_urb.columns)
floorspace_rur_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = floorspace_rur.columns)
rurpop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = rurpop.columns)
urbpop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = urbpop.columns)
pop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = pop2.columns)
commercial_m2_cap_office_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_govern.columns)
for region in range(1,27):
for time in range(1721,1820):
# MAX(0,...) Because of floating point deviations, leading to negative stock in some cases
floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time))
floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time))
rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time))
urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time))
pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time))
commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time))
# combine historic with IMAGE data here
rurpop_tail = rurpop_1820_1970.append(rurpop2, ignore_index = False)
urbpop_tail = urbpop_1820_1970.append(urbpop, ignore_index = False)
pop_tail = pop_1820_1970.append(pop2, ignore_index = False)
floorspace_urb_tail = floorspace_urb_1820_1970.append(floorspace_urb, ignore_index = False)
floorspace_rur_tail = floorspace_rur_1820_1970.append(floorspace_rur, ignore_index = False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index = False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index = False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index = False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index = False)
rurpop_tail = rurpop_1721_1820.append(rurpop_1820_1970.append(rurpop2, ignore_index = False), ignore_index = False)
urbpop_tail = urbpop_1721_1820.append(urbpop_1820_1970.append(urbpop, ignore_index = False), ignore_index = False)
pop_tail = pop_1721_1820.append(pop_1820_1970.append(pop2, ignore_index = False), ignore_index = False)
floorspace_urb_tail = floorspace_urb_1721_1820.append(floorspace_urb_1820_1970.append(floorspace_urb, ignore_index = False), ignore_index = False)
floorspace_rur_tail = floorspace_rur_1721_1820.append(floorspace_rur_1820_1970.append(floorspace_rur, ignore_index = False), ignore_index = False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1721_1820.append(commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index = False), ignore_index = False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1721_1820.append(commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index = False), ignore_index = False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1721_1820.append(commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index = False), ignore_index = False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1721_1820.append(commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index = False), ignore_index = False)
#%% FLOOR AREA STOCK -----------------------------------------------------------
# adjust the shares for urban/rural only (shares in the csv are given as a percentage of the total (Rur + Urb); the urban shares are rescaled to add up to 1, and likewise for rural)
housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum()
housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum()
# calculte the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas)
people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns = pop_tail.columns, index = pop_tail.index)
"""
Generate figures for the DeepCytometer paper for v8 of the pipeline.
Environment: cytometer_tensorflow_v2.
We repeat the phenotyping from klf14_b6ntac_exp_0110_paper_figures_v8.py, but change the stratification of the data so
that we have Control (PATs + WT MATs) vs. Het MATs.
The comparisons we do are:
* Control vs. MAT WT
* MAT WT vs. MAT Het
This script partly deprecates klf14_b6ntac_exp_0099_paper_figures_v7.py:
* Figures have been updated to use v8 of the pipeline in the paper.
This script partly deprecates klf14_b6ntac_exp_0110_paper_figures_v8.py:
* We repeat the phenotyping, but change the stratification of the data so that we have Control (PATs + WT MATs) vs.
Het MATs.
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0111_paper_figures'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
DEBUG = False
SAVE_FIGS = False
# post-processing parameters
min_area = 203 / 2 # (pix^2) smaller objects are rejected
max_area = 44879 * 3 # (pix^2) larger objects are rejected
xres_ref = 0.4538234626730202
yres_ref = 0.4537822752643282
min_area_um2 = min_area * xres_ref * yres_ref
max_area_um2 = max_area * xres_ref * yres_ref
# json_annotation_files_dict here needs to have the same files as in
# klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py
# SQWAT: list of annotation files
json_annotation_files_dict = {}
json_annotation_files_dict['sqwat'] = [
'KLF14-B6NTAC 36.1d PAT 99-16 C1 - 2016-02-11 11.48.31.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46.json',
'KLF14-B6NTAC-MAT-17.1a 44-16 C1 - 2016-02-01 11.14.17.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 C1 - 2016-02-01 16.27.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 C1 - 2016-02-03 09.10.17.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 C1 - 2016-03-15 17.15.41.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 C1 - 2016-02-02 14.32.03.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 C1 - 2016-02-04 10.24.22.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 C1 - 2016-02-04 16.15.05.json',
'KLF14-B6NTAC 37.1a PAT 106-16 C1 - 2016-02-12 16.21.00.json',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06.json',
# 'KLF14-B6NTAC-PAT-37.2d 411-16 C1 - 2016-03-15 12.42.26.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 C1 - 2016-02-04 09.17.52.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 C1 - 2016-02-18 10.28.27.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 C1 - 2016-02-01 15.25.53.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 C1 - 2016-02-18 09.19.26.json',
'KLF14-B6NTAC 36.1g PAT 102-16 C1 - 2016-02-11 17.20.14.json',
'KLF14-B6NTAC-37.1g PAT 112-16 C1 - 2016-02-16 13.33.09.json',
'KLF14-B6NTAC-38.1e PAT 94-16 C1 - 2016-02-10 12.13.10.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 C1 - 2016-02-03 15.46.15.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 C1 - 2016-02-02 09.59.16.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 C1 - 2016-02-18 17.03.38.json',
'KLF14-B6NTAC-MAT-18.1f 55-16 C1 - 2016-02-02 16.14.30.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 C1 - 2016-03-15 14.37.55.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 C1 - 2016-02-17 14.51.18.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32.json',
'KLF14-B6NTAC 36.1e PAT 100-16 C1 - 2016-02-11 14.06.56.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 C1 - 2016-02-02 12.26.58.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52.json',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 C1 - 2016-03-17 14.33.38.json',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 C1 - 2016-02-03 14.19.35.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 C1 - 2016-02-25 15.13.00.json',
'KLF14-B6NTAC-PAT-37.2a 406-16 C1 - 2016-03-14 12.01.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 C1 - 2016-02-12 14.33.33.json',
'KLF14-B6NTAC-37.1b PAT 107-16 C1 - 2016-02-15 11.43.31.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 C1 - 2016-02-18 11.48.16.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 C1 - 2016-02-04 12.34.32.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 C1 - 2016-02-18 13.12.09.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 C1 - 2016-03-15 15.54.12.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 C1 - 2016-02-02 17.23.31.json',
'KLF14-B6NTAC-37.1h PAT 113-16 C1 - 2016-02-16 15.14.09.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52.json',
'KLF14-B6NTAC-37.1e PAT 110-16 C1 - 2016-02-15 17.33.11.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54.json',
'KLF14-B6NTAC 36.1h PAT 103-16 C1 - 2016-02-12 10.15.22.json',
# 'KLF14-B6NTAC-PAT-39.1h 453-16 C1 - 2016-03-17 11.38.04.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 C1 - 2016-02-17 12.49.00.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 C1 - 2016-02-01 17.51.46.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 C1 - 2016-02-17 11.46.42.json',
'KLF14-B6NTAC-38.1f PAT 95-16 C1 - 2016-02-10 14.41.44.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 C1 - 2016-03-15 10.18.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 C1 - 2016-02-18 15.41.38.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.json',
'KLF14-B6NTAC 36.1f PAT 101-16 C1 - 2016-02-11 15.23.06.json',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33.json',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 C1 - 2016-02-03 11.56.52.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 C1 - 2016-03-14 10.58.34.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 C1 - 2016-03-14 16.23.30.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 C1 - 2016-02-25 14.00.14.json',
# 'KLF14-B6NTAC-PAT-37.2c 407-16 C1 - 2016-03-14 14.13.54.json',
# 'KLF14-B6NTAC-PAT-37.2b 410-16 C1 - 2016-03-15 11.24.20.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 C1 - 2016-03-17 10.22.54.json',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41.json',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 C1 - 2016-03-16 17.01.17.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52.json',
'KLF14-B6NTAC-37.1f PAT 111-16 C2 - 2016-02-16 11.26 (1).json',
'KLF14-B6NTAC-PAT 37.2b 410-16 C4 - 2020-02-14 10.27.23.json',
'KLF14-B6NTAC-PAT 37.2c 407-16 C4 - 2020-02-14 10.15.57.json',
# 'KLF14-B6NTAC-PAT 37.2d 411-16 C4 - 2020-02-14 10.34.10.json'
]
# GWAT: list of annotation files
json_annotation_files_dict['gwat'] = [
'KLF14-B6NTAC-36.1a PAT 96-16 B1 - 2016-02-10 15.32.31.json',
'KLF14-B6NTAC-36.1b PAT 97-16 B1 - 2016-02-10 17.15.16.json',
'KLF14-B6NTAC-36.1c PAT 98-16 B1 - 2016-02-10 18.32.40.json',
'KLF14-B6NTAC 36.1d PAT 99-16 B1 - 2016-02-11 11.29.55.json',
'KLF14-B6NTAC 36.1e PAT 100-16 B1 - 2016-02-11 12.51.11.json',
'KLF14-B6NTAC 36.1f PAT 101-16 B1 - 2016-02-11 14.57.03.json',
'KLF14-B6NTAC 36.1g PAT 102-16 B1 - 2016-02-11 16.12.01.json',
'KLF14-B6NTAC 36.1h PAT 103-16 B1 - 2016-02-12 09.51.08.json',
# 'KLF14-B6NTAC 36.1i PAT 104-16 B1 - 2016-02-12 11.37.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 B1 - 2016-02-12 14.08.19.json',
'KLF14-B6NTAC 37.1a PAT 106-16 B1 - 2016-02-12 15.33.02.json',
'KLF14-B6NTAC-37.1b PAT 107-16 B1 - 2016-02-15 11.25.20.json',
'KLF14-B6NTAC-37.1c PAT 108-16 B1 - 2016-02-15 12.33.10.json',
'KLF14-B6NTAC-37.1d PAT 109-16 B1 - 2016-02-15 15.03.44.json',
'KLF14-B6NTAC-37.1e PAT 110-16 B1 - 2016-02-15 16.16.06.json',
'KLF14-B6NTAC-37.1g PAT 112-16 B1 - 2016-02-16 12.02.07.json',
'KLF14-B6NTAC-37.1h PAT 113-16 B1 - 2016-02-16 14.53.02.json',
'KLF14-B6NTAC-38.1e PAT 94-16 B1 - 2016-02-10 11.35.53.json',
'KLF14-B6NTAC-38.1f PAT 95-16 B1 - 2016-02-10 14.16.55.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 B1 - 2016-02-17 11.21.54.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 B1 - 2016-02-17 12.33.18.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 B1 - 2016-02-17 14.01.06.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 B1 - 2016-02-17 15.43.57.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 B1 - 2016-02-17 17.14.16.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 B1 - 2016-02-18 10.05.52.json',
# 'KLF14-B6NTAC-MAT-17.1a 44-16 B1 - 2016-02-01 09.19.20.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 B1 - 2016-02-01 12.05.15.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 B1 - 2016-02-01 13.01.30.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 B1 - 2016-02-01 15.11.42.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 B1 - 2016-02-01 16.01.09.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 B1 - 2016-02-01 17.12.31.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 B1 - 2016-02-04 08.57.34.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 B1 - 2016-02-04 10.06.00.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 B1 - 2016-02-04 11.14.28.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 B1 - 2016-02-04 12.20.20.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 B1 - 2016-02-04 14.01.40.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 B1 - 2016-02-04 15.52.52.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 B1 - 2016-02-02 08.49.06.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 B1 - 2016-02-02 09.46.31.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 B1 - 2016-02-02 11.24.31.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 B1 - 2016-02-02 14.11.37.json',
# 'KLF14-B6NTAC-MAT-18.1e 54-16 B1 - 2016-02-02 15.06.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 B1 - 2016-02-03 08.54.27.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 B1 - 2016-02-03 09.58.06.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 B1 - 2016-02-03 11.41.32.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 B1 - 2016-02-03 12.56.49.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 B1 - 2016-02-03 14.02.25.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 B1 - 2016-02-03 15.00.17.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 B1 - 2016-02-03 16.40.37.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 B1 - 2016-02-25 16.53.42.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 B1 - 2016-02-18 12.51.46.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 B1 - 2016-02-26 10.48.56.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 B1 - 2016-02-02 16.57.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 B1 - 2016-02-18 14.21.50.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 B1 - 2016-02-18 16.40.48.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 B1 - 2016-02-25 13.15.27.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 B1 - 2016-02-18 11.23.22.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 B1 - 2016-02-25 14.51.57.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 B1 - 2016-03-15 09.24.54.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 B1 - 2016-03-15 14.11.47.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 B1 - 2016-03-16 14.22.04.json',
# 'KLF14-B6NTAC-PAT-37.2a 406-16 B1 - 2016-03-14 11.46.47.json',
'KLF14-B6NTAC-PAT-37.2b 410-16 B1 - 2016-03-15 11.12.01.json',
'KLF14-B6NTAC-PAT-37.2c 407-16 B1 - 2016-03-14 12.54.55.json',
'KLF14-B6NTAC-PAT-37.2d 411-16 B1 - 2016-03-15 12.01.13.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 B1 - 2016-03-14 16.06.43.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 B1 - 2016-03-14 09.49.45.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 B1 - 2016-03-16 11.04.45.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 B1 - 2016-03-16 16.42.16.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 B1 - 2016-03-15 15.31.26.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 B1 - 2016-03-15 16.49.22.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 B1 - 2016-03-16 15.25.38.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 B1 - 2016-03-17 09.10.42.json',
'KLF14-B6NTAC-PAT-38.1a 90-16 B1 - 2016-02-04 17.27.42.json',
'KLF14-B6NTAC-PAT-39.1h 453-16 B1 - 2016-03-17 11.15.50.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 B1 - 2016-03-17 12.16.06.json'
]
########################################################################################################################
## Common code to the rest of this script:
## Import packages and auxiliary functions
## USED IN PAPER
########################################################################################################################
# import pickle
from toolz import interleave
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import scipy
import scipy.stats as stats
# import skimage
import sklearn.neighbors, sklearn.model_selection
import statsmodels.api as sm
# import statsmodels.formula.api as smf
from statsmodels.stats.multitest import multipletests
import seaborn as sns
# import openslide
import PIL
# from PIL import Image, ImageDraw
import cytometer.data
import cytometer.stats
import shapely
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
hand_traced_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_v2')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')
histo_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
dataframe_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
paper_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
area2quantile_dir = os.path.join(home, 'Data/cytometer_data/deepcytometer_pipeline_v8')
saved_models_dir = os.path.join(home, 'Data/cytometer_data/deepcytometer_pipeline_v8')
DEBUG = False
method = 'corrected'
# k-folds file with hand traced filenames
saved_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(
pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(
pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
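# Stratify animals as described in the header: PATs form the Control group,
# MAT KLF14-KO:WT animals are kept as MAT_WT, and MAT KLF14-KO:Het animals are
# the functional KO (FKO) group.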
metainfo['functional_ko'] = 'Control'
metainfo.loc[(metainfo['ko_parent'] == 'MAT') & (metainfo['genotype'] == 'KLF14-KO:Het'), 'functional_ko'] = 'FKO'
metainfo.loc[(metainfo['ko_parent'] == 'MAT') & (metainfo['genotype'] == 'KLF14-KO:WT'), 'functional_ko'] = 'MAT_WT'
metainfo['functional_ko'] = metainfo['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control', 'MAT_WT', 'FKO'], ordered=True))
# remove BW=NaNs
metainfo = metainfo[~np.isnan(metainfo['BW'])]
metainfo = metainfo.reset_index()
# load dataframe with cell population quantiles and histograms
dataframe_areas_filename = os.path.join(dataframe_dir, 'klf14_b6ntac_exp_0110_dataframe_areas_' + method + '.pkl')
df_all = | pd.read_pickle(dataframe_areas_filename) | pandas.read_pickle |
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for fuel economy of all US light-duty vehicles
data = | pd.read_csv('../processed/tidy_automotive_trends.csv') | pandas.read_csv |
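# --- Illustrative aside (not part of the original snippet) -------------------
# Minimal, self-contained sketch of the kind of Altair line chart the comment
# above describes; the column names and values are hypothetical toy data, not
# the contents of tidy_automotive_trends.csv.
import pandas as pd
import altair as alt
_toy = pd.DataFrame({'year': [2018, 2019, 2020], 'mpg': [24.9, 25.2, 25.7]})
_chart_sketch = alt.Chart(_toy).mark_line(point=True).encode(x='year:O', y='mpg:Q')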
from __future__ import division
import json
import numpy as np
import pandas as pd
from scipy import stats
from visigoth.stimuli import Point, Points, PointCue, Pattern
from visigoth import (AcquireFixation, AcquireTarget,
flexible_values, limited_repeat_sequence)
def define_cmdline_params(self, parser):
parser.add_argument("--timing", default=1, type=float)
def create_stimuli(exp):
# Fixation point
fix = Point(exp.win,
exp.p.fix_pos,
exp.p.fix_radius,
exp.p.fix_trial_color)
# Spatial cue
cue = PointCue(exp.win,
exp.p.cue_norm,
exp.p.cue_radius,
exp.p.cue_color)
# Saccade targets
targets = Points(exp.win,
exp.p.target_pos,
exp.p.target_radius,
exp.p.target_color)
# Average of multiple sinusoidal grating stimulus
pattern = Pattern(exp.win,
n=exp.p.stim_gratings,
elementTex=exp.p.stim_tex,
elementMask=exp.p.stim_mask,
sizes=exp.p.stim_size,
sfs=exp.p.stim_sf,
pos=(0, 0)
)
return locals()
def generate_trials(exp):
"""Yield trial and pulse train info."""
    # We need special logic to schedule the final trial
# given the variability of trial durations.
finished = False
# Create a generator to control cue position repeats
cue_positions = list(range(len(exp.p.stim_pos)))
cue_pos_gen = limited_repeat_sequence(cue_positions,
exp.p.stim_pos_max_repeat)
# Create an infinite iterator for trial data
for t in exp.trial_count():
# Get the current time
now = exp.clock.getTime()
# Check whether we have performed the final trial of the run
if finished or now > (exp.p.run_duration - exp.p.finish_min):
raise StopIteration
# Sample parameters for the next trial and check constraints
attempts = 0
while True:
# Allow experimenter to break if we get stuck here
exp.check_abort()
# Check if we've blown through the final trial window
if exp.clock.getTime() > exp.p.run_duration:
raise StopIteration
# Increment the counter of attempts to find a good trial
attempts += 1
# Sample parameters for a trial
t_info, p_info = generate_trial_info(exp, t, cue_pos_gen)
# Calculate how long the trial will take
trial_dur = (t_info["wait_iti"]
+ t_info["wait_pre_stim"]
+ t_info["pulse_train_dur"]
+ 1)
finish_time = exp.p.run_duration - (now + trial_dur)
# Reject if the next trial is too long
if finish_time < exp.p.finish_min:
# Make a number of attempts to find a trial that finishes with
# enough null time at the end of the run
if attempts < 50:
continue
# If we are having a hard time scheduling a trial that gives
# enough null time, relax our criterion to get a trial that
# just finishes before the scanner does
if finish_time < 0:
continue
# Check if next trial will end in the finish window
if finish_time < (exp.p.finish_max * exp.p.timing):
finished = True
# Use these parameters for the next trial
break
yield t_info, p_info
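# Sample cue position, generating distribution and timing parameters for one
# trial and assemble them into the trial-info Series plus the per-pulse table.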
def generate_trial_info(exp, t, cue_pos_gen):
# Schedule the next trial
wait_iti = flexible_values(exp.p.wait_iti)
if t == 1:
# Handle special case of first trial
if exp.p.skip_first_iti:
wait_iti = 0
else:
# Handle special case of early fixbreak on last trial
last_t_info = exp.trial_data[-1][0]
if last_t_info.fixbreak_early:
if exp.p.wait_iti_early_fixbreak is not None:
wait_iti = exp.p.wait_iti_early_fixbreak
# Determine the stimulus parameters for this trial
cue_pos = next(cue_pos_gen)
gen_dist = flexible_values(list(range(len(exp.p.dist_means))))
gen_mean = exp.p.dist_means[gen_dist]
gen_sd = exp.p.dist_sds[gen_dist]
target = exp.p.dist_targets[gen_dist]
trial_info = exp.trial_info(
# Stimulus parameters
cue_pos=cue_pos,
gen_dist=gen_dist,
gen_mean=gen_mean,
gen_sd=gen_sd,
target=target,
# Pulse info (filled in below)
log_contrast_mean=np.nan,
pulse_count=np.nan,
pulse_train_dur=np.nan,
# Timing parameters
wait_iti=wait_iti,
wait_pre_stim=flexible_values(exp.p.wait_pre_stim) * exp.p.timing,
wait_resp=flexible_values(exp.p.wait_resp),
wait_feedback=flexible_values(exp.p.wait_feedback),
# Track fixbreaks before pulses
fixbreak_early=np.nan,
# Achieved timing data
onset_fix=np.nan,
offset_fix=np.nan,
onset_cue=np.nan,
offset_cue=np.nan,
onset_targets=np.nan,
onset_feedback=np.nan,
)
t_info = pd.Series(trial_info, dtype=np.object)
p_info = generate_pulse_info(exp, t_info)
# Insert trial-level information determined by pulse schedule
t_info["log_contrast_mean"] = p_info["log_contrast"].mean()
t_info["trial_llr"] = p_info["pulse_llr"].sum()
t_info["pulse_count"] = len(p_info)
t_info["pulse_train_dur"] = (p_info["gap_dur"].sum()
+ p_info["pulse_dur"].sum())
return t_info, p_info
def generate_pulse_info(exp, t_info):
"""Generate the pulse train for a given trial."""
rng = np.random.RandomState()
# Randomly sample the pulse count for this trial
if rng.rand() < exp.p.pulse_single_prob:
count = 1
else:
count = int(flexible_values(exp.p.pulse_count, random_state=rng,
max=exp.p.pulse_count_max))
# Account for the duration of each pulse
pulse_dur = flexible_values(exp.p.pulse_dur, count, rng)
total_pulse_dur = np.sum(pulse_dur)
# Randomly sample gap durations with a constraint on trial duration
train_dur = np.inf
while train_dur > (exp.p.pulse_train_max * exp.p.timing):
gap_dur = flexible_values(exp.p.pulse_gap, count, rng) * exp.p.timing
train_dur = np.sum(gap_dur) + total_pulse_dur
# Generate the stimulus strength for each pulse
max_contrast = 1 / np.sqrt(exp.p.stim_gratings)
contrast_dist = "norm", t_info["gen_mean"], t_info["gen_sd"]
log_contrast = flexible_values(contrast_dist, count, rng,
max=np.log10(max_contrast))
# Define the LLR of each pulse
pulse_llr = compute_llr(log_contrast, exp.p.dist_means, exp.p.dist_sds)
# Determine the stimulus position
# TODO this currently hardcodes 2 possible stimulus positions for testing
if t_info["cue_pos"] == 0:
ps = [exp.p.cue_validity, 1 - exp.p.cue_validity]
elif t_info["cue_pos"] == 1:
ps = [1 - exp.p.cue_validity, exp.p.cue_validity]
stim_pos = np.random.choice([0, 1], count, p=ps)
p_info = pd.DataFrame(dict(
# Basic trial information
subject=exp.p.subject,
session=exp.p.session,
run=exp.p.run,
trial=t_info["trial"],
# Pulse information
pulse=np.arange(1, count + 1),
stim_pos=stim_pos,
log_contrast=log_contrast,
contrast=10 ** log_contrast,
pulse_llr=pulse_llr,
pulse_dur=pulse_dur,
gap_dur=gap_dur,
# Achieved performance
occurred=False,
blink=False,
pulse_onset=np.nan,
pulse_offset=np.nan,
dropped_frames=np.nan,
))
return p_info
def compute_llr(c, means, sds):
"""Compute the pulse log-likelihood supporting Target 1."""
# Define the generating distributions
m0, m1 = means
s0, s1 = sds
d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1)
# Compute LLR of each pulse
l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c))
llr = l1 - l0
return llr
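# Run a single trial: inter-trial interval, fixation acquisition, cue/target
# presentation, pulse train, response collection and feedback.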
def run_trial(exp, info):
t_info, p_info = info
# ~~~ Set trial-constant attributes of the stimuli
exp.s.cue.pos = exp.p.stim_pos[t_info.cue_pos]
# ~~~ Inter-trial interval
exp.s.fix.color = exp.p.fix_iti_color
exp.wait_until(exp.iti_end, draw="fix", iti_duration=t_info.wait_iti)
# ~~~ Trial onset
t_info["onset_fix"] = exp.clock.getTime()
exp.s.fix.color = exp.p.fix_ready_color
res = exp.wait_until(AcquireFixation(exp),
timeout=exp.p.wait_fix,
draw="fix")
if res is None:
t_info["result"] = "nofix"
exp.sounds.nofix.play()
return t_info, p_info
for frame in exp.frame_range(seconds=exp.p.wait_start):
exp.check_fixation(allow_blinks=True)
exp.draw("fix")
# ~~~ Pre-stimulus period
exp.s.fix.color = exp.p.fix_trial_color
prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim,
yield_skipped=True)
for frame, skipped in prestim_frames:
if not exp.check_fixation(allow_blinks=True):
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["fixbreak_early"] = True
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
flip_time = exp.draw(["fix", "cue", "targets"])
if not frame:
t_info["onset_targets"] = flip_time
t_info["onset_cue"] = flip_time
t_info["fixbreak_early"] = False
# ~~~ Stimulus period
for p, info in p_info.iterrows():
# Update the pattern
exp.s.pattern.pos = exp.p.stim_pos[info.stim_pos]
exp.s.pattern.contrast = info.contrast
exp.s.pattern.randomize_phases()
# Show each frame of the stimulus
for frame in exp.frame_range(seconds=info.pulse_dur):
if not exp.check_fixation(allow_blinks=True):
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
stims = ["fix", "cue", "targets", "pattern"]
flip_time = exp.draw(stims)
if not frame:
exp.tracker.send_message("pulse_onset")
p_info.loc[p, "occurred"] = True
p_info.loc[p, "pulse_onset"] = flip_time
blink = not exp.tracker.check_eye_open(new_sample=False)
p_info.loc[p, "blink"] |= blink
# This counter is reset at beginning of frame_range
            # so it should count the frames dropped during the stim
p_info.loc[p, "dropped_frames"] = exp.win.nDroppedFrames
gap_frames = exp.frame_range(seconds=info.gap_dur)
for frame in gap_frames:
if not exp.check_fixation(allow_blinks=True):
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
flip_time = exp.draw(["fix", "cue", "targets"])
# Record the time of first flip as the offset of the last pulse
if not frame:
p_info.loc[p, "pulse_offset"] = flip_time
# ~~~ Response period
# Collect the response
now = exp.clock.getTime()
t_info["offset_fix"] = now
t_info["offset_cue"] = now
res = exp.wait_until(AcquireTarget(exp, t_info.target),
timeout=exp.p.wait_resp,
draw="targets")
if res is None:
t_info["result"] = "fixbreak"
else:
t_info.update(pd.Series(res))
# Give feedback
t_info["onset_feedback"] = exp.clock.getTime()
exp.sounds[t_info.result].play()
exp.show_feedback("targets", t_info.result, t_info.response)
exp.wait_until(timeout=exp.p.wait_feedback, draw=["targets"])
exp.s.targets.color = exp.p.target_color
# Prepare for the inter-trial interval
exp.s.fix.color = exp.p.fix_iti_color
exp.draw("fix")
return t_info, p_info
def serialize_trial_info(exp, info):
t_info, _ = info
return t_info.to_json()
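# Aggregate the per-trial results gathered so far into a DataFrame to
# summarise performance.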
def compute_performance(self):
if self.trial_data:
data = | pd.DataFrame([t for t, _ in self.trial_data]) | pandas.DataFrame |
import pandas as pd
import src.variables as var
pd.set_option('display.max_rows', 500)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
from __future__ import print_function
from random_agent import random_agent
from policy_agent import policy_agent
import numpy as np
import pandas as pd
class Board(object):
def __init__(self):
self.tic = -1
self.tac = 1
self.board = np.zeros([3, 3])
def print_board(self):
print("=======================")
df = | pd.DataFrame(self.board) | pandas.DataFrame |
"""Tests for piece.py"""
from fractions import Fraction
import pandas as pd
import numpy as np
from harmonic_inference.data.data_types import KeyMode, PitchType
from harmonic_inference.data.piece import Note, Key, Chord, ScorePiece, get_reduction_mask
import harmonic_inference.utils.harmonic_constants as hc
import harmonic_inference.utils.rhythmic_utils as ru
import harmonic_inference.utils.harmonic_utils as hu
def test_note_from_series():
def check_equals(note_dict, note, measures_df, pitch_type):
assert pitch_type == note.pitch_type
if pitch_type == PitchType.MIDI:
assert (note_dict['midi'] % hc.NUM_PITCHES[PitchType.MIDI]) == note.pitch_class
else:
assert note.pitch_class == note_dict['tpc'] + hc.TPC_C
assert note.octave == note_dict['midi'] // hc.NUM_PITCHES[PitchType.MIDI]
assert note.onset == (note_dict['mc'], note_dict['onset'])
assert note.offset == (note_dict['offset_mc'], note_dict['offset_beat'])
assert note.duration == note_dict['duration']
assert note.onset_level == ru.get_metrical_level(
note_dict['onset'],
measures_df.loc[measures_df['mc'] == note_dict['mc']].squeeze(),
)
assert note.offset_level == ru.get_metrical_level(
note_dict['offset_beat'],
measures_df.loc[measures_df['mc'] == note_dict['offset_mc']].squeeze(),
)
note_dict = {
'midi': 50,
'tpc': 5,
'mc': 1,
'onset': Fraction(1, 2),
'offset_mc': 2,
'offset_beat': Fraction(3, 4),
'duration': Fraction(5, 6),
}
key_values = {
'midi': range(127),
'tpc': range(-hc.TPC_C, hc.TPC_C),
'mc': range(3),
'onset': [i * Fraction(1, 2) for i in range(3)],
'offset_mc': range(3),
'offset_beat': [i * Fraction(1, 2) for i in range(3)],
'duration': [i * Fraction(1, 2) for i in range(3)],
}
measures_df = pd.DataFrame({
'mc': list(range(10)),
'timesig': '12/8'
})
for key, values in key_values.items():
for value in values:
note_dict[key] = value
note_series = | pd.Series(note_dict) | pandas.Series |
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Build optimisation problems from PyPSA networks without Pyomo.
Originally retrieved from nomopyomo ( -> 'no more Pyomo').
"""
__author__ = "PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
__copyright__ = ("Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License")
from .pf import (_as_snapshots, get_switchable_as_dense as get_as_dense)
from .descriptors import (get_bounds_pu, get_extendable_i, get_non_extendable_i,
expand_series, nominal_attrs, additional_linkports,
Dict, get_active_assets, get_activity_mask)
from .linopt import (linexpr, write_bound, write_constraint, write_objective,
set_conref, set_varref, get_con, get_var, join_exprs,
run_and_read_highs, run_and_read_cbc, run_and_read_gurobi,
run_and_read_glpk, run_and_read_cplex, run_and_read_xpress,
define_constraints, define_variables, define_binaries,
align_with_static_component)
import pandas as pd
import numpy as np
from numpy import inf
from distutils.version import LooseVersion
pd_version = LooseVersion(pd.__version__)
agg_group_kwargs = dict(numeric_only=False) if pd_version >= "1.3" else {}
import gc, time, os, re, shutil
from tempfile import mkstemp
import logging
logger = logging.getLogger(__name__)
lookup = pd.read_csv(os.path.join(os.path.dirname(__file__), 'variables.csv'),
index_col=['component', 'variable'])
def define_nominal_for_extendable_variables(n, c, attr):
"""
Initializes variables for nominal capacities for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
network component of which the nominal capacity should be defined
attr : str
name of the variable, e.g. 'p_nom'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
lower = n.df(c)[attr+'_min'][ext_i]
upper = n.df(c)[attr+'_max'][ext_i]
define_variables(n, lower, upper, c, attr)
def define_dispatch_for_extendable_and_committable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if c == 'Generator':
ext_i = ext_i.union(n.generators.query('committable').index)
if ext_i.empty:
return
active = get_activity_mask(n, c, sns)[ext_i] if n._multi_invest else None
define_variables(n, -inf, inf, c, attr, axes=[sns, ext_i], spec='ext', mask=active)
def define_dispatch_for_non_extendable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
fix_i = get_non_extendable_i(n, c)
if c == 'Generator':
fix_i = fix_i.difference(n.generators.query('committable').index)
if fix_i.empty: return
nominal_fix = n.df(c)[nominal_attrs[c]][fix_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, fix_i, attr)
lower = min_pu.mul(nominal_fix)
upper = max_pu.mul(nominal_fix)
axes = [sns, fix_i]
active = get_activity_mask(n, c, sns)[fix_i] if n._multi_invest else None
kwargs = dict(spec='non_ext', mask=active)
dispatch = define_variables(n, -inf, inf, c, attr, axes=axes, **kwargs)
dispatch = linexpr((1, dispatch))
define_constraints(n, dispatch, '>=', lower, c, 'mu_lower', **kwargs)
define_constraints(n, dispatch, '<=', upper, c, 'mu_upper', **kwargs)
def define_dispatch_for_extendable_constraints(n, sns, c, attr):
"""
Sets power dispatch constraints for extendable devices for a given
component and a given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
min_pu, max_pu = get_bounds_pu(n, c, sns, ext_i, attr)
operational_ext_v = get_var(n, c, attr)[ext_i]
nominal_v = get_var(n, c, nominal_attrs[c])[ext_i]
rhs = 0
active = get_activity_mask(n, c, sns)[ext_i] if n._multi_invest else None
kwargs = dict(spec=attr, mask=active)
lhs, *axes = linexpr((max_pu, nominal_v), (-1, operational_ext_v), return_axes=True)
define_constraints(n, lhs, '>=', rhs, c, 'mu_upper', axes=axes, **kwargs)
lhs, *axes = linexpr((min_pu, nominal_v), (-1, operational_ext_v), return_axes=True)
define_constraints(n, lhs, '<=', rhs, c, 'mu_lower', axes=axes, **kwargs)
def define_fixed_variable_constraints(n, sns, c, attr, pnl=True):
"""
Sets constraints for fixing variables of a given component and attribute
to the corresponding values in n.df(c)[attr + '_set'] if pnl is True, or
n.pnl(c)[attr + '_set']
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
pnl : bool, default True
Whether variable which should be fixed is time-dependent
"""
if pnl:
if attr + '_set' not in n.pnl(c): return
fix = n.pnl(c)[attr + '_set'].loc[sns]
if fix.empty: return
if n._multi_invest:
active = get_activity_mask(n, c, sns)
fix = fix.where(active)
fix = fix.stack()
lhs = linexpr((1, get_var(n, c, attr).stack()[fix.index]),
as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix).unstack().T
else:
if attr + '_set' not in n.df(c): return
fix = n.df(c)[attr + '_set'].dropna()
if fix.empty: return
lhs = linexpr((1, get_var(n, c, attr)[fix.index]), as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix)
set_conref(n, constraints, c, f'mu_{attr}_set')
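# Define binary unit-commitment (status) variables for committable generators;
# generators that are both extendable and committable are treated as
# extendable only (see the warning below).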
def define_generator_status_variables(n, sns):
c = 'Generator'
com_i = n.generators.query('committable').index
ext_i = get_extendable_i(n, c)
if not (ext_i.intersection(com_i)).empty:
logger.warning("The following generators have both investment optimisation"
f" and unit commitment:\n\n\t{', '.join((ext_i.intersection(com_i)))}\n\nCurrently PyPSA cannot "
"do both these functions, so PyPSA is choosing investment optimisation "
"for these generators.")
com_i = com_i.difference(ext_i)
if com_i.empty: return
active = get_activity_mask(n, c, sns)[com_i] if n._multi_invest else None
define_binaries(n, (sns, com_i), 'Generator', 'status', mask=active)
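# Constrain the dispatch of non-extendable committable generators to lie
# between min_pu*p_nom and max_pu*p_nom whenever the binary status variable
# is on.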
def define_committable_generator_constraints(n, sns):
c, attr = 'Generator', 'status'
com_i = n.df(c).query('committable and not p_nom_extendable').index
if com_i.empty: return
nominal = n.df(c)[nominal_attrs[c]][com_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, com_i, 'p')
lower = min_pu.mul(nominal)
upper = max_pu.mul(nominal)
status = get_var(n, c, attr)
p = get_var(n, c, 'p')[com_i]
lhs = linexpr((lower, status), (-1, p))
active = get_activity_mask(n, c, sns)[com_i] if n._multi_invest else None
define_constraints(n, lhs, '<=', 0, 'Generators', 'committable_lb', mask=active)
lhs = linexpr((upper, status), (-1, p))
define_constraints(n, lhs, '>=', 0, 'Generators', 'committable_ub', mask=active)
def define_ramp_limit_constraints(n, sns, c):
"""
Defines ramp limits for a given component with valid ramplimit.
"""
rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index
rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index
if rup_i.empty & rdown_i.empty:
return
fix_i = get_non_extendable_i(n, c)
ext_i = get_extendable_i(n, c)
if "committable" in n.df(c):
com_i = n.df(c).query('committable').index.difference(ext_i)
else:
com_i = []
# Check if ramping is not at start of n.snapshots
start_i = n.snapshots.get_loc(sns[0]) - 1
pnl = n.pnl(c)
# get dispatch for either one or two ports
attr = ({'p', 'p0'} & set(pnl)).pop()
p_prev_fix = pnl[attr].iloc[start_i]
is_rolling_horizon = (sns[0] != n.snapshots[0]) and not p_prev_fix.empty
if is_rolling_horizon:
active = get_activity_mask(n, c, sns)
p = get_var(n, c, 'p')
p_prev = get_var(n, c, 'p').shift(1, fill_value=-1)
rhs_prev = pd.DataFrame(0, *p.axes)
rhs_prev.loc[sns[0]] = p_prev_fix
else:
active = get_activity_mask(n, c, sns[1:])
p = get_var(n, c, 'p').loc[sns[1:]]
p_prev = get_var(n, c, 'p').shift(1, fill_value=-1).loc[sns[1:]]
rhs_prev = pd.DataFrame(0, *p.axes)
# fix up
gens_i = rup_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = rhs_prev[gens_i] + n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# ext up
gens_i = rup_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_up'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
rhs = rhs_prev[gens_i]
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# com up
gens_i = rup_i.intersection(com_i)
if not gens_i.empty:
limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom')
limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
status = get_var(n, c, 'status').loc[p.index, gens_i]
status_prev = get_var(n, c, 'status').shift(1, fill_value=-1).loc[p.index, gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_start - limit_up, status_prev),
(- limit_start, status))
rhs = rhs_prev[gens_i]
if is_rolling_horizon:
status_prev_fix = n.pnl(c)['status'][com_i].iloc[start_i]
rhs.loc[sns[0]] += (limit_up - limit_start) * status_prev_fix
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# fix down
gens_i = rdown_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = rhs_prev[gens_i] + n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# ext down
gens_i = rdown_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_down'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
rhs = rhs_prev[gens_i]
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# com down
gens_i = rdown_i.intersection(com_i)
if not gens_i.empty:
limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom')
limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom')
status = get_var(n, c, 'status').loc[p.index, gens_i]
status_prev = get_var(n, c, 'status').shift(1, fill_value=-1).loc[p.index, gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_down - limit_shut, status),
(limit_shut, status_prev))
rhs = rhs_prev[gens_i]
if is_rolling_horizon:
status_prev_fix = n.pnl(c)['status'][com_i].iloc[start_i]
rhs.loc[sns[0]] += -limit_shut * status_prev_fix
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
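# For every carrier, bound the total nominal capacity attached to each bus
# using the optional 'nom_max_<carrier>' / 'nom_min_<carrier>' columns of
# n.buses.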
def define_nominal_constraints_per_bus_carrier(n, sns):
for carrier in n.carriers.index:
for bound, sense in [("max", "<="), ("min", ">=")]:
col = f'nom_{bound}_{carrier}'
if col not in n.buses.columns: continue
rhs = n.buses[col].dropna()
lhs = | pd.Series('', rhs.index) | pandas.Series |
import os
import pandas as pd
DATA_CUISINE_PATH = "data/cuisine_data/"
DATA_RECIPES_PATH = "data/recipes_data/"
def import_data():
train = pd.read_json(os.path.join(DATA_CUISINE_PATH, 'train.json'))
test = pd.read_json(os.path.join(DATA_CUISINE_PATH, 'test.json'))
return pd.concat([train,test],axis=0)
def import_recipes_main():
data_path_ar = os.path.join(DATA_RECIPES_PATH, "recipes_raw_nosource_ar.json")
data_path_epi = os.path.join(DATA_RECIPES_PATH, "recipes_raw_nosource_epi.json")
data_path_fn = os.path.join(DATA_RECIPES_PATH, "recipes_raw_nosource_fn.json")
data = pd.concat([ | pd.read_json(data_path_ar, orient='index') | pandas.read_json |
import argparse
import glob
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import jit, prange
from sklearn import metrics
from utils import *
@jit(nopython=True, nogil=True, cache=True, parallel=True, fastmath=True)
def compute_tp_tn_fp_fn(y_true, y_pred):
tp = 0
tn = 0
fp = 0
fn = 0
for i in prange(y_pred.size):
tp += y_true[i] * y_pred[i]
tn += (1-y_true[i]) * (1-y_pred[i])
fp += (1-y_true[i]) * y_pred[i]
fn += y_true[i] * (1-y_pred[i])
return tp, tn, fp, fn
def compute_precision(tp, fp):
return tp / (tp + fp)
def compute_recall(tp, fn):
return tp / (tp + fn)
def compute_f1_score(precision, recall):
try:
return (2*precision*recall) / (precision + recall)
except:
return 0
def compute_fbeta_score(precision, recall, beta):
try:
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
except:
return 0
def compute_accuracy(tp,tn,fp,fn):
return (tp + tn)/(tp + tn + fp + fn)
def compute_auc(GT, pred):
return metrics.roc_auc_score(GT, pred)
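# Compute and plot the precision-recall curve; the AUC return below is
# commented out, so this helper currently only draws the plot.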
def compute_auprc(GT, pred):
prec, rec, thresholds = metrics.precision_recall_curve(GT, pred)
# print(prec, rec, thresholds)
plt.plot(prec, rec)
plt.show()
# return metrics.auc(prec, rec)
def compute_average_precision(GT, pred):
ratio = sum(GT)/np.size(GT)
return metrics.average_precision_score(GT, pred), ratio
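# Collect prediction/ground-truth pairs, compute the metrics above for each
# image, and append the aggregated results to the Excel workbook given by
# --out.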
def main(args):
#====== Numba compilation ======
    # The two warm-up calls below are important: they trigger numba JIT
    # compilation for both dtype signatures (uint8 and float32), so later
    # timings do not include compilation overhead.
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.uint8), np.array([0,1,0], dtype=np.uint8))
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.float32), np.array([0,1,0], dtype=np.float32))
#===============================
out = args.out
if not os.path.exists(os.path.dirname(out)):
os.makedirs(os.path.dirname(out))
model_name = args.model_name
number_epochs = args.epochs
batch_size = args.batch_size
NumberFilters = args.number_filters
lr = args.learning_rate
cv_fold = args.cv_fold
model_params = ['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', 'Empty col', 'Empty col2', 'Empty col3', 'CV']
param_values = [number_epochs, batch_size, NumberFilters, lr, '', '', '', '']
Params = pd.Series(param_values, index=model_params, name='Params values')
metrics_names = ['AUPRC','AUPRC - Baseline','F1_Score','Fbeta_Score','Accuracy','Recall','Precision','CV fold']
Metrics = pd.Series(metrics_names, index=model_params, name='Model\Metrics')
if not os.path.exists(out):
Folder_Metrics = pd.DataFrame(columns = model_params)
Image_Metrics = pd.DataFrame(columns = model_params)
else:
Metrics_file = pd.ExcelFile(out)
Folder_Metrics = pd.read_excel(Metrics_file, 'Sheet1', index_col=0, header=None)
Folder_Metrics = Folder_Metrics[Folder_Metrics.columns[:8]]
Folder_Metrics.columns = model_params
Image_Metrics = pd.read_excel(Metrics_file, 'Sheet2', index_col=0, header=None)
Image_Metrics.columns = model_params
matching_values = (Folder_Metrics.values[:,:4] == Params.values[:4]).all(1)
if not matching_values.any():
Folder_Metrics = Folder_Metrics.append(pd.Series(['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', '', '', '', 'CV'], name='Params', index=model_params), ignore_index=False)
Folder_Metrics = Folder_Metrics.append(Params, ignore_index=False)
Folder_Metrics = Folder_Metrics.append(Metrics, ignore_index=False)
Folder_Metrics = Folder_Metrics.append(pd.Series(name='', dtype='object'), ignore_index=False)
matching_values = (Image_Metrics.values[:,:4] == Params.values[:4]).all(1)
if not matching_values.any():
Image_Metrics = Image_Metrics.append(pd.Series(['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', '', '', '', 'File Name'], name='Params', index=model_params), ignore_index=False)
Image_Metrics = Image_Metrics.append(pd.Series(param_values, index=model_params, name='Params values'), ignore_index=False)
Image_Metrics = Image_Metrics.append(pd.Series(['AUPRC','AUPRC - Baseline','F1_Score','Fbeta_Score','Accuracy','Recall','Precision','File Name'], index=model_params, name='Model\Metrics'), ignore_index=False)
Image_Metrics = Image_Metrics.append(pd.Series(name='', dtype='object'), ignore_index=False)
arrays = [range(len(Folder_Metrics)), Folder_Metrics.index]
Index = pd.MultiIndex.from_arrays(arrays, names=('number', 'name'))
Folder_Metrics.set_index(Index, inplace=True)
arrays = [range(len(Image_Metrics)), Image_Metrics.index]
Index = pd.MultiIndex.from_arrays(arrays, names=('number', 'name'))
Image_Metrics.set_index(Index, inplace=True)
idx1 = Folder_Metrics[(Folder_Metrics.values[:,:4] == Params.values[:4]).all(1)].index.get_level_values('number').tolist()[0]
idx2 = Image_Metrics[(Image_Metrics.values[:,:4] == Params.values[:4]).all(1)].index.get_level_values('number').tolist()[0]
img_fn_array = []
if args.pred_img:
img_obj = {}
img_obj["img"] = args.pred_img
img_obj["GT"] = args.groundtruth_img
if args.pred_raw_img:
img_obj['raw'] = args.pred_raw_img
img_fn_array.append(img_obj)
if args.pred_dir:
normpath_img = os.path.normpath("/".join([args.pred_dir, '*', '']))
normpath_GT = os.path.normpath("/".join([args.groundtruth_dir, '*', '']))
if args.pred_raw_dir:
normpath_raw = os.path.normpath("/".join([args.pred_raw_dir, '*', '']))
img_list = []
for img_fn in glob.iglob(normpath_img, recursive=True):
if args.tool == 'RCSeg':
img_split = os.path.basename(img_fn).split("_")
if img_split[0] == img_split[-2] or (img_split[-2] not in ['upper', 'lower']):
img_list.append(img_fn)
else:
img_list.append(img_fn)
if args.pred_raw_dir:
for (img_fn, GT_fn, raw_fn) in zip(sorted(img_list), sorted(glob.iglob(normpath_GT, recursive=True)), sorted(glob.iglob(normpath_raw, recursive=True))):
if os.path.isfile(img_fn) and True in [ext in img_fn for ext in [".nrrd", ".nrrd.gz", ".nii", ".nii.gz", ".gipl", ".gipl.gz"]]:
img_obj = {}
img_obj["img"] = img_fn
img_obj["GT"] = GT_fn
img_obj["raw"] = raw_fn
img_fn_array.append(img_obj)
else:
for (img_fn, GT_fn) in zip(sorted(img_list), sorted(glob.iglob(normpath_GT, recursive=True))):
if os.path.isfile(img_fn) and True in [ext in img_fn for ext in [".nrrd", ".nrrd.gz", ".nii", ".nii.gz", ".gipl", ".gipl.gz"]]:
img_obj = {}
img_obj["img"] = img_fn
img_obj["GT"] = GT_fn
img_fn_array.append(img_obj)
total_values = pd.DataFrame(columns=model_params)
for img_obj in img_fn_array:
startTime = time.time()
pred_path = img_obj["img"]
GT_path = img_obj["GT"]
pred, _ = ReadFile(pred_path)
GT, _ = ReadFile(GT_path, verbose=0)
pred = Normalize(pred,out_min=0,out_max=1)
GT = Normalize(GT,out_min=0,out_max=1)
pred[pred<=0.5]=0
pred[pred>0.5]=1
GT[GT<=0.5]=0
GT[GT>0.5]=1
pred = np.array(pred).flatten()
GT = np.array(GT).flatten()
GT = np.uint8(GT > 0.5)
tp, tn, fp, fn = compute_tp_tn_fp_fn(GT, pred)
recall = compute_recall(tp, fn)
precision = compute_precision(tp, fp)
f1 = compute_f1_score(precision, recall)
fbeta = compute_fbeta_score(precision, recall, 2)
acc = compute_accuracy(tp, tn, fp, fn)
if 'raw' in img_obj:
raw_path = img_obj["raw"]
raw, _ = ReadFile(raw_path, verbose=0)
raw = Normalize(raw,out_min=0,out_max=1)
raw = np.array(raw).flatten()
# auc = compute_auc(GT, raw)
compute_auprc(GT, raw)
auprc, ratio = compute_average_precision(GT, raw)
else:
# auc = compute_auc(GT, pred)
# auprc = compute_auprc(GT, raw)
auprc, ratio = compute_average_precision(GT, pred)
metrics_line = [auprc,ratio,f1,fbeta,acc,recall,precision]
metrics_line.append(os.path.basename(pred_path).split('.')[0])
total_values.loc[len(total_values)] = metrics_line
stopTime = time.time()
print('Processing completed in {0:.2f} seconds'.format(stopTime-startTime))
means = total_values[total_values.columns.drop('CV')].mean()
stds = total_values[total_values.columns.drop('CV')].std()
stds = [0 if math.isnan(x) else x for x in stds]
values = [(f"{mean:.4f}"+' \u00B1 '+f"{std:.4f}") for (mean,std) in zip(means,stds)]
values.append(cv_fold)
line = | pd.DataFrame([values], columns=model_params) | pandas.DataFrame |
#%%
import numpy as np
import pandas as pd
import anthro.io
import altair as alt
# Load thea data
data = | pd.read_csv('../processed/FAOSTAT_crop_primary_yields.csv') | pandas.read_csv |
"""
Code borrowed/reproduced from kjchalup's 'A fast conditional independence test'
Reference: <NAME> and <NAME>, 2017.
@author: roshanprakash
"""
import pandas as pd
from joblib import Parallel, delayed
import numpy as np
import time
from scipy.stats import ttest_1samp
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import mean_squared_error as mse
from sklearn.tree import DecisionTreeRegressor as DT
from sklearn.model_selection import GridSearchCV
def _mix_merge_columns(x, z, seed=None):
"""
Permutes the columns of two samples separately and merges them.
PARAMETERS
----------
- x (numpy array) : the first set of random variables, of shape (N, D1)
    - z (numpy array) : the second set of random variables, of shape (N, D2)
RETURNS
-------
- a numpy array of shape (N, D1+D2), containing permuted columns.
"""
num_columns = x.shape[1]+z.shape[1]
global_state = np.random.get_state()
np.random.seed(seed or int(time.time()))
shuffled_idxs = np.random.permutation(np.arange(num_columns))
np.random.set_state(global_state) # set the global state back to what it was
reordered_out = np.zeros([x.shape[0], num_columns])
reordered_out[:, shuffled_idxs[:x.shape[1]]] = x
reordered_out[:, shuffled_idxs[x.shape[1]:]] = z
return reordered_out
def _find_best_model(x, y, z, params_grid, test_size, log_features=False):
"""
Performs GridSearch on `params_grid`.
PARAMETERS
----------
- x (numpy array) : the input set of random variables, of shape (N, D1)
- y (numpy array) : the target set of random variables, of shape (N, D2)
- z (numpy array) : the conditioning set of random variables, of shape (N, D3)
- params_grid (dict) : the hyperparameters to try out while performing grid search ; for more details,
look up `sklearn.model_selection.GridSearchCV`
- test_size (float) : the proportion of samples to be used as test data
- log_features (bool, default=False) : if True 'log2' will be used as `max_features` for the Decision Tree
                                           Regressor provided there are at least 10 features in the input
RETURNS
-------
- the Decision Tree Regressor with the optimal value for `min_sample_split`.
"""
model_input = _mix_merge_columns(x, z)
    if log_features and model_input.shape[1] > 10:
max_features = 'log2'
else:
max_features = 'auto'
cv_splits = ShuffleSplit(n_splits=3, test_size=test_size)
best_params = GridSearchCV(DT(max_features=max_features), params_grid, cv=cv_splits, n_jobs=-1).fit(model_input, y).best_params_
best_model = DT(**best_params)
return best_model
def _compute_error(data_tuple):
"""
Fits the decision tree regression model to a data set, and computes the error on the test set.
PARAMETERS
----------
- data_dict (dict) : a dictionary containing the covariates, target and the decision tree model to be fitted.
- proportion_test (float) : the fraction of samples to be included in test set
- i (int) : the run index used to access the shuffled indices of data for this run and the seed to shuffle columns
before merging `x` and `z`
RETURNS
-------
- The model error on the test set.
"""
data_dict, proportion_test, i = data_tuple
model = data_dict['model']
n_test = data_dict['n_test']
shuffled_idxs = data_dict['shuffled_idxs'][i]
if data_dict['reshuffle']:
perm_idxs = np.random.permutation(data_dict['x'].shape[0])
else:
perm_idxs = np.arange(data_dict['x'].shape[0])
# mix up columns before training
x = _mix_merge_columns(data_dict['x'][perm_idxs], data_dict['z'], i)
model.fit(x[shuffled_idxs][n_test:], data_dict['y'][shuffled_idxs][n_test:])
return mse(data_dict['y'][shuffled_idxs][:n_test], model.predict(x[shuffled_idxs][:n_test]))
def test_conditional_independence(x, y, z, nruns=8, params_grid={'min_samples_split':[2, 8, 64, 512, 1e-2, .2, .4]}, test_size=0.1, threshold=0.01, verbose=False):
"""
Performs fast conditional/unconditional independence tests using Decision Tree Regressors.
PARAMETERS
----------
- x (numpy array) : the first set of random variables, of shape (N, D1)
- y (numpy array) : the next set of random variables, of shape (N, D2)
- z (numpy array) : the conditioning set of random variables, of shape (N, D3)
- params_grid (dict) : the hyperparameters to try out while performing grid search ; for more details,
look up `sklearn.model_selection.GridSearchCV`
- test_size (float, default=0.1) : the proportion of samples to be used as test data
- threshold (float, default=0.01) : the alpha value for t-test
RETURNS
-------
- True, if X is conditionally independent of Y, given Z and False otherwise.
"""
assert x.shape[0]==y.shape[0], 'X and Y should contain the same number of data instances!'
num_instances = x.shape[0]
num_test_instances = int(test_size*num_instances)
shuffled_idxs = [np.random.permutation(num_instances) for i in range(nruns)]
y = StandardScaler().fit_transform(y)
# find the best-fitting decision regression tree for y = f(x, z) and then train and compute error for each of `nruns`
best_model = _find_best_model(x, y, z, params_grid, test_size)
data_dict = {'x':x, 'y':y, 'z':z, 'model':best_model, 'reshuffle':False, 'shuffled_idxs':shuffled_idxs, 'n_test':num_test_instances}
results_xz = np.array(Parallel(n_jobs=-1, max_nbytes=100e6)(delayed(_compute_error)((data_dict, test_size, run_idx)) for run_idx in range(nruns)))
# find the best-fitting decision regression tree for : y = f(reshuffle(z)) if z is not empty, else y = f(reshuffle(x))
if z.shape[1]==0:
x_ = x[np.random.permutation(num_instances)]
else:
x_ = np.empty(shape=[num_instances, 0])
    data_dict['model'] = _find_best_model(x, y, z, params_grid, test_size)
data_dict['reshuffle'] = True
data_dict['x'] = x_
results_z = np.array(Parallel(n_jobs=-1, max_nbytes=100e6)(delayed(_compute_error)((data_dict, test_size, run_idx)) for run_idx in range(nruns)))
# perform 1-sample t-test to check significance of both samples of results
t_stat, p_val = ttest_1samp(results_z/results_xz, 1)
if t_stat<0:
p_val = 1-p_val/2
else:
p_val = p_val/2
# print out p-val if required
if verbose:
print('p-value for the null hypothesis that X and Y are conditionally independent, given Z : {}'.format(p_val))
# return if samples are independent or otherwise
if p_val<threshold:
return False
else:
return True
if __name__=='__main__':
data = np.zeros((10000, 4))
data[:, 0] = np.random.normal(loc=10.0, scale=5.0, size=10000)
data[:, 1] = np.random.normal(loc=1.0, scale=2.0, size=10000)
data[:, 2] = np.random.gamma(2, 0.65, 10000)
data[:, 3] = data[:, 1]+data[:, 2]
data = | pd.DataFrame(data) | pandas.DataFrame |
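# --- Illustrative aside (not part of the original demo) ----------------------
# Hedged sketch of how the synthetic data built above could be fed to the test
# defined in this file: column 3 = column 1 + column 2, so given columns 1 and
# 2 it should be judged conditionally independent of column 0.
# x = data[[0]].values
# y = data[[3]].values
# z = data[[1, 2]].values
# print(test_conditional_independence(x, y, z, verbose=True))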
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import ipaddress
import codecs
import time
import pandas as pd
import urllib3
from urllib3 import util
from classifier4gyoithon.GyoiClassifier import DeepClassifier
from classifier4gyoithon.GyoiExploit import Metasploit
from classifier4gyoithon.GyoiReport import CreateReport
from util import Utilty
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Identify product name using signature.
def identify_product(categoy, target_url, response, utility):
product_list = []
reason_list = []
full_path = os.path.dirname(os.path.abspath(__file__))
file_name = 'signature_' + categoy + '.txt'
try:
with codecs.open(os.path.join(full_path + '/signatures/', file_name), 'r', 'utf-8') as fin:
matching_patterns = fin.readlines()
for pattern in matching_patterns:
items = pattern.replace('\r', '').replace('\n', '').split('@')
keyword_list = []
product = items[0]
signature = items[1]
list_match = re.findall(signature, response, flags=re.IGNORECASE)
if len(list_match) != 0:
# Output result (header)
keyword_list.append(list_match)
utility.print_message(OK, 'category : {}'.format(categoy))
utility.print_message(OK, 'product : {}'.format(product))
utility.print_message(OK, 'reason : {}'.format(keyword_list))
utility.print_message(OK, 'target url : {}'.format(target_url))
utility.print_message(NONE, '-' * 42)
product_list.append(product)
reason_list.append(keyword_list)
except Exception as err:
utility.print_exception(err, '{}'.format(err))
return product_list, reason_list
# Classify the product name using signatures.
def classifier_signature(ip_addr, port, target_url, response, log_file, utility):
utility.print_message(NOTE, 'Analyzing gathered HTTP response using Signature.')
ip_list = []
port_list = []
vhost_list = []
judge_list = []
version_list = []
reason_list = []
scan_type_list = []
ua_list = []
http_ver_list = []
ssl_list = []
sni_list = []
url_list = []
log_list = []
product_list = []
for category in ['os', 'web', 'framework', 'cms']:
products, keywords = identify_product(category, target_url, response, utility)
for product, keyword in zip(products, keywords):
ip_list.append(ip_addr)
port_list.append(port)
vhost_list.append(ip_addr)
judge_list.append(category + ':' + str(product))
version_list.append('-')
reason_list.append(keyword)
scan_type_list.append('[ip]')
ua_list.append('-')
http_ver_list.append('HTTP/1.1')
ssl_list.append('-')
sni_list.append('-')
url_list.append(target_url)
log_list.append(log_file)
product_list.append(product)
if len(product_list) == 0:
utility.print_message(WARNING, 'Product Not Found.')
return []
# logging.
series_ip = pd.Series(ip_list)
series_port = pd.Series(port_list)
series_vhost = pd.Series(vhost_list)
series_judge = pd.Series(judge_list)
series_version = pd.Series(version_list)
series_reason = pd.Series(reason_list)
series_scan_type = pd.Series(scan_type_list)
series_ua = pd.Series(ua_list)
series_http_ver = pd.Series(http_ver_list)
series_ssl = pd.Series(ssl_list)
series_sni = pd.Series(sni_list)
series_url = pd.Series(url_list)
series_log = pd.Series(log_list)
df = pd.DataFrame({'ip': series_ip,
'port': series_port,
'vhost': series_vhost,
'judge': series_judge,
'judge_version': series_version,
'reason': series_reason,
'scantype': series_scan_type,
'ua': series_ua,
'version': series_http_ver,
'ssl': series_ssl,
'sni': series_sni,
'url': series_url,
'log': series_log},
columns=['ip', 'port', 'vhost', 'judge', 'judge_version', 'reason',
'scantype', 'ua', 'version', 'ssl', 'sni', 'url', 'log'])
saved_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'gyoithon')
df.sort_values(by='port', ascending=False).to_csv(os.path.join(saved_path, 'webconf.csv'),
mode='a',
header=False,
index=False)
return product_list
# Create webconf.csv
def create_webconf(ip_addr, port, log_file):
utility.print_message(NOTE, 'Create "webconf.csv".')
series_ip = pd.Series([ip_addr])
series_port = pd.Series([str(port)])
series_vhost = | pd.Series([ip_addr]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sat May 22 01:11:59 2021
@author: <NAME>, Department of Planning, DCEA, Aalborg University
<EMAIL>
"""
'''
Demonstrates the behavior of the module estimating the solar power received by a given PBR geometry.
Execute the block on the influence of azimuth to reproduce the figure in SI I.
# API source : https://ec.europa.eu/jrc/en/PVGIS/docs/noninteractive
'''
import requests
import matplotlib.pyplot as plt
import pandas as pd
import decimal
import random
import numpy as np
import os
# Set working directory to file location
# (works only when executing the whole file, not individual cells (Run Current Cell))
currentfolder=os.path.dirname(os.path.realpath(__file__))
os.chdir(currentfolder)
import Retrieving_solar_and_climatic_data_1 as solardata
import Functions_for_physical_and_biological_calculations_1 as functions
#Influence of the length of the PBR
# Assuming the PBR unit is a rectangle with width=length/3
df_resultslength= pd.DataFrame(columns=['Upper','Lower','Average'])
length_values=range(1,50)
for length in length_values:
datacollection=solardata.Qreceived_bym2PBR_month(43.695, 1.922, 3, 90, 1.5, 0.03, 0.01, 0.2, length, length/3)
averageday_persquaremeter=sum(datacollection[0]['Average'])
upperday_persquaremeter=sum(datacollection[0]['Upper'])
lowerday_persquaremeter=sum(datacollection[0]['Lower'])
df_resultslength.loc[length]=[upperday_persquaremeter,lowerday_persquaremeter,averageday_persquaremeter]
plt.plot(length_values,df_resultslength)
plt.ylabel('Wh collected per day')
plt.xlabel('Length of the PBR (width=length/3)')
plt.savefig('../Plot/Sensitivity_length_Qm2',dpi=600)
###Influence of the Tube diameter
df_resultsdiameter=pd.DataFrame(columns=['Upper','Lower','Average'])
diameter_values=range(1,11)
diameter_values=[a/100 for a in diameter_values]
for diameter in diameter_values:
datacollection=solardata.Qreceived_bym2PBR_month(43.695, 1.922, 3, 90, 1.5, diameter, 0.01, 0.2, 25, 25/3)
averageday_persquaremeter=sum(datacollection[0]['Average'])
upperday_persquaremeter=sum(datacollection[0]['Upper'])
lowerday_persquaremeter=sum(datacollection[0]['Lower'])
df_resultsdiameter.loc[diameter]=[upperday_persquaremeter,lowerday_persquaremeter,averageday_persquaremeter]
plt.plot(diameter_values,df_resultsdiameter)
plt.ylabel('Wh collected per day')
plt.xlabel('Tube diameter in m')
plt.savefig('../Plot/Sensitivity_Tube_diameter',dpi=600)
###Influence of the gap between tubes
df_resultsgap=pd.DataFrame(columns=['Upper','Lower','Average'])
gap_values=range(1,30)
gap_values=[a/100 for a in gap_values]
for gap in gap_values:
datacollection=solardata.Qreceived_bym2PBR_month(43.695, 1.922, 3, 90, 1.5, 0.03,gap, 0.2, 25, 25/3)
averageday_persquaremeter=sum(datacollection[0]['Average'])
upperday_persquaremeter=sum(datacollection[0]['Upper'])
lowerday_persquaremeter=sum(datacollection[0]['Lower'])
df_resultsgap.loc[gap]=[upperday_persquaremeter,lowerday_persquaremeter,averageday_persquaremeter]
plt.plot(gap_values,df_resultsgap)
plt.ylabel('Wh collected per day')
plt.xlabel('Gap between rows in m')
plt.savefig('../Plot/Sensitivity_horizontal_distance',dpi=600)
###Influence of the horizontal distance between stacks
df_resultshori_dist= | pd.DataFrame(columns=['Upper','Lower','Average']) | pandas.DataFrame |
from sklearn.metrics import f1_score,recall_score,precision_score,confusion_matrix,accuracy_score
from pylab import *
import torch
import torch.nn as nn
import copy
import random
import pandas as pd
import numpy as np
from tqdm import trange
import pickle
import json
import sys
import time
import shap
from sklearn.model_selection import train_test_split
sys.path.append("classes")
sys.path.append("/home/matilda/PycharmProjects/log_level_estimation/TorchLRP")
from loss_functions import NuLogsyLossCompute
from model import *
from networks import *
from tokenizer import *
from data_loader import *
from prototype import get_prototypes
from collections import defaultdict
import torch.nn.functional as F
import pickle
import spacy
from collections import defaultdict
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
class Baseline(nn.Module):
def __init__(self, n_dimension, n_targets, max_size, d_model):
super(Baseline, self).__init__()
self.layer0 = nn.ModuleList([nn.Linear(d_model, d_model) for i in range(max_size)])
self.l1 = nn.Linear(n_dimension, n_dimension)
self.l2 = nn.Linear(n_dimension, n_dimension)
self.l3 = nn.Linear(n_dimension, n_targets)
self.max_size = max_size
self.activation = torch.tanh
def forward(self, input):
input = input.reshape(-1, 50, 16)
out = []
for idx in range(self.max_size):
out.append(self.layer0[idx](input[:, idx, :]))
input = torch.cat(out, dim=1)
input = self.activation(self.l1(input))
input = self.activation(self.l2(input))
input = self.l3(input)
return input
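# Shape sketch for the baseline MLP. The reshape in forward() assumes the flat input
# encodes max_size=50 token positions of d_model=16 features, so n_dimension has to be
# max_size * d_model. The values below are illustrative, not taken from the experiments.
def _demo_baseline_shapes():
    model = Baseline(n_dimension=50 * 16, n_targets=2, max_size=50, d_model=16)
    dummy = torch.zeros(4, 50 * 16)   # batch of 4 flattened (50, 16) inputs
    out = model(dummy)                # per-position Linear -> concat -> 3-layer MLP
    return out.shape                  # torch.Size([4, 2])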
def run_train_baseline(dataloader, model, optimizer, f_loss, epoch, device="cpu"):
model.train()
total_loss = 0
start = time.time()
for i, batch in enumerate(dataloader):
load, y = batch
# print("device")
if device == "cuda":
out = model.forward(load.cuda())
else:
out = model.forward(load)
if device == "cuda":
loss = f_loss(out, y.cuda().long())
else:
loss = f_loss(out, y.long())
loss.backward()
optimizer.step()
optimizer.zero_grad()
total_loss += loss
elapsed = time.time() - start
if i % 5 == 0:
print("Epoch %d Train Step: %d / %d Loss: %f" % (epoch, i, len(dataloader), loss), end='\r')
print("Epoch %d Train Step: %d / %d Loss: %f" % (epoch, i, len(dataloader), loss), end='\r')
return total_loss / len(dataloader)
def run_test_baseline(dataloader, model, optimizer, f_loss, epoch, device="cpu"):
model.eval()
preds = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
load, y = batch
if device=="cuda":
out = model.forward(load.cuda())
else:
out = model.forward(load)
if device=="cuda":
tmp = out.detach().cpu().numpy()
else:
tmp = out.detach().cpu().numpy()
preds += list(np.argmax(tmp, axis=1))
return preds
def run_optimizer_baseline(model, train_dataloader, test_dataloader_good_repos, test_dataloader_bad_repos, load_test_good_repos_labels, load_test_bad_repos_labels, optimizer, n_epochs,cross_entropoy_loss,class_weights, device):
conf_matrix_good = []
conf_matrix_bad = []
preds = []
best_f1_score = 0
best_conf_matrix = []
best_model = []
best_preds = []
for epoch in range(1, 1 + n_epochs):
loss = run_train_baseline(train_dataloader, model, optimizer, cross_entropoy_loss, epoch, device=device)
print("Epoch %d Train Loss: %f" % (epoch, loss), " " * 30)
start_time = time.time()
print("----------GOOD REPOS----------")
preds1 = run_test_baseline(test_dataloader_good_repos, model, optimizer, cross_entropoy_loss, epoch, device=device)
print(f"Accuracy:{round(accuracy_score(preds1, load_test_good_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"confusion matrix: ", confusion_matrix(preds1, load_test_good_repos_labels))
conf_matrix_good.append(confusion_matrix(preds1, load_test_good_repos_labels))
calc_f1_score = f1_score(preds1, load_test_good_repos_labels, average='binary')
if best_f1_score < calc_f1_score:
best_f1_score = calc_f1_score
best_conf_matrix = confusion_matrix(preds1, load_test_good_repos_labels)
best_model = model
best_preds = preds1
# print("----------BAD REPOS----------")
#
# preds = run_test_baseline(test_dataloader_bad_repos, model, optimizer, cross_entropoy_loss, epoch, device=device)
# print(f"Accuracy:{round(accuracy_score(preds, load_test_bad_repos_labels), 2)}")
# print(f"f1_score:{round(f1_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
# print(f"recall_score:{round(recall_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
# print(f"precision_score:{round(precision_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
#
# conf_matrix_bad.append(confusion_matrix(preds, load_test_bad_repos_labels))
return best_model, best_preds, best_f1_score, best_conf_matrix
def extract_load(df):
print("Split descriptive and target data into numpay arrays.")
load = df['log_message'].values
labels = df['log_level'].values
return load, labels
def tokenization_dataset(df, load, labels, label_mapper):
tokenizer = LogTokenizer()
tokenized = []
for i in trange(0, len(df)):
tokenized.append(np.array(tokenizer.tokenize(df['log_message'][i])))
labels_tokenized = [label_mapper[label] for label in labels]
return tokenized, labels_tokenized, tokenizer
def word2_vec_representation(df, load, labels, label_mapper, nlp):
tokenizer = LogTokenizer()
tokenized = []
for i in trange(0, len(df)):
tokenized.append(nlp(df['log_message'][i]).vector)
labels_tokenized = [label_mapper[label] for label in labels]
return tokenized, labels_tokenized, tokenizer
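# Note: `nlp` is expected to be a spaCy pipeline that ships with word vectors; the
# concrete model is an assumption here, e.g.:
#   nlp = spacy.load("en_core_web_md")
#   vectors, labels_tok, _ = word2_vec_representation(df, load, labels, label_mapper, nlp)
# Each log message is then a single averaged word vector (doc.vector), i.e. one row of
# fixed dimensionality instead of a token-id sequence.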
def convert_normal_anomaly(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'normal'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'anomaly'
def convert_error_info(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'log'
elif x == 'warning':
return 'log'
elif x == 'info':
return 'info'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'error'
def convert_error_warning(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'debug'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'error'
def convert_info_warning(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'info'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'log'
def convert_error_info_warning(x):
if x == 'trace':
return 'trace'
elif x == 'warn':
return 'warning'
elif x == 'warning':
return 'warning'
elif x == 'info':
return 'info'
elif x == 'debug':
return 'debug'
elif x == 'log':
return 'log'
else:
return 'error'
def read_data(path):
print("Reading data at path ", path)
return pd.read_csv(path).drop(columns=["Unnamed: 0"])
def preprocess_data(df, scenario, verbose=True):
if verbose:
print("Filtering the special characters in the dataframe!")
df['log_message'] = df['log_message'].str.replace("\<\*\>", " ")
df['log_message'] = df['log_message'].str.replace("\[STR\]", " ")
df['log_message'] = df['log_message'].str.replace("\[NUM\]", " ")
if verbose:
print("Converting the classes into required categories. Pair or triplet of (INFO, ERROR, WARNING). ")
if scenario=="error_warning":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_error_warning(x))
elif scenario == "info_warning":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_info_warning(x))
elif scenario == "info_error":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_error_info(x))
elif scenario=="info_error_warning":
df.loc[:, 'log_level'] = df.loc[:, 'log_level'].apply(lambda x: convert_error_info_warning(x))
else:
print("Insert a valid scenario, one in error_warning, info_warning, info_error")
exit(-1)
if verbose:
print("Prior removing (DEBUG, LOG and TRACE) ", df.shape)
df = df[df['log_level'] != 'debug']
df = df[df['log_level'] != 'log']
df = df[df['log_level'] != 'trace']
if verbose:
print("Size after removal ", df.shape)
indecies_to_preserve = df.index
df = df.reset_index()
df = df.drop("index", axis=1)
return df, indecies_to_preserve
def extract_load(df):
print("Split descriptive and target data into numpay arrays.")
load = df['log_message'].values
labels = df['log_level'].values
return load, labels
def tokenization_dataset(df, load, labels, label_mapper):
tokenizer = LogTokenizer()
tokenized = []
for i in trange(0, len(df)):
tokenized.append(np.array(tokenizer.tokenize(df['log_message'][i])))
labels_tokenized = [label_mapper[label] for label in labels]
return tokenized, labels_tokenized, tokenizer
def run_train(dataloader, model, optimizer, f_loss, epoch, polars=None, device="cpu"):
model.train()
total_loss = 0
start = time.time()
for i, batch in enumerate(dataloader):
load, y = batch
if polars is not None:
y = polars[y.numpy()]
y = torch.autograd.Variable(y).cuda()
if device == "gpu":
out = model.forward(load.cuda().long())
else:
out = model.forward(load.long())
if isinstance(f_loss, nn.CosineSimilarity):
loss = (1 - f_loss(out, y)).pow(2).sum()
else:
if device=="gpu":
loss = f_loss(out, y.cuda().long())
else:
loss = f_loss(out, y.long())
loss.backward()
optimizer.step()
optimizer.zero_grad()
total_loss += loss
elapsed = time.time() - start
if i % 5 == 0:
print("Epoch %d Train Step: %d / %d Loss: %f" %
(epoch, i, len(dataloader), loss), end='\r')
print("Epoch %d Train Step: %d / %d Loss: %f" %
(epoch, i, len(dataloader), loss), end='\r')
return total_loss / len(dataloader)
def run_test(dataloader, model, optimizer, f_loss, epoch, polars=None, device="cpu"):
model.eval()
preds = []
tmps = []
scores_head1 = []
scores_head2 = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
load, y = batch
if device=="gpu":
out = model.forward(load.cuda().long())
else:
out = model.forward(load.long())
if isinstance(f_loss, nn.CosineSimilarity):
x = F.normalize(out, p=2, dim=1)
x = torch.mm(x, polars.t().cuda())
pred = x.max(1, keepdim=True)[1].reshape(1, -1)[0]
preds += list(pred.detach().cpu().numpy())
else:
tmp = out.detach().cpu().numpy()
preds += list(np.argmax(tmp, axis=1))
tmps += list(tmp)
scores_head1 += model.encoder.layers[0].self_attn.attn[:, 0, :, :].detach().cpu()
scores_head2 += model.encoder.layers[0].self_attn.attn[:, 1, :, :].detach().cpu()
return preds, scores_head1, scores_head2
def run_optimizer(model, train_dataloader, test_dataloader, test_dataloader_bad_repos, labels_test, labels_test_bad_repos, optimizer, n_epochs, f_loss, polars, class_weights, device):
conf_matrix_good = []
conf_matrix_bad = []
best_f1_good = 0
best_f1_bad = 0
idx_good = 0
idx_bad = 0
best_model = 0
best_preds = 0
for epoch in range(1, 1 + n_epochs):
print("Epoch", epoch)
loss = run_train(train_dataloader, model, optimizer, f_loss, epoch, polars, device)
print("Epoch %d Train Loss: %f" % (epoch, loss), " " * 30)
start_time = time.time()
print("----------GOOD REPOS----------")
preds1, scores11, scores12 = run_test(test_dataloader, model, optimizer, f_loss, epoch, polars, device)
print(f"Accuracy:{round(accuracy_score(preds1, labels_test), 2)}")
print(f"f1_score:{round(f1_score(preds1, labels_test, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, labels_test, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, labels_test, average='binary'), 2)}")
conf_matrix_good.append(confusion_matrix(preds1, labels_test))
pp = confusion_matrix(preds1, labels_test)
print(pp)
if pp.shape[0]<3:
if best_f1_good < f1_score(preds1, labels_test, average='binary') and pp[0][0] >0 and pp[1][1] > 0:
best_f1_good = f1_score(preds1, labels_test, average='binary')
idx_good = epoch-1
best_model = model
# torch.save(model,
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + ".pth")
# with open(
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + "_label_mapper.pickle",
# "wb") as file:
# pickle.dump(label_mapper, file)
else:
if best_f1_good < f1_score(preds1, labels_test, average='binary') and pp[0][0] >0 and pp[1][1] > 0 and pp[2][2]:
best_f1_good = f1_score(preds1, labels_test, average='binary')
idx_good = epoch-1
best_model = model
# torch.save(model,
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + ".pth")
# with open(
# "/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/models/incremental/" + scenario + "_label_mapper.pickle",
# "wb") as file:
# pickle.dump(label_mapper, file)
print("----------BAD REPOS----------")
preds, scores21, scores22 = run_test(test_dataloader_bad_repos, model, optimizer, f_loss, epoch, polars, device)
print(f"Accuracy:{round(accuracy_score(preds, labels_test_bad_repos), 2)}")
print(f"f1_score:{round(f1_score(preds, labels_test_bad_repos, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds, labels_test_bad_repos, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds, labels_test_bad_repos, average='binary'), 2)}")
conf_matrix_bad.append(confusion_matrix(preds, labels_test_bad_repos))
pp = confusion_matrix(preds, labels_test_bad_repos)
if pp.shape[0] < 3:
if best_f1_bad < f1_score(preds, labels_test_bad_repos, average='binary') and pp[0][0] > 0 and pp[1][1] > 0:
best_f1_bad = f1_score(preds, labels_test_bad_repos, average='binary')
idx_bad = epoch - 1
else:
if best_f1_bad < f1_score(preds, labels_test_bad_repos, average='binary') and pp[0][0] > 0 and pp[1][1] > 0 and pp[2][2]:
best_f1_bad = f1_score(preds, labels_test_bad_repos, average='binary')
idx_bad = epoch - 1
return best_model, preds1, preds, conf_matrix_good, conf_matrix_bad, scores11, scores12, scores21, scores22, best_f1_good, best_f1_bad, idx_good, idx_bad
def top_ranked_repos(repositories, star_repos, number_repos_good, number_bad_repos, number_validation_repos, good_bad_hypo):
repositories= repositories.drop('index', axis=1)
repositories = repositories.reset_index()
repositories.columns = ["id", "repo_link"]
if good_bad_hypo:
top_repos = star_repos.iloc[:number_repos_good, :].repo_name
bottom_repos = star_repos.iloc[(-1)*number_bad_repos:,:].repo_name # THIS TRAINS ON TOP repositories
else:
top_repos = star_repos.iloc[(-1)*number_repos_good:, :].repo_name.values
bottom_repos = star_repos.iloc[:number_bad_repos,:].repo_name # THIS TRAINS ON BOTTOM repos
grepos = np.arange(number_repos_good).tolist()
validation_repos = set(random.sample(grepos, number_validation_repos))
train_repos = set(grepos).difference(validation_repos)
top_ranked_indecies = []
top_ranked_validation_indecies = []
bottom_ranked_indecies = []
joint = []
for good_repos in top_repos[list(train_repos)]:
top_ranked_indecies.append(repositories[repositories.repo_link==good_repos].id.values)
joint.append(repositories[repositories.repo_link==good_repos].id.values)
for good_repos in top_repos[list(validation_repos)]:
top_ranked_validation_indecies.append(repositories[repositories.repo_link==good_repos].id.values)
joint.append(repositories[repositories.repo_link==good_repos].id.values)
for bad_repos in bottom_repos:
bottom_ranked_indecies.append(repositories[repositories.repo_link==bad_repos].id.values)
joint.append(repositories[repositories.repo_link==bad_repos].id.values)
return np.hstack(top_ranked_indecies), np.hstack(top_ranked_validation_indecies), np.hstack(bottom_ranked_indecies), np.hstack(joint)
def create_data_loaders_baselines(load_train, labels_train, load_test, labels_test, batch_size):
train_data = TensorDataset(torch.tensor(load_train, dtype=torch.float32), torch.tensor(labels_train.astype(np.int32), dtype=torch.int32))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
test_data = TensorDataset(
torch.tensor(load_test, dtype=torch.float32),
torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return train_dataloader, test_dataloader
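# Minimal wiring sketch for the baseline loaders (shapes are illustrative): the MLP
# baseline above consumes flattened (50 x 16) vectors, i.e. arrays of width 800.
def _demo_baseline_loaders():
    X_train, y_train = np.zeros((32, 800)), np.zeros(32)
    X_test, y_test = np.zeros((8, 800)), np.zeros(8)
    train_dl, test_dl = create_data_loaders_baselines(X_train, y_train, X_test, y_test, batch_size=16)
    return len(train_dl), len(test_dl)   # -> (2, 1) batches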
def evaluate(preds1, load_test_good_repos_labels, preds, load_test_bad_repos_labels, good_bad_hypo):
fin_results = defaultdict(dict)
print("********"*10)
print("----------GOOD REPOS----------")
print(f"Accuracy:{round(accuracy_score(preds1, load_test_good_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds1, load_test_good_repos_labels, average='binary'), 2)}")
d = {}
d["Accuracy"] = accuracy_score(preds1, load_test_good_repos_labels)
d['F1_score'] = f1_score(preds1, load_test_good_repos_labels, average='binary')
d["recall_score"] = recall_score(preds1, load_test_good_repos_labels, average='binary')
d["precision_score"] = precision_score(preds1, load_test_good_repos_labels, average='binary')
d["confusion_matrix"] = confusion_matrix(preds1, load_test_good_repos_labels)
if good_bad_hypo == True:
fin_results["good_repos"] = d
else:
fin_results["bad_repos"] = d
print("----------BAD REPOS----------")
print(f"Accuracy:{round(accuracy_score(preds, load_test_bad_repos_labels), 2)}")
print(f"f1_score:{round(f1_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
print(f"recall_score:{round(recall_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
print(f"precision_score:{round(precision_score(preds, load_test_bad_repos_labels, average='binary'), 2)}")
d = {}
d["Accuracy"] = accuracy_score(preds, load_test_bad_repos_labels)
d['F1_score'] = f1_score(preds, load_test_bad_repos_labels, average='binary')
d["recall_score"] = recall_score(preds, load_test_bad_repos_labels, average='binary')
d["precision_score"] = precision_score(preds, load_test_bad_repos_labels, average='binary')
d["confusion_matrix"] = confusion_matrix(preds, load_test_bad_repos_labels)
if good_bad_hypo == True:
fin_results["bad_repos"] = d
else:
fin_results["good_repos"] = d
return fin_results
def create_data_loaders_baselines_test( load_test, labels_test, batch_size):
test_data = TensorDataset(
torch.tensor(load_test, dtype=torch.float32),
torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return test_dataloader
all_results = defaultdict(dict)
all_results_m1 = defaultdict(dict)
all_results_m2 = defaultdict(dict)
all_results_m3 = defaultdict(dict)
#
#
# good_bad_hypo = True
# scenario = "info_error_warning"
# store_path = "../../5_results/models/learning_scenario1/"
# results_name = store_path + scenario + "/10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# label_mapper_name = store_path + scenario + "/label_mapper_bin_" + str(good_bad_hypo) + "_.pickle"
#
# with open(results_name, "rb") as file:
# all_results = pickle.load(file)
#
# with open(label_mapper_name, "rb") as file:
# label_mapper_name = pickle.load(file)
#
# store_path = "../../5_results/models/baseline/"
# results_name_m1 = store_path + scenario + "/model1_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# results_name_m2 = store_path + scenario + "/model2_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# results_name_m3 = store_path + scenario + "/model3_10_fold_sample_bin_" + str(good_bad_hypo) + "_.pickle"
# label_mapper_name = store_path + scenario + "/label_mapper_bin_" + str(good_bad_hypo) + "_.pickle"
#
# with open(results_name_m1, "rb") as file:
# all_results_m1 = pickle.load(file)
#
# with open(results_name_m2, "rb") as file:
# all_results_m2 = pickle.load(file)
#
# with open(results_name_m3, "rb") as file:
# all_results_m3 = pickle.load(file)
#
# with open(label_mapper_name, "rb") as file:
# label_mapper_name = pickle.load(file)
#
# print(all_results_m3.keys())
for seed in np.arange(1):
print("CURRENTLY PROCESSING SEED {}".format(seed))
PATH = "../../3_preprocessed_data/filtered_log_df_reduced.csv"
PATH_COUNTS = "../../3_preprocessed_data/stars_repos.csv"
learning_rate = 0.0001
decay = 0.001
betas = (0.9, 0.999)
momentum = 0.9
number_repos_good = 700
number_bad_repos = 1
number_validation_repos = 100
batch_size = 2048
pad_len = 50
n_layers=2
in_features=16
out_features=16
num_heads=2
dropout=0.05
max_len=50
n_targets = 2
device = "gpu"
random_seed = seed
torch.manual_seed(random_seed)
np.random.seed(random_seed)
scenario = "info_error" # ONE IN: "info_warning", "info_error", "error_warning", "info_error_warning"
n_epochs = 50
good_bad_hypo = True
df = read_data(PATH)
repositories = df['repo_link']
df, indecies_to_preserve = preprocess_data(df, scenario)
repositories = repositories.loc[indecies_to_preserve]
repositories = repositories.reset_index()
star_repos = | pd.read_csv(PATH_COUNTS) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import joblib
from utils import string2json
# from config import TIMESTEP
import argparse
import sys
plt.interactive(True)
pd.options.display.max_columns = 15
pic_prefix = 'pic/'
data_dict_resampled = joblib.load('data/data_dict_resampled')
gamedata_dict = joblib.load('data/gamedata_dict')
parser = argparse.ArgumentParser()
parser.add_argument('--TIMESTEP', default=10, type=float)
if __debug__:
print('SUPER WARNING!!! YOU ARE IN DEBUG MODE', file=sys.stderr)
args = parser.parse_args(['--TIMESTEP=10'])
else:
args = parser.parse_args()
TIMESTEP = args.TIMESTEP
data_dict_resampled_merged = {}
def timestamp2step(times, df_start_time):
return np.round((np.array(times) - df_start_time) / TIMESTEP).astype(int)
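# Worked example (assuming numeric timestamps in seconds and TIMESTEP=10): an event
# occurring 97 s after df_start_time maps to step round(97 / 10) = 10, i.e. the tenth
# resampled bin.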
# player_id = list(gamedata_dict.keys())[0] # DEBUG
for player_id in gamedata_dict:
df_resampled4player = data_dict_resampled[player_id]
df_resampled4player = df_resampled4player.reset_index()
gamedata_dict4player = gamedata_dict[player_id]
time_game_start = | pd.to_datetime(gamedata_dict4player['time_game_start'], unit='s') | pandas.to_datetime |
import pandas as pd
import numpy as np
from dplypy.dplyframe import DplyFrame
from dplypy.pipeline import row_name_subset, slice_row, slice_column
def test_row_name_subset():
pandas_df = pd.DataFrame(
[[1, 2], [3, 4], [5, 6]], index=["idx1", 7, "idx3"], columns=["col1", "col2"]
)
df = DplyFrame(pandas_df)
output1 = df + row_name_subset(["idx1"])
expected1 = pandas_df.loc[["idx1"]]
pd.testing.assert_frame_equal(output1.pandas_df, expected1)
output2 = df + row_name_subset([7])
expected2 = pandas_df.loc[[7]]
pd.testing.assert_frame_equal(output2.pandas_df, expected2)
try:
df + row_name_subset([6])
except KeyError:
pass
else:
raise AssertionError("KeyError was not raised")
output3 = df + row_name_subset([7, "idx3"])
expected3 = pandas_df.loc[[7, "idx3"]]
pd.testing.assert_frame_equal(output3.pandas_df, expected3)
output4 = df + row_name_subset(
pd.Series([False, True, False], index=[7, "idx1", "idx3"])
)
expected4 = pandas_df.loc[
pd.Series([False, True, False], index=[7, "idx1", "idx3"])
]
pd.testing.assert_frame_equal(output4.pandas_df, expected4)
try:
df + row_name_subset(pd.Series([False, True, False], index=[6, "idx1", "idx3"]))
except pd.core.indexing.IndexingError:
pass
else:
raise AssertionError("IndexingError was not raised")
output5 = df + row_name_subset( | pd.Index(["idx3", 7], name="indices") | pandas.Index |
"""
Script goal, to produce trends in netcdf files
This script can also be used in P03 if required
"""
#==============================================================================
__title__ = "Global Vegetation Trends"
__author__ = "<NAME>"
__version__ = "v1.0(28.03.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and make the fireflies folder the working/execution path +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
import glob
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
# ========== Set up the params ==========
arraysize = 10000 # size of the area to test
mat = 40.0 # how long before a forest reaches maturity
germ = 10.0 # how long before a burnt site can germinate
burnfrac = 0.10 # how much burns
# burnfrac = BurntAreaFraction(year=2016)/2
# nburnfrac = 0.0 # how much burns in other years
nburnfrac = 0.02 # how much burns in other years
# nburnfrac = BurntAreaFraction(year=2018)/2.0 # how much burns in other years
# nburnfrac = np.mean([BurntAreaFraction(year=int(yr)) for yr in [2015, 2017, 2018]]) # how much burns in other years
firefreqL = [25, 20, 15, 11, 5, 4, 3, 2, 1] # how often the fires happen
years = 200 # number of years to loop over
RFfrac = 0.04 # The fraction that will fail to recuit after a fire
iterations = 100
# ========== Create empty lists to hold the variables ==========
obsMA = OrderedDict()
obsMF = OrderedDict()
obsGF = OrderedDict()
obsSF = OrderedDict()
obsMAstd = OrderedDict()
obsMFstd = OrderedDict()
obsGFstd = OrderedDict()
obsSFstd = OrderedDict()
# ========== Loop over the fire frequency list ==========
for firefreq in firefreqL:
print("Testing with a %d year fire frequency" % firefreq)
# ************VECTORISE THIS LOOP *********
iymean = []
ifmat = []
ifgerm = []
ifsap = []
for it in np.arange(iterations):
print("Iteration %d of %d" % (it, iterations))
# ========== Make an array ==========
array = np.zeros( arraysize)
rucfail = np.ones( arraysize)
index = np.arange( arraysize)
# ========== Make the entire array mature forest ==========
array[:] = mat
# ========== Create the empty arrays ==========
ymean = []
fmat = []
fgerm = []
fsap = []
rfhold = 0 #the left over fraction of RF
# ========== start the loop ==========
# ************VECTORISE THIS LOOP *********
for year in range(0, years):
# Loop over every year in case i want to add major fire events
# print(year)
if year % firefreq == 0:
# FIre year
array, rucfail, rfhold = firetime(array, index, mat, germ, burnfrac, rucfail, RFfrac, rfhold)
else:
# non fire year
array, rucfail, rfhold = firetime(array, index, mat, germ, nburnfrac, rucfail, RFfrac, rfhold)
# Mean years
ymean.append(np.mean(array))
# Fraction of mature forest\
fmat.append(np.sum(array>=mat)/float(arraysize))
# Fraction of germinating forest
fsap.append(np.sum(np.logical_and((array>germ), (array<mat)))/float(np.sum((array>germ))))
# Fraction of germinating forest
fgerm.append(np.sum(array>germ)/float(arraysize))
# if year>60 and firefreq == 1:
iymean.append(np.array(ymean))
ifmat.append (np.array(fmat))
ifgerm.append (np.array(fgerm))
ifsap .append (np.array(fsap))
# ipdb.set_trace()
# warn.warn("The end of an iteration has been reached")
# for v in iymean: plt.plot(v)
# plt.show()
# for v in ifmat: plt.plot(v)
# plt.show()
# for v in ifgerm: plt.plot(v)
# plt.show()
# ipdb.set_trace()
FFfmean = np.mean(np.vstack (iymean), axis=0)
FFSTD = np.std( np.vstack (iymean), axis=0)
FFfmat = np.mean(np.vstack (ifmat), axis=0)
FFmatSTD = np.std( np.vstack (ifmat), axis=0)
FFfgerm = np.mean(np.vstack (ifgerm), axis=0)
FFgermSTD = np.std( np.vstack (ifgerm), axis=0)
FFfsap = np.mean(np.vstack (ifsap), axis=0)
FFsapSTD = np.std( np.vstack (ifsap), axis=0)
obsMA["FF_%dyr" % firefreq] = FFfmean
obsMF["FF_%dyr" % firefreq] = FFfmat
obsGF["FF_%dyr" % firefreq] = FFfgerm
obsSF["FF_%dyr" % firefreq] = FFfsap
obsMAstd["FF_%dyr" % firefreq] = FFSTD
obsMFstd["FF_%dyr" % firefreq] = FFmatSTD
obsGFstd["FF_%dyr" % firefreq] = FFgermSTD
obsSFstd["FF_%dyr" % firefreq] = FFsapSTD
obs = OrderedDict()
obs["MeanAge"] = | pd.DataFrame(obsMA) | pandas.DataFrame |
import logging, os, sys, yaml
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import pandas as pd
import numpy as np
from tqdm import tqdm
from Models import *
from Datasets import STD_Dataset
# Function to load YAML config file into a Python dict
def load_parameters(yaml_path):
with open(yaml_path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
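# Illustrative config layout (hypothetical values; the keys mirror what the helpers in
# this file read: mode, model_name, datasets, artifacts, optimizer settings, ...):
#
#   mode: train
#   use_gpu: false
#   dl_num_workers: 2
#   model_name: ConvNet
#   optimizer: adam
#   learning_rate: 0.001
#   criterion: BCELoss
#   datasets:
#     train:
#       root_dir: data/train
#       labels_csv: labels.csv
#       query_dir: queries
#       audio_dir: audio
#       batch_size: 8
#   artifacts:
#     dir: output/exp1
#     log: train.log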
# Function to create PyTorch dataset objects from
# list of datasets defined in YAML file (and loaded into a dict via load_parameters function)
def load_std_datasets(datasets, apply_vad):
return {
ds_name:STD_Dataset(
root_dir = ds_attrs['root_dir'],
labels_csv = ds_attrs['labels_csv'],
query_dir = ds_attrs['query_dir'],
audio_dir = ds_attrs['audio_dir'],
apply_vad = apply_vad
) for (ds_name, ds_attrs) in datasets.items()
}
# Function to create PyTorch DataLoaders from PyTorch datasets
# created via load_std_datasets function
def create_data_loaders(loaded_datasets, config):
return {
ds_name:DataLoader(
dataset = dataset,
batch_size = config['datasets'][ds_name]['batch_size'],
shuffle = True if ds_name == 'train' else False,
num_workers = config['dl_num_workers']
) for (ds_name, dataset) in loaded_datasets.items()
}
# Function to load saved models to continue training from or for evaluation on test data
# Expected input is a config dict with the model name (ConvNet, VGG, ResNet34) and the
# path(s) to saved models.
def load_saved_model(config):
model, optimizer, criterion = instantiate_model(config)
logging.info(" Loading model from '%s'" % (config['model_path']))
checkpoint = torch.load(config['model_path'])
model.load_state_dict(checkpoint['model_state_dict'])
if(config['mode'] == 'train'):
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if config['mode'] == 'eval' and config['use_gpu'] and torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
return model, optimizer, criterion
# Admin function to create output directory if necessary, set up log files, make a copy
# of the config file, and create output CSV file of training predictions (if mode is training)
def setup_exp(config):
output_dir = config['artifacts']['dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if(config['mode'] == 'train'):
make_results_csv(os.path.join(output_dir, 'train_results.csv'))
logging.basicConfig(
filename = os.path.join(output_dir, config['artifacts']['log']),
level = logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
with open(os.path.join(output_dir, 'config.yaml'), 'w') as f:
yaml.dump(config, f)
return output_dir
def instantiate_model(config):
# Instantiate model based on string given in config file (ConvNet, VGG11, ResNet34)
constructor = globals()[config['model_name']]
model = constructor()
logging.info(" Instantiating model '%s'" % (config['model_name']))
if config['use_gpu']:
model.cuda()
if(config['mode'] == 'train'):
model.train()
if(config['optimizer'] == 'adam'):
optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])
if(config['criterion'] == 'BCELoss'):
criterion = torch.nn.BCELoss()
return model, optimizer, criterion
if(config['mode'] == 'eval'):
model.eval()
return model, None, None
# Create CSV file with appropriate header for training/evaluation process to then append to.
def make_results_csv(csv_path, headers = 'train'):
if (headers == 'train'):
csv_cols = ['epoch', 'query','reference','label','pred']
elif (headers == 'eval'):
csv_cols = ['query','reference','label','pred']
t_df = | pd.DataFrame(columns=csv_cols) | pandas.DataFrame |
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
from nltk.corpus import stopwords
nltk.download('stopwords')
from wordcloud import WordCloud, STOPWORDS
import gensim
from gensim.models import Phrases
from gensim.corpora import Dictionary
from gensim.models import LdaModel, CoherenceModel
import pyLDAvis.gensim
def preprocess_topic_modeling(df):
df['publish_date'] = | pd.to_datetime(df['publish_date']) | pandas.to_datetime |
import pandas as pd
class CFBDataframe:
def __init__(self):
# maps each name to [list of accumulated dfs (values[0]), concatenated df (values[1])]
self.data_map = {"drives": [[], pd.DataFrame()], "games": [[], pd.DataFrame()], "lines": [[], pd.DataFrame()],
"player_game_stats": [[], pd.DataFrame()], "player_season_stats": [[], pd.DataFrame()],
"player_usage": [[], pd.DataFrame()], "recruiting_groups": [[], pd.DataFrame()],
"recruiting_players": [[], pd.DataFrame()], "recruiting_teams": [[], pd.DataFrame()],
"cleaned_games": [[], pd.DataFrame()],
"venues": [[], pd.DataFrame()]}
@staticmethod
def append_dfs(df, df_type):
# append df to list of dfs
df_type[0].append(df)
# concat list of dfs
df_type[1] = pd.concat(df_type[0])
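# Usage sketch (hypothetical df): cfb = CFBDataframe(); cfb.append_dfs(df, cfb.data_map["games"])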
@staticmethod
def impute_df(file):
# import data with Windows encoding
try:
df = pd.read_csv(file, encoding='ANSI') # , na_values='?')
# df.fillna(pd.Series.mean(df))
# if not Windows, do Mac encoding
except LookupError:
try:
df = | pd.read_csv(file, encoding='ISO-8859-1') | pandas.read_csv |
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from ast import literal_eval
import numpy as np
import pandas as pd
from fooltrader.consts import CHINA_STOCK_INDEX, USA_STOCK_INDEX
from fooltrader.contract import data_contract
from fooltrader.contract import files_contract
from fooltrader.contract.files_contract import get_kdata_dir, get_kdata_path
from fooltrader.settings import US_STOCK_CODES
from fooltrader.utils.utils import get_file_name, to_time_str
logger = logging.getLogger(__name__)
def convert_to_list_if_need(input):
if input and "[" in input:
return literal_eval(input)
else:
return input
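# Example: '["sh", "sz"]' -> ['sh', 'sz']; a plain string such as 'sh' is returned unchanged.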
# meta
def get_security_list(security_type='stock', exchanges=['sh', 'sz'], start=None, end=None,
mode='simple', start_date=None, codes=None):
"""
get security list.
Parameters
----------
security_type : str
{‘stock’, 'future'},default: stock
exchanges : list
['sh', 'sz','nasdaq','nyse','amex'],default: ['sh','sz']
start : str
the start code,default:None
only works when exchanges is ['sh','sz']
end : str
the end code,default:None
only works when exchanges is ['sh','sz']
mode : str
whether parse more security info,{'simple','es'},default:'simple'
start_date : Timestamp str or Timestamp
the filter for start list date,default:None
codes : list
the exact codes to query,default:None
Returns
-------
DataFrame
the security list
"""
if security_type == 'stock':
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import os, sys, pickle, bcolz
from miki.data import dataGlovar
from miki.data.dataFunction import DataFunction
from miki.data.dataBcolz import DataBcolz
class Query(object):
def __init__(self):
self.__time1 = pd.to_datetime('09:30:00').time()
self.__time2 = pd.to_datetime('15:00:00').time()
self.base_path = dataGlovar.DataPath+'/pickle'
os.makedirs(self.base_path, exist_ok=True)
self.last_end = None
self.cache_data = {}
self.cache_time = None
def get_security_info(self):
# 获取所有股票信息
df = pd.read_pickle(dataGlovar.DataPath+'/stock_info.pkl')
return df
def get_security_list(self, types=['stock']):
df = pd.read_pickle(dataGlovar.DataPath+'/stock_info.pkl')
df = df[df.type.isin(types)]
return df.index.values.tolist()
def get_all_trade_days(self):
# Get all trading days
with open(dataGlovar.DataPath+'/all_trade_days.pkl', 'rb') as f:
all_trade_days = pickle.load(f)
return all_trade_days
def get_time_list(self, start, end, unit):
# Get the list of trading timestamps
def func(x):
return (x.hour-9)*60+x.minute-30 if x.hour<=11 else (x.hour-13+2)*60+x.minute
start,end = pd.to_datetime(start),pd.to_datetime(end)
date_list = bcolz.open(DataFunction.get_path('000001.XSHG', unit='1m')+'/date', mode='r')
date_list = [datetime.utcfromtimestamp(i) for i in date_list]
series = pd.Series(date_list, index=date_list)
series = series[(series.index>=start)&(series.index<=end)]
if len(series) == 0: return []
series = series.apply(func)
return list(series[series.values%unit==0].index)
def get_valuation_x(self, name, date):
# Get valuation or indicator data for a given date
assert name in ['valuation','indicator'], 'name should be valuation or indicator'
df = pd.read_pickle(dataGlovar.DataPath+'/finance/{}/{}.pkl'.format(name, str(pd.to_datetime(date).date())))
return df
def get_valuation_y(self, name):
# Get data for a given indicator name
for types in ['valuation','indicator']:
path = dataGlovar.DataPath+'/finance/{}/{}.pkl'.format(types, name)
if os.path.exists(path):
df = pd.read_pickle(path)
return df
return None
def get_dataframe(self, year, field, unit, update):
# Get one year of data, used to feed the backtest
start = pd.to_datetime('{}-01-01'.format(year))
end = pd.to_datetime('{}-01-01'.format(year+1))
cache_path = self.base_path+'/{}-{}-{}.pkl'.format(field, unit, year)
if not os.path.exists(cache_path) or update:
print('not exist path {}, generate data !'.format(cache_path))
# Collect security codes
security_list = []
for param in ['stock1m']:
for i in os.listdir(dataGlovar.DataPath+'/'+param):
for j in os.listdir(os.path.join(dataGlovar.DataPath+'/'+param, i)):
security_list.append(j)
# CSI 300 and CSI 500 benchmarks
security_list += ['399300.XSHE','000905.XSHG']
df_list = []
for security in security_list:
path = DataFunction.get_path(security, unit=unit)
if not os.path.exists(path):
return None
table = bcolz.open(path, mode='r')
outcols = ['date',field]
table = table.fetchwhere('(date<={})&(date>={})'.format(end.timestamp(), start.timestamp()), outcols=outcols)
if len(table)==0:
continue
df = table.todataframe()
if unit=='1m':
df['date'] = df['date'].apply(lambda x:datetime.utcfromtimestamp(x))
else:
df['date'] = df['date'].apply(lambda x:datetime.utcfromtimestamp(x).date())
df = df.set_index('date')
df.columns = [security]
df_list.append(df)
df = pd.concat(df_list, axis=1, sort=True)
df.to_pickle(cache_path)
else:
df = pd.read_pickle(cache_path)
return df
def get_today_data(self, fq='post'):
# Get today's data
today_data, security_list, field_list = DataBcolz.get_today_data()
if fq=='post':
factor_list = []
for security in security_list:
path = DataFunction.get_path(security, '1d')+'/factor'
if os.path.exists(path):
init_factor = bcolz.open(path, mode='r')[0]
else:
init_factor = 1
factor_list.append(init_factor)
ix = [field_list.index(i) for i in ['open','high','low','close','high_limit','low_limit']]
today_data[:,:,ix] = today_data[:,:,ix] * np.array(factor_list)[np.newaxis,:,np.newaxis]
return today_data, security_list, field_list
def get_stock(self, security, end, field_list, unit, start=None, limit=None, fq=None, use_cache=False):
# Get data for a single security; use_cache keeps the result cached in memory
end = pd.to_datetime(pd.to_datetime(end).strftime('%Y-%m-%d %H:%M:00'))
if use_cache and security in self.cache_data and end.date()==self.cache_time:
df = self.cache_data[security].copy()
else:
if unit in ['1m','5m','15m','30m','60m','120m']:
path = DataFunction.get_path(security, '1m')
if not os.path.exists(path): return | pd.DataFrame(columns=field_list) | pandas.DataFrame |
import urllib
from io import StringIO
from io import BytesIO
import csv
import numpy as np
from datetime import datetime
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal
datos=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2008.txt', sep=';')
datos[0]= | pd.to_datetime(datos[0], format='%d/%m/%Y/ %H:%M:%S') | pandas.to_datetime |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: data_explore.py
@time: 2019-05-06 17:22
"""
import pandas as pd
import math
import featuretools as ft
from feature_selector import FeatureSelector
from mayiutils.datasets.data_preprocessing import DataExplore as de
if __name__ == '__main__':
mode = 2
if mode == 7:
"""
Drop the featuretools-based feature expansion and build the features manually
"""
dfzy = pd.read_csv('zy_all_featured_claim.csv', parse_dates=['就诊结帐费用发生日期', '入院时间', '出院时间'],
encoding='gbk')
del dfzy['ROWNUM']
del dfzy['主被保险人客户号']
del dfzy['出险人客户号']
del dfzy['就诊结帐费用发生日期']
del dfzy['自费描述']
del dfzy['部分自付描述']
del dfzy['医保支付描述']
del dfzy['出院时间']
del dfzy['event_id']
# Build derived features
# Out-of-pocket amount ratio
dfzy['自费总金额'] = dfzy['自费金额'] + dfzy['部分自付金额']
# Ratio of total out-of-pocket amount to the total expense amount
dfzy['自费总金额占比'] = dfzy['自费总金额'] / dfzy['费用金额']
# Ratio of the amount paid by medical insurance
dfzy['医保支付金额占比'] = dfzy['医保支付金额'] / dfzy['费用金额']
# Average expense amount per event
dfzy['费用金额mean'] = dfzy['费用金额'] / dfzy['event_count']
# log
def tlog(x):
if x < 1:
x = 0
if x != 0:
x = math.log(x)
return x
dfzy['费用金额log'] = dfzy['费用金额'].apply(tlog)
dfzy['自费金额log'] = dfzy['自费金额'].apply(tlog)
dfzy['部分自付金额log'] = dfzy['部分自付金额'].apply(tlog)
dfzy['医保支付金额log'] = dfzy['医保支付金额'].apply(tlog)
dfzy['自费总金额log'] = dfzy['自费总金额'].apply(tlog)
dfzy['费用金额meanlog'] = dfzy['费用金额mean'].apply(tlog)
# Build one-hot features
def build_one_hot_features(df, cols):
for col in cols:
t = pd.get_dummies(dfzy[col], prefix=col)
df = pd.concat([df, t], axis=1)
del df[col]
return df
del dfzy['疾病代码']
del dfzy['医院代码']
# dfzy['疾病代码'] = dfzy['疾病代码'].apply(lambda x: x[:3])
# s = dfzy['疾病代码'].value_counts()
# dfzy['疾病代码'][dfzy['疾病代码'].isin(list(s[s < 40].index))] = 'other'
# print(dfzy['医院代码'].value_counts())
def t(x):
if x.find('三级') != -1:
return 3
if x.find('二级') != -1:
return 2
if x.find('一级') != -1:
return 1
if x.find('未评级') != -1:
return 0
# print(dfzy['医院等级'].value_counts())
dfzy['医院等级'] = dfzy['医院等级'].apply(t)
# dfzy = build_one_hot_features(dfzy, ['性别', '证件类型', '人员属性', '出险原因', '险种代码', '医院代码', '费用项目代码', '就诊类型名称', '医院等级', '疾病代码'])
dfzy = build_one_hot_features(dfzy, ['性别', '证件类型', '人员属性', '出险原因', '险种代码', '费用项目代码', '就诊类型名称'])
# print(s[s<40].index)
# print(dfzy['疾病代码'].value_counts())
# dfzy.info()
fs = FeatureSelector(data=dfzy)
fs.identify_collinear(correlation_threshold=0.975)
"""
2 features with a correlation magnitude greater than 0.97.
"""
correlated_features = fs.ops['collinear']
# print(correlated_features)
print(fs.record_collinear.head(30))
train_removed_all_once = fs.remove(methods=['collinear'])
train_removed_all_once.index = train_removed_all_once['总案号_分案号']
del train_removed_all_once['总案号_分案号']
del train_removed_all_once['入院时间']
print(train_removed_all_once.shape)#(9843, 350)
print(list(train_removed_all_once.columns))
train_removed_all_once.info()
# print(train_removed_all_once.index)
train_removed_all_once.to_csv('zy_train_data.csv', encoding='gbk')
if mode == 6:
"""
featuretools扩充特征 基本没用啊
"""
dfzy = pd.read_csv('zy_all_featured_claim.csv', parse_dates=['出生日期', '就诊结帐费用发生日期', '入院时间', '出院时间'], encoding='gbk')
del dfzy['ROWNUM']
del dfzy['主被保险人客户号']
del dfzy['出险人客户号']
del dfzy['就诊结帐费用发生日期']
del dfzy['自费描述']
del dfzy['部分自付描述']
del dfzy['医保支付描述']
del dfzy['出院时间']
del dfzy['event_id']
# dfzy.info()
es = ft.EntitySet(id='zy')
es = es.entity_from_dataframe(entity_id='zy',
dataframe=dfzy,
variable_types={
'人员属性': ft.variable_types.Categorical,
'证件类型': ft.variable_types.Categorical,
'费用项目代码': ft.variable_types.Categorical,
'生效年': ft.variable_types.Categorical,
},
index='总案号_分案号',
time_index='出生日期')
# print(es)
# print(es['zy'])
# Perform deep feature synthesis without specifying primitives
features, feature_names = ft.dfs(entityset=es, target_entity='zy',
max_depth=2)
# print(feature_names)
# print(type(features))
print(features.shape)
# print(features.head())
# features.to_csv('zy_all_featured_claim_derivation.csv', encoding='gbk', index=False)
fs = FeatureSelector(data=features)
fs.identify_collinear(correlation_threshold=0.975)
"""
2 features with a correlation magnitude greater than 0.97.
"""
correlated_features = fs.ops['collinear']
print(correlated_features[:5])
print(fs.record_collinear.head())
train_removed_all_once = fs.remove(methods=['collinear'])
print(type(fs.data))
print(type(train_removed_all_once))
print(train_removed_all_once.shape)
train_removed_all_once.info()
if mode == 5:
"""
Collapse events into claims
"""
dfzy = | pd.read_csv('zy_all_featured_event.csv', parse_dates=['就诊结帐费用发生日期', '入院时间', '出院时间'], encoding='gbk') | pandas.read_csv |
"""Genera los reportes de los modulos."""
# Utilities
import collections
import functools
import ssl
import sys
# matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
# Pandas
import pandas as pd
# Django
from django.http.response import Http404
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template
from django.utils import timezone
from django.views.generic import (
ListView,
)
# Django Weasyprint
from django_weasyprint import WeasyTemplateResponseMixin
from django_weasyprint.utils import django_url_fetcher
from django_weasyprint.views import WeasyTemplateResponse
# Models
from andromeda.modules.inventory.models import Inventory, TechnicalDataSheet
from andromeda.modules.loans.models import Loans, InventoryLoans
from andromeda.modules.maintenance.models import Maintenance
from andromeda.modules.technical_support.models import Support
from andromeda.users.models import User
# Queries
from andromeda.utils.queries import (
get_total_implements,
get_best_auxiliary,
)
from andromeda.utils.utils import get_image
def str_to_class(module):
"""Obtiene el modelo de la clase ingresada en el path."""
return getattr(sys.modules[__name__], module)
class ReportListView(ListView):
"""List View de reportes,"""
def dispatch(self, request, *args, **kwargs):
try:
# Template
self.template_name = 'PDF/modules/{}/report.html'.format(self.kwargs['module'])
get_template(self.template_name)
# Model
self.model = str_to_class(self.kwargs['module'].capitalize())
return super(ReportListView, self).dispatch(request, *args, *kwargs)
except TemplateDoesNotExist:
raise Http404
except AttributeError:
raise Http404
def get_context_data(self, **kwargs):
"""Contexto de datos."""
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
if self.kwargs['module'] == 'loans':
# Loan data
context['total_loans'] = Loans.objects.count()
context['implements_total'] = get_total_implements()
context['best'] = get_best_auxiliary(Loans)
elif self.kwargs['module'] == 'inventory':
# Inventory data
context['implements_total'] = Inventory.objects.count()
context['tech_tabs_total'] = TechnicalDataSheet.objects.count()
context['disabled_implements'] = Inventory.objects.filter(status_implement='Inactivo').count()
elif self.kwargs['module'] == 'maintenance':
# Maintenance data
context['maintenance_total'] = Maintenance.objects.filter(is_active=False).count()
context['implements_maintenance_total'] = Inventory.objects.filter(status_implement='En mantenimiento').count()
context['best_auxiliary_maintenance'] = get_best_auxiliary(Maintenance)
elif self.kwargs['module'] == 'support':
context['supports_completed'] = Support.objects.filter(status_support='Completado').count()
context['supports_total'] = Support.objects.count()
context['best_auxiliary_support'] = get_best_auxiliary(Support)
else:
context['module'] = True
return context
class CustomWeasyTemplateResponse(WeasyTemplateResponse):
# customized response class to change the default URL fetcher
def get_url_fetcher(self):
# disable host and certificate check
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
return functools.partial(django_url_fetcher, ssl_context=context)
class PrintReportView(WeasyTemplateResponseMixin, ReportListView):
"""Renderiza el template como pdf"""
pdf_attachment = True
response_class = CustomWeasyTemplateResponse
def get_pdf_filename(self):
return f"andromeda-{timezone.now().strftime('%Y%m%d-%H%M')}.pdf"
def most_requested_implements(request):
"""Grafico de los implementos mas solicitados por los usuarios."""
data = Loans.objects.values_list('implement', flat=True)
implements = collections.Counter(data)
names = []
for implement_id in implements.keys():
x = InventoryLoans.objects.get(pk=implement_id)
names.append(x.implement.name)
values = list(implements.values())
fig, axs = plt.subplots(figsize=(10, 4))
axs.yaxis.set_major_locator(MaxNLocator(integer=True))
axs.set_ylabel("Solicitudes")
axs.bar(names, values)
fig.suptitle('Implementos mas solicitados')
return get_image()
def graph_users(request):
"""Grafico plot del modelo de usuarios."""
user = User.objects.all().values()
df = | pd.DataFrame(user, columns=['date_joined']) | pandas.DataFrame |
"""
Utils for time series generation
--------------------------------
"""
import math
from typing import Union, Optional, Sequence
import numpy as np
import pandas as pd
import holidays
from darts import TimeSeries
from darts.logging import raise_if_not, get_logger, raise_log, raise_if
logger = get_logger(__name__)
def _generate_index(
start: Optional[Union[pd.Timestamp, int]] = None,
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
) -> Union[pd.DatetimeIndex, pd.Int64Index]:
"""Returns an index with a given start point and length. Either a pandas DatetimeIndex with given frequency
or a pandas Int64Index. The index starts at
Parameters
----------
start
The start of the returned index. If a pandas Timestamp is passed, the index will be a pandas
DatetimeIndex. If an integer is passed, the index will be a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned index. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
The freq is optional for generating an integer index.
"""
constructors = [
arg_name
for arg, arg_name in zip([start, end, length], ["start", "end", "length"])
if arg is not None
]
raise_if(
len(constructors) != 2,
"index can only be generated with exactly two of the following parameters: [`start`, `end`, `length`]. "
f"Observed parameters: {constructors}. For generating an index with `end` and `length` consider setting "
f"`start` to None.",
logger,
)
raise_if(
end is not None and start is not None and type(start) != type(end),
"index generation with `start` and `end` requires equal object types of `start` and `end`",
logger,
)
if isinstance(start, pd.Timestamp) or isinstance(end, pd.Timestamp):
index = pd.date_range(start=start, end=end, periods=length, freq=freq)
else: # int
index = pd.Int64Index(
range(
start if start is not None else end - length + 1,
end + 1 if end is not None else start + length,
)
)
return index
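# Quick sanity sketch (arbitrary values, shown as comments):
#   _generate_index(start=pd.Timestamp("2000-01-01"), length=3, freq="D")
#       -> DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='D')
#   _generate_index(start=5, length=3)
#       -> Int64Index([5, 6, 7])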
def constant_timeseries(
value: float = 1,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
column_name: Optional[str] = "constant",
dtype: np.dtype = np.float64,
) -> TimeSeries:
"""
Creates a constant univariate TimeSeries with the given value, length (or end date), start date and frequency.
Parameters
----------
value
The constant value that the TimeSeries object will assume at every index.
start
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name
Optionally, the name of the value column for the returned TimeSeries
dtype
The desired NumPy dtype (np.float32 or np.float64) for the resulting series
Returns
-------
TimeSeries
A constant TimeSeries with value 'value'.
"""
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.full(len(index), value, dtype=dtype)
return TimeSeries.from_times_and_values(
index, values, freq=freq, columns=pd.Index([column_name])
)
def linear_timeseries(
start_value: float = 0,
end_value: float = 1,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
column_name: Optional[str] = "linear",
dtype: np.dtype = np.float64,
) -> TimeSeries:
"""
Creates a univariate TimeSeries with a starting value of `start_value` that increases linearly such that
it takes on the value `end_value` at the last entry of the TimeSeries. This means that
the difference between two adjacent entries will be equal to
(`end_value` - `start_value`) / (`length` - 1).
Parameters
----------
start_value
The value of the first entry in the TimeSeries.
end_value
The value of the last entry in the TimeSeries.
start
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name
Optionally, the name of the value column for the returned TimeSeries
dtype
The desired NumPy dtype (np.float32 or np.float64) for the resulting series
Returns
-------
TimeSeries
A linear TimeSeries created as indicated above.
"""
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.linspace(start_value, end_value, len(index), dtype=dtype)
return TimeSeries.from_times_and_values(
index, values, freq=freq, columns=pd.Index([column_name])
)
def sine_timeseries(
value_frequency: float = 0.1,
value_amplitude: float = 1.0,
value_phase: float = 0.0,
value_y_offset: float = 0.0,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
column_name: Optional[str] = "sine",
dtype: np.dtype = np.float64,
) -> TimeSeries:
"""
Creates a univariate TimeSeries with a sinusoidal value progression with a given frequency, amplitude,
phase and y offset.
Parameters
----------
value_frequency
The number of periods that take place within one time unit given in `freq`.
value_amplitude
The maximum difference between any value of the returned TimeSeries and `y_offset`.
value_phase
The relative position within one period of the first value of the returned TimeSeries (in radians).
value_y_offset
The shift of the sine function along the y axis.
start
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name
Optionally, the name of the value column for the returned TimeSeries
dtype
The desired NumPy dtype (np.float32 or np.float64) for the resulting series
Returns
-------
TimeSeries
A sinusoidal TimeSeries parametrized as indicated above.
"""
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.array(range(len(index)), dtype=dtype)
f = np.vectorize(
lambda x: value_amplitude
* math.sin(2 * math.pi * value_frequency * x + value_phase)
+ value_y_offset
)
values = f(values)
return TimeSeries.from_times_and_values(
index, values, freq=freq, columns=pd.Index([column_name])
)
def gaussian_timeseries(
mean: Union[float, np.ndarray] = 0.0,
std: Union[float, np.ndarray] = 1.0,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
column_name: Optional[str] = "gaussian",
dtype: np.dtype = np.float64,
) -> TimeSeries:
"""
Creates a gaussian univariate TimeSeries by sampling all the series values independently,
from a gaussian distribution with mean `mean` and standard deviation `std`.
Parameters
----------
mean
The mean of the gaussian distribution that is sampled at each step.
If a float value is given, the same mean is used at every step.
If a numpy.ndarray of floats with the same length as `length` is
given, a different mean is used at each time step.
std
The standard deviation of the gaussian distribution that is sampled at each step.
If a float value is given, the same standard deviation is used at every step.
If an array of dimension `(length, length)` is given, it will
be used as covariance matrix for a multivariate gaussian distribution.
start
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name
Optionally, the name of the value column for the returned TimeSeries
dtype
The desired NumPy dtype (np.float32 or np.float64) for the resulting series
Returns
-------
TimeSeries
A white noise TimeSeries created as indicated above.
"""
if type(mean) == np.ndarray:
raise_if_not(
mean.shape == (length,),
"If a vector of means is provided, "
"it requires the same length as the TimeSeries.",
logger,
)
if type(std) == np.ndarray:
raise_if_not(
std.shape == (length, length),
"If a matrix of standard deviations is provided, "
"its shape has to match the length of the TimeSeries.",
logger,
)
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.random.normal(mean, std, size=len(index)).astype(dtype)
return TimeSeries.from_times_and_values(
index, values, freq=freq, columns=pd.Index([column_name])
)
def random_walk_timeseries(
mean: float = 0.0,
std: float = 1.0,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
column_name: Optional[str] = "random_walk",
dtype: np.dtype = np.float64,
) -> TimeSeries:
"""
Creates a random walk univariate TimeSeries, where each step is obtained by sampling a gaussian distribution
with mean `mean` and standard deviation `std`.
Parameters
----------
mean
The mean of the gaussian distribution that is sampled at each step.
std
The standard deviation of the gaussian distribution that is sampled at each step.
start
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name
Optionally, the name of the value column for the returned TimeSeries
dtype
The desired NumPy dtype (np.float32 or np.float64) for the resulting series
Returns
-------
TimeSeries
A random walk TimeSeries created as indicated above.
"""
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.cumsum(np.random.normal(mean, std, size=len(index)), dtype=dtype)
    return TimeSeries.from_times_and_values(
        index, values, freq=freq, columns=pd.Index([column_name])
    )
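# Minimal usage sketch of the factory functions above (illustrative only; it assumes the
# darts ``TimeSeries`` class backing them and calls nothing beyond what is defined here):
if __name__ == "__main__":
    const = constant_timeseries(value=2.0, length=10)
    lin = linear_timeseries(start_value=0.0, end_value=9.0, length=10)
    wave = sine_timeseries(value_frequency=0.25, length=20)
    print(len(const), len(lin), len(wave))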
"""
A collection of classes extending the functionality of Python's builtins.
email <EMAIL>
"""
import re
import typing
import string
import enum
import os
import sys
from glob import glob
from pathlib import Path
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %% ========================== File Management =========================================
class Base:
def __init__(self, *args, **kwargs):
pass
@classmethod
def from_saved(cls, path, *args, reader=None, **kwargs):
"""Whether the save format is .json, .mat, .pickle or .npy, Reader returns Structure
For best experience, make sure that your subclass can be initialized with no or only fake inputs.
"""
inst = cls(*args, **kwargs)
if reader is None:
data = Read.read(path)
else:
data = reader(path)
# return inst, data
new_data = data.dict if type(data) is Struct else data # __setitem__ should be defined.
inst.__dict__.update(new_data)
return inst
def save_npy(self, path, **kwargs):
np.save(path, self.__dict__, **kwargs)
def save_pickle(self, path, itself=False, **kwargs):
"""
:param path:
        :param itself: determines whether to save the weights only or the entire class.
"""
if not itself:
Save.pickle(path, self.__dict__, **kwargs)
else:
Save.pickle(path, self, **kwargs)
def save_json(self, path, *args, **kwargs):
"""Use case: json is good for simple dicts, e.g. settings.
Advantage: human-readable from file explorer."""
_ = args
Save.json(path, self.__dict__, **kwargs)
return self
def save_mat(self, path, *args, **kwargs):
"""for Matlab compatibility."""
_ = args
Save.mat(path, self.__dict__, **kwargs)
return self
def get_attributes(self):
attrs = list(filter(lambda x: ('__' not in x) and not x.startswith("_"), dir(self)))
return attrs
# [setattr(Path, name, getattr(MyPath, name)) for name in funcs]
# def get_methods(self):
# def get_dict(self):
# return list(self.__dict__.keys())
def __deepcopy__(self, *args, **kwargs):
"""Literally creates a new copy of values of old object, rather than referencing them.
similar to copy.deepcopy()"""
obj = self.__class__(*args, **kwargs)
obj.__dict__.update(copy.deepcopy(self.__dict__))
return obj
def __copy__(self, *args, **kwargs):
"""Shallow copy. New object, but the keys of which are referencing the values from the old object.
Does similar functionality to copy.copy"""
obj = self.__class__(*args, **kwargs)
obj.__dict__.update(self.__dict__.copy())
return obj
def evalstr(self, string_, expected='self'):
_ = self
if type(string_) is str:
if expected == 'func':
return eval("lambda x: " + string_)
elif expected == 'self':
if "self" in string_:
return eval(string_)
else:
return string_
else:
return string_
class P(type(Path()), Path, Base):
"""Path Class: Designed with one goal in mind: any operation on paths MUST NOT take more than one line of code.
"""
# ===================================== File Specs ================================================================
def size(self, units='mb'):
sizes = List(['b', 'kb', 'mb', 'gb'])
factor = dict(zip(sizes + sizes.apply("x.swapcase()"),
np.tile(1024 ** np.arange(len(sizes)), 2)))[units]
if self.is_file():
total_size = self.stat().st_size
elif self.is_dir():
results = self.rglob("*")
total_size = 0
for item in results:
if item.is_file():
total_size += item.stat().st_size
else:
raise TypeError("This thing is not a file nor a folder.")
return round(total_size / factor, 1)
def time(self, which="m", **kwargs):
"""Meaning of ``which values``
* ``m`` time of modifying file ``content``, i.e. the time it was created.
* ``c`` time of changing file status (its inode is changed like permissions, name etc, but not contents)
* ``a`` last time the file was accessed.
        :param which: Determines which time to be returned. Three options are available:
:param kwargs:
:return:
"""
time = {"m": self.stat().st_mtime, "a": self.stat().st_atime, "c": self.stat().st_ctime}[which]
from datetime import datetime
return datetime.fromtimestamp(time, **kwargs)
# ================================ Path Object management ===========================================
@property
def trunk(self):
""" useful if you have multiple dots in file name where .stem fails.
"""
return self.name.split('.')[0]
def __add__(self, name):
return self.parent.joinpath(self.stem + name)
def __sub__(self, other):
return P(str(self).replace(str(other), ""))
# def __rtruediv__(self, other):
# tmp = str(self)
# if tmp[0] == "/": # if dir starts with this, all Path methods fail.
# tmp = tmp[1:]
# return P(other) / tmp
def prepend(self, prefix, stem=False):
"""Add extra text before file name
        e.g: blah\blah.extension ==> becomes ==> blah/name_blah.extension
"""
if stem:
return self.parent.joinpath(prefix + self.stem)
else:
return self.parent.joinpath(prefix + self.name)
def append(self, name='', suffix=None):
"""Add extra text after file name, and optionally add extra suffix.
        e.g: blah\blah.extension ==> becomes ==> blah/blah_name.extension
"""
if suffix is None:
suffix = ''.join(self.suffixes)
return self.parent.joinpath(self.stem + name + suffix)
def append_time_stamp(self, ft=None):
return self.append(name="-" + get_time_stamp(ft=ft))
def absolute_from(self, reference=None):
"""As opposed to ``relative_to`` which takes two abolsute paths and make ``self`` relative to ``reference``,
this one takes in two relative paths, and return an absolute version of `self` the reference
for which is ``reference``.
:param reference: a directory `name` from which the current relative path ``self`` is defined.
Default value of reference is current directory name, making the method act like ``absolute`` method
.. warning:: ``reference`` should be within working directory, otherwise it raises an error.
.. note:: If you have the full path of the reference, then this method would give the same result as
            going with `reference / self`
"""
if reference is None:
reference = P.cwd()[-1].string
return P.cwd().split(at=reference)[0] / reference / self
    def split(self, at: str = None, index: int = None, sep: int = 1):
"""Splits a path at a given string or index
:param self:
:param at:
:param index:
:param sep: can be either [-1, 0, 1]. Determines where the separator is going to live with:
left portion, none or right portion.
:return: two paths
"""
if index is None: # at is provided
items = str(self).split(sep=at)
one, two = items[0], items[1]
one = one[:-1] if one.endswith("/") else one
two = two[1:] if two.startswith("/") else two
one, two = P(one), P(two)
else:
one = self[:index]
two = P(*self.parts[index + 1:])
# appending `at` to one of the portions
if sep == 0:
            pass  # neither of the portions gets the separator appended to it.
elif sep == 1: # append it to right portion
two = at / two
elif sep == -1: # append it to left portion.
one = one / at
else:
raise ValueError(f"`sep` should take a value from the set [-1, 0, 1] but got {sep}")
return one, two
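        # Illustrative example (hypothetical path, not from the source):
        #   P("home/user/data/file.txt").split(at="data") returns
        #   (P("home/user"), P("data/file.txt")) with the default sep=1;
        #   with sep=-1 the separator "data" is appended to the left portion instead.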
def __getitem__(self, slici):
if type(slici) is slice:
return P(*self.parts[slici])
        elif type(slici) is list or type(slici) is np.ndarray:
return P(*[self[item] for item in slici])
else:
return P(self.parts[slici])
def __len__(self):
return len(self.parts)
@property
def len(self):
return self.__len__()
def __setitem__(self, key, value):
fullparts = list(self.parts)
fullparts[key] = value
return P(*fullparts) # TODO: how to change self[-1]
def switch(self, key: str, val: str):
"""Changes a given part of the path to another given one"""
return P(str(self).replace(key, val))
def switch_index(self, key: int, val: str):
"""Changes a given index of the path to another given one"""
fullparts = list(self.parts)
fullparts[key] = val
return P(*fullparts)
def __deepcopy__(self, memodict=None):
if memodict is None:
_ = {}
return P(str(self))
# ================================ String Nature management ====================================
def __repr__(self): # this is useful only for the console
return "P: " + self.__str__()
@property
def string(self): # this method is used by other functions to get string representation of path
return str(self)
def get_num(self, astring=None):
if astring is None:
astring = self.stem
return int("".join(filter(str.isdigit, str(astring))))
def make_valid_filename(self, replace='_'):
return self.make_valid_filename_(self.trunk, replace=replace)
@staticmethod
def make_valid_filename_(astring, replace='_'):
return re.sub(r'^(?=\d)|\W', replace, str(astring))
@staticmethod
def get_random_string(length=10, pool=None):
if pool is None:
pool = string.ascii_letters
import random
result_str = ''.join(random.choice(pool) for _ in range(length))
return result_str
def as_unix(self):
return P(str(self).replace('\\', '/').replace('//', '/'))
# ==================================== File management =========================================
def delete(self, are_you_sure=False):
if are_you_sure:
if self.is_file():
self.unlink() # missing_ok=True added in 3.8
else:
import shutil
shutil.rmtree(self, ignore_errors=True)
# self.rmdir() # dir must be empty
else:
print("File not deleted because user is not sure.")
def send2trash(self):
send2trash = Experimental.assert_package_installed("send2trash")
send2trash.send2trash(self.string)
def move(self, new_path):
new_path = P(new_path)
temp = self.absolute()
temp.rename(new_path.absolute() / temp.name)
return new_path
def renameit(self, new_name):
new_path = self.parent / new_name
self.rename(new_path)
return new_path
def copy(self, target_dir=None, target_name=None, contents=False, verbose=False):
"""
:param target_dir: copy the file to this directory (filename remains the same).
:param target_name: full path of destination (including -potentially different- file name).
:param contents: copy the parent directory or its contents (relevant only if copying a directory)
:param verbose:
:return: path to copied file or directory.
.. wanring:: Do not confuse this with ``copy`` module that creates clones of Python objects.
"""
dest = None # destination.
if target_dir is not None:
assert target_name is None, f"You can either pass target_dir or target_name but not both"
dest = P(target_dir).create() / self.name
if target_name is not None:
assert target_dir is None, f"You can either pass target_dir or target_name but not both"
target_name = P(target_name)
target_name.parent.create()
dest = target_name
if dest is None:
dest = self.append(f"_copy__{get_time_stamp()}")
if self.is_file():
import shutil
shutil.copy(str(self), str(dest)) # str() only there for Python < (3.6)
if verbose:
print(f"File \n{self}\ncopied successfully to: \n{dest}")
elif self.is_dir():
from distutils.dir_util import copy_tree
if contents:
copy_tree(str(self), str(dest))
else:
copy_tree(str(self), str(P(dest).joinpath(self.name).create()))
else:
print("Could not copy this thing. Not a file nor a folder.")
return dest
def clean(self):
"""removes contents on a folder, rather than deleting the folder."""
contents = self.listdir()
for content in contents:
self.joinpath(content).send2trash()
return self
def readit(self, reader=None, notfound=FileNotFoundError, verbose=False, **kwargs):
"""
:param reader: function that reads this file format, if not passed it will be inferred from extension.
:param notfound: behaviour when file ``self`` to be read doesn't actually exist. Default: throw an error.
can be set to return `False` or any other value that will be returned if file not found.
:param verbose:
:param kwargs:
:return:
"""
filename = self
if '.zip' in str(self):
filename = self.unzip(op_path=tmp("unzipped"))
if verbose:
print(f"File {self} was uncompressed to {filename}")
def apply_reader_or_infer_it():
if reader is None:
return Read.read(filename, **kwargs)
else:
return reader(str(filename), **kwargs)
if notfound is FileNotFoundError:
return apply_reader_or_infer_it()
else: # encapsulate the function within a try context manager.
try:
return apply_reader_or_infer_it()
except Exception:
return notfound
def explore(self): # explore folders.
# os.startfile(os.path.realpath(self))
filename = self.absolute().string
if sys.platform == "win32":
os.startfile(filename) # works for files and folders alike
elif sys.platform == 'linux':
import subprocess
opener = "xdg-open"
subprocess.call([opener, filename]) # works for files and folders alike
else: # mac
# os.system(f"open {filename}")
import subprocess
subprocess.call(["open", filename]) # works for files and folders alike
# ======================================== Folder management =======================================
def create(self, parents=True, exist_ok=True, parent_only=False):
"""Creates directory while returning the same object
"""
if parent_only:
self.parent.mkdir(parents=parents, exist_ok=exist_ok)
else:
self.mkdir(parents=parents, exist_ok=exist_ok)
return self
@property
def browse(self):
return self.search("*").to_struct(key_val=lambda x: ("qq_" + x.make_valid_filename(), x)).clean_view
def search(self, pattern='*', r=False, generator=False, files=True, folders=True, compressed=False,
dotfiles=False,
absolute=True, filters: list = None, not_in: list = None, win_order=False):
"""
:param pattern: linux search pattern
:param r: recursive search flag
:param generator: output format, list or generator.
:param files: include files in search.
:param folders: include directories in search.
:param dotfiles: flag to indicate whether the search should include those or not.
:param filters: list of filters
:param absolute: return relative paths or abosolute ones.
:param not_in: list of strings that search results should not contain them (short for filter with simple lambda)
:param win_order: return search results in the order of files as they appear on a Windows machine.
:return: search results.
# :param visible: exclude hidden files and folders (Windows)
"""
# ================= Get concrete values for default arguments ========================================
if filters is None:
filters = []
else:
pass
if not_in is not None:
for notin in not_in:
filters += [lambda x: str(notin) not in str(x)]
# ============================ get generator of search results ========================================
if self.suffix == ".zip":
import zipfile
with zipfile.ZipFile(str(self)) as z:
contents = L(z.namelist())
from fnmatch import fnmatch
raw = contents.filter(lambda x: fnmatch(x, pattern)).apply(lambda x: self / x)
elif dotfiles:
raw = self.glob(pattern) if not r else self.rglob(pattern)
else:
if r:
path = self / "**" / pattern
raw = glob(str(path), recursive=r)
else:
path = self.joinpath(pattern)
raw = glob(str(path))
if compressed:
comp_files = L(raw).filter(lambda x: '.zip' in str(x))
for comp_file in comp_files:
raw += P(comp_file).search(pattern=pattern, r=r, generator=generator, files=files, folders=folders,
compressed=compressed,
dotfiles=dotfiles,
absolute=absolute, filters=filters, not_in=not_in, win_order=win_order)
# if os.name == 'nt':
# import win32api, win32con
# def folder_is_hidden(p):
# if os.name == 'nt':
# attribute = win32api.GetFileAttributes(p)
# return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
def run_filter(item):
flags = [True]
if not files:
flags.append(item.is_dir())
if not folders:
flags.append(item.is_file())
for afilter in filters:
flags.append(afilter(item))
return all(flags)
def do_screening(item):
item = P(item) # because some filters needs advanced functionalities of P objects.
if absolute:
item = item.absolute()
if run_filter(item):
return item
else:
return None
if generator:
def gen():
flag = False
while not flag:
item = next(raw)
flag = do_screening(item)
if flag:
yield item
return gen
else:
# unpack the generator and vet the items (the function also returns P objects)
processed = [result for item in raw if (result := do_screening(item))]
            if not processed:  # if empty, don't proceed
return List(processed)
if win_order: # this option only supported in non-generator mode.
processed.sort(key=lambda x: [int(k) if k.isdigit() else k for k in re.split('([0-9]+)', x.stem)])
return List(processed)
def listdir(self):
return List(os.listdir(self)).apply(P)
def find(self, *args, r=True, **kwargs):
"""short for the method ``search`` then pick first item from results.
        .. note:: it is deliberately made to return None in case an object is not found.
"""
results = self.search(*args, r=r, **kwargs)
return results[0] if len(results) > 0 else None
# def open_with_system(self):
# self.explore() # if it is a file, it will be opened with its default program.
@staticmethod
def tmp(folder=None, fn=None, path="home"):
"""
folder is created.
file name is not created, only appended.
"""
if str(path) == "home":
path = P.home() / f"tmp_results"
path.mkdir(exist_ok=True, parents=True)
if folder is not None:
path = path / folder
path.mkdir(exist_ok=True, parents=True)
if fn is not None:
path = path / fn
return path
# ====================================== Compression ===========================================
def zip(self, op_path=None, arcname=None, **kwargs):
"""
"""
op_path = op_path or self
arcname = arcname or self.name
arcname = P(self.evalstr(arcname, expected="self"))
op_path = P(self.evalstr(op_path, expected="self"))
if arcname.name != self.name:
arcname /= self.name # arcname has to start from somewhere and end with filename
if self.is_file():
op_path = Compression.zip_file(ip_path=self, op_path=op_path, arcname=arcname, **kwargs)
else:
op_path = Compression.compress_folder(ip_path=self, op_path=op_path,
arcname=arcname, format_='zip', **kwargs)
return op_path
def unzip(self, op_path=None, fname=None, **kwargs):
zipfile = self
        if self.suffix != ".zip":  # maybe there is .zip somewhere in the path.
assert ".zip" in str(self), f"Not a zip archive."
zipfile, fname = self.split(at=".zip", sep=0)
zipfile += ".zip"
if op_path is None:
op_path = zipfile.parent / zipfile.stem
else:
op_path = P(self.evalstr(op_path, expected="self"))
return Compression.unzip(zipfile, op_path, fname, **kwargs)
def compress(self, op_path=None, base_dir=None, format_="zip", **kwargs):
formats = ["zip", "tar", "gzip"]
assert format_ in formats, f"Unsupported format {format_}. The supported formats are {formats}"
_ = self, op_path, base_dir, kwargs
pass
def decompress(self):
pass
tmp = P.tmp
class Compression:
"""Provides consistent behaviour across all methods ...
Both files and folders when compressed, default is being under the root of archive."""
def __init__(self):
pass
@staticmethod
def compress_folder(ip_path, op_path, arcname, format_='zip', **kwargs):
"""Explanation of Shutil parameters:
        * ``base_dir`` (here referred to as ``ip_path``) is what is going to be actually archived.
        When provided, it **has to** be relative to ``root_dir`` (here referred to as ``arcname``).
* ``root_dir`` is where the archive is going to start from. It will create all the necessary subfolder till
it reaches the ``base_dir`` where archiving actually starts.
* Example: If you want to compress a folder in ``Downloads/myfolder/compress_this``
        then say that your root_dir is what you want the archive structure to include,
        and mention the folder you want to actually archive relative to that root.
.. note:: ``format_`` can only be one of ``zip, tar, gztar, bztar, xztar``.
"""
        root_dir = ip_path.split(at=str(arcname[0]))[0]  # str() so P.split receives a string separator
import shutil # shutil works with folders nicely (recursion is done interally)
result_path = shutil.make_archive(base_name=op_path, format=format_,
root_dir=str(root_dir), base_dir=str(arcname), **kwargs)
return P(result_path) # same as op_path but (possibly) with format extension
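    # Illustrative example (hypothetical paths): for ip_path=P("Downloads/myfolder/compress_this")
    # and arcname=P("myfolder/compress_this"), root_dir resolves to "Downloads" and the archive
    # stores its entries under "myfolder/compress_this/...".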
@staticmethod
def zip_file(ip_path, op_path, arcname, **kwargs):
"""
arcname determines the directory of the file being archived inside the archive. Defaults to same
as original directory except for drive. When changed, it should still include the file name in its end.
If arcname = filename without any path, then, it will be in the root of the archive.
"""
import zipfile
if op_path.suffix != ".zip":
op_path = op_path + f".zip"
jungle_zip = zipfile.ZipFile(str(op_path), 'w')
jungle_zip.write(filename=str(ip_path), arcname=str(arcname), compress_type=zipfile.ZIP_DEFLATED, **kwargs)
jungle_zip.close()
return op_path
@staticmethod
def unzip(ip_path, op_path, fname=None, **kwargs):
from zipfile import ZipFile
with ZipFile(str(ip_path), 'r') as zipObj:
if fname is None: # extract all:
zipObj.extractall(op_path, **kwargs)
else:
zipObj.extract(str(fname), str(op_path), **kwargs)
op_path = P(op_path) / fname
return P(op_path)
@staticmethod
def gz(file):
import gzip
import shutil
with open(file, 'rb') as f_in:
with gzip.open(str(file) + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
@staticmethod
def ungz(self, op_path=None):
import shutil
import gzip
fn = str(self)
op_path = op_path or self.parent / self.stem
with gzip.open(fn, 'r') as f_in, open(op_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return P(op_path)
@staticmethod
def tar():
# import tarfile
pass
@staticmethod
def untar(self, fname=None, extract_dir='.', mode='r', **kwargs):
import tarfile
file = tarfile.open(str(self), mode)
if fname is None: # extract all files in the archive
file.extractall(path=extract_dir, **kwargs)
else:
file.extract(fname, **kwargs)
file.close()
return fname
class Read:
@staticmethod
def read(path, **kwargs):
suffix = P(path).suffix[1:]
# if suffix in ['eps', 'jpg', 'jpeg', 'pdf', 'pgf', 'png', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff']:
# # plt.gcf().canvas.get_supported_filetypes().keys():
# return plt.imread(path, **kwargs)
# else:
reader = getattr(Read, suffix)
return reader(str(path), **kwargs)
@staticmethod
def npy(path, **kwargs):
"""returns Structure if the object loaded is a dictionary"""
data = np.load(str(path), allow_pickle=True, **kwargs)
        if data.dtype == object:  # np.object was an alias of the builtin and is removed in newer NumPy
data = data.item()
if type(data) is dict:
data = Struct(data)
return data
@staticmethod
def mat(path, **kwargs):
"""
:param path:
:return: Structure object
"""
from scipy.io import loadmat
return Struct(loadmat(path, **kwargs))
@staticmethod
def json(path, r=False, **kwargs):
"""Returns a Structure"""
import json
with open(str(path), "r") as file:
mydict = json.load(file, **kwargs)
if r:
return Struct.recursive_struct(mydict)
else:
return Struct(mydict)
@staticmethod
def yaml(path, r=False):
import yaml
with open(str(path), "r") as file:
mydict = yaml.load(file, Loader=yaml.FullLoader)
if r:
return Struct.recursive_struct(mydict)
else:
return Struct(mydict)
@staticmethod
def csv(path, **kwargs):
        w = P(path).append(".dtypes").readit(reader=pd.read_csv, notfound=None)
        w = dict(zip(w['index'], w['dtypes'])) if w is not None else w
        return pd.read_csv(path, dtype=w, **kwargs)
@staticmethod
def pickle(path, **kwargs):
# import pickle
dill = Experimental.assert_package_installed("dill")
with open(path, 'rb') as file:
obj = dill.load(file, **kwargs)
if type(obj) is dict:
obj = Struct(obj)
return obj
@staticmethod
def pkl(*args, **kwargs):
return Read.pickle(*args, **kwargs)
@staticmethod
    def csv(path, *args, **kwargs):
        # NOTE: this later definition overrides the dtype-aware `csv` reader defined above.
        return pd.read_csv(path, *args, **kwargs)
class Save:
@staticmethod
def csv(path, obj):
obj.to_frame('dtypes').reset_index().to_csv(P(path).append(".dtypes").string)
@staticmethod
def mat(path=P.tmp(), mdict=None, **kwargs):
"""
.. note::
            Avoid using mat for saving results because of incompatibility:
            * `None` type is not accepted.
            * Scalars are converted to [1 x 1] arrays.
            * etc. As such, there is no guarantee that you restore what you saved.
Unless you want to pass the results to Matlab animals, avoid this format.
"""
from scipy.io import savemat
if '.mat' not in str(path):
path += '.mat'
path.parent.mkdir(exist_ok=True, parents=True)
for key, value in mdict.items():
if value is None:
mdict[key] = []
savemat(str(path), mdict, **kwargs)
@staticmethod
def json(path, obj, **kwargs):
"""This format is **compatible** with simple dictionaries that hold strings or numbers
but nothing more than that.
E.g. arrays or any other structure. An example of that is settings dictionary. It is useful because it can be
inspected using any text editor."""
import json
if not str(path).endswith(".json"):
path = str(path) + ".json"
with open(str(path), "w") as file:
json.dump(obj, file, default=lambda x: x.__dict__, **kwargs)
@staticmethod
def yaml(path, obj, **kwargs):
import yaml
if not str(path).endswith(".yaml"):
path = str(path) + ".yaml"
with open(str(path), "w") as file:
yaml.dump(obj, file, **kwargs)
# @staticmethod
# def pickle(path, obj, **kwargs):
# if ".pickle" not in str(path):
# path = path + ".pickle"
# import pickle
# with open(str(path), 'wb') as file:
# pickle.dump(obj, file, **kwargs)
@staticmethod
def pickle(path, obj, **kwargs):
dill = Experimental.assert_package_installed("dill")
with open(str(path), 'wb') as file:
dill.dump(obj, file, **kwargs)
def accelerate(func, ip):
""" Conditions for this to work:
* Must run under __main__ context
* func must be defined outside that context.
    To accelerate an IO-bound process, use multithreading. An example of that is something very cheap to process,
but takes a long time to be obtained like a request from server. For this, multithreading launches all threads
together, then process them in an interleaved fashion as they arrive, all will line-up for same processor,
if it happens that they arrived quickly.
To accelerate processing-bound process use multiprocessing, even better, use Numba.
Method1 use: multiprocessing / multithreading.
Method2: using joblib (still based on multiprocessing)
from joblib import Parallel, delayed
Fast method using Concurrent module
"""
split = np.array_split(ip, os.cpu_count())
# make each thread process multiple inputs to avoid having obscene number of threads with simple fast
# operations
# vectorize the function so that it now accepts lists of ips.
# def my_func(ip):
# return [func(tmp) for tmp in ip]
import concurrent.futures
with concurrent.futures.ProcessPoolExecutor() as executor:
op = executor.map(func, split)
op = list(op) # convert generator to list
op = np.concatenate(op, axis=0)
# op = self.reader.assign_resize(op, f=0.8, nrp=56, ncp=47, interpolation=True)
return op
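# Illustrative usage sketch (the `_double` helper is hypothetical, not from the source);
# as noted in the docstring, the worker function must live at module level and the call
# must happen under the __main__ guard:
def _double(chunk):
    return chunk * 2

if __name__ == "__main__":
    doubled = accelerate(_double, np.arange(1_000_000))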
# %% ========================== Object Management ==============================================
class List(list, Base):
"""Use this class to keep items of the same type.
"""
# =============================== Constructor Methods ====================
def __init__(self, obj_list=None):
super().__init__()
self.list = list(obj_list) if obj_list is not None else []
def __bool__(self):
return bool(self.list)
@classmethod
def from_copies(cls, obj, count):
return cls([copy.deepcopy(obj) for _ in range(count)])
@classmethod
def from_replicating(cls, func, *args, replicas=None, **kwargs):
"""
        :param args: could be one item repeated for all instances, or iterable. If iterable, it can be a Cycle object.
:param kwargs: those could be structures:
:param replicas:
:param func:
"""
if not args and not kwargs: # empty args list and kwargs list
return cls([func() for _ in range(replicas)])
else:
result = []
for params in zip(*(args + tuple(kwargs.values()))):
an_arg = params[:len(args)]
a_val = params[len(args):]
a_kwarg = dict(zip(kwargs.keys(), a_val))
result.append(func(*an_arg, **a_kwarg))
return cls(result)
def save_items(self, directory, names=None, saver=None):
if saver is None:
saver = Save.pickle
if names is None:
names = range(len(self))
for name, item in zip(names, self.list):
saver(path=directory / name, obj=item)
def __deepcopy__(self, memodict=None):
if memodict is None:
memodict = {}
_ = memodict
return List([copy.deepcopy(i) for i in self.list])
def __copy__(self):
return List(self.list.copy())
def __getstate__(self):
return self.list
def __setstate__(self, state):
self.list = state
# ================= call methods =====================================
def method(self, name, *args, **kwargs):
return List([getattr(i, name)(*args, **kwargs) for i in self.list])
def attr(self, name):
return List([getattr(i, name) for i in self.list])
# def __getattribute__(self, item):
# # you can dispense with this method. Its only purpose is to make eaisr experience qwith the linter
# # obj = object.__getattribute__(self, "list")[0]
# # try:
# # attr = object.__getattribute__(self, item)
# # if hasattr(obj, item):
# # return self.__getattr__(item)
# # else:
# # return attr
# # except AttributeError:
# # return self.__getattr__(item)
# if item == "list": # grant special access to this attribute.
# return object.__getattribute__(self, "list")
# if item in object.__getattribute__(self, "__dict__").keys():
# return self.__getattr__(item)
# else:
# return object.__getattribute__(self, item)
def __getattr__(self, name): # fallback position when normal mechanism fails.
# this is called when __getattribute__ raises an error or call this explicitly.
result = List([getattr(i, name) for i in self.list])
return result
def __call__(self, *args, lest=True, **kwargs):
if lest:
return List([i(*args, **kwargs) for i in self.list])
else:
return [i(*args, **kwargs) for i in self.list]
# ======================== Access Methods ==========================================
def __getitem__(self, key):
if type(key) is list or type(key) is np.ndarray: # to allow fancy indexing like List[1, 5, 6]
return List([self[item] for item in key])
# behaves similarly to Numpy A[1] vs A[1:2]
result = self.list[key] # return the required item only (not a List)
if type(key) is not slice:
return result # choose one item
else:
return List(result)
def __setitem__(self, key, value):
self.list[key] = value
def sample(self, size=1):
return self[np.random.choice(len(self), size)]
def to_struct(self, key_val=None):
"""
:param key_val: function that returns (key, value) pair.
:return:
"""
if key_val is None:
def key_val(x):
return str(x), x
else:
key_val = self.evalstr(key_val)
return Struct.from_keys_values_pairs(self.apply(key_val))
# def find(self, patt, match="fnmatch"):
# """Looks up the string representation of all items in the list and finds the one that partially matches
# the argument passed. This method is a short for ``self.filter(lambda x: string_ in str(x))`` If you need more
# complicated logic in the search, revert to filter method.
# """
#
# if match == "string" or None:
# for idx, item in enumerate(self.list):
# if patt in str(item):
# return item
# elif match == "fnmatch":
# import fnmatch
# for idx, item in enumerate(self.list):
# if fnmatch.fnmatch(str(item), patt):
# return item
# else: # "regex"
# # escaped = re.escape(string_)
# compiled = re.compile(patt)
# for idx, item in enumerate(self.list):
# if compiled.search(str(item)) is not None:
# return item
# return None
def index(self, func):
""" A generalization of the `.index` method of `list`. It takes in a function rather than an
item to find its index. Additionally, it returns full list of results, not just the first result.
:param func:
:return: List of indices of items where the function returns `True`.
"""
func = self.evalstr(func, expected='func')
res = []
for idx, x in enumerate(self.list):
if func(x):
res.append(idx)
return res
# ======================= Modify Methods ===============================
def combine(self):
res = self.list[0]
for item in self.list[1:]:
res = res + item
return res
def append(self, obj):
self.list.append(obj)
def __add__(self, other):
return List(self.list + other.list)
def __repr__(self):
if len(self.list) > 0:
tmp1 = f"List object with {len(self.list)} elements. One example of those elements: \n"
tmp2 = f"{self.list[0].__repr__()}"
return tmp1 + tmp2
else:
return f"An Empty List []"
def __len__(self):
return len(self.list)
@property
def len(self):
return self.list.__len__()
def __iter__(self):
return iter(self.list)
def apply(self, func, *args, lest=None, jobs=None, depth=1, verbose=False, **kwargs):
"""
:param jobs:
:param func: func has to be a function, possibly a lambda function. At any rate, it should return something.
:param args:
:param lest:
:param verbose:
:param depth: apply the function to inner Lists
:param kwargs: a list of outputs each time the function is called on elements of the list.
:return:
"""
if depth > 1:
depth -= 1
# assert type(self.list[0]) == List, "items are not Lists".
self.apply(lambda x: x.apply(func, *args, lest=lest, jobs=jobs, depth=depth, **kwargs))
func = self.evalstr(func, expected='func')
tqdm = 0
if verbose or jobs:
Experimental.assert_package_installed("tqdm")
from tqdm import tqdm
if lest is None:
if jobs:
from joblib import Parallel, delayed
return List(Parallel(n_jobs=jobs)(delayed(func)(i, *args, **kwargs) for i in tqdm(self.list)))
else:
iterator = self.list if not verbose else tqdm(self.list)
return List([func(x, *args, **kwargs) for x in iterator])
else:
if jobs:
from joblib import Parallel, delayed
return List(Parallel(n_jobs=jobs)(delayed(func)(x, y) for x, y in tqdm(zip(self.list, lest))))
else:
iterator = zip(self.list, lest) if not verbose else tqdm(zip(self.list, lest))
return List([func(x, y) for x, y in iterator])
def modify(self, func, lest=None):
"""Modifies objects rather than returning new list of objects, hence the name of the method.
:param func: a string that will be executed, assuming idx, x and y are given.
:param lest:
:return:
"""
if lest is None:
for x in self.list:
_ = x
exec(func)
else:
for idx, (x, y) in enumerate(zip(self.list, lest)):
_ = idx, x, y
exec(func)
return self
def sort(self, *args, **kwargs):
self.list.sort(*args, **kwargs)
return self
def sorted(self, *args, **kwargs):
return List(sorted(self.list, *args, **kwargs))
def filter(self, func):
if type(func) is str:
func = eval("lambda x: " + func)
result = List()
for item in self.list:
if func(item):
result.append(item)
return result
def print(self, nl=1, sep=False, style=repr):
for idx, item in enumerate(self.list):
print(f"{idx:2}- {style(item)}", end=' ')
for _ in range(nl):
print('', end='\n')
if sep:
print(sep * 100)
def to_dataframe(self, names=None, minimal=True):
DisplayData.set_display()
columns = ['object'] + list(self.list[0].__dict__.keys())
df = pd.DataFrame(columns=columns)
if minimal:
return df
for i, obj in enumerate(self.list):
if names is None:
name = [obj]
else:
name = [names[i]]
df.loc[i] = name + list(self.list[i].__dict__.values())
return df
def to_numpy(self):
return self.np
@property
def np(self):
return np.array(self.list)
L = List
class Struct(Base):
"""Use this class to keep bits and sundry items.
Combines the power of dot notation in classes with strings in dictionaries to provide Pandas-like experience
"""
def __init__(self, dictionary=None, **kwargs):
"""
:param dictionary: a dict, a Struct, None or an object with __dict__ attribute.
"""
super(Struct, self).__init__()
if type(dictionary) is Struct:
dictionary = dictionary.dict
if dictionary is None: # only kwargs were passed
final_dict = kwargs
elif not kwargs: # only dictionary was passed
final_dict = dictionary if type(dictionary) is dict else dictionary.__dict__
else: # both were passed
final_dict = dictionary if type(dictionary) is dict else dictionary.__dict__
final_dict.update(kwargs)
self.__dict__ = final_dict
def __bool__(self):
return bool(self.__dict__)
@staticmethod
def recursive_struct(mydict):
struct = Struct(mydict)
for key, val in struct.items():
if type(val) is dict:
struct[key] = Struct.recursive_struct(val)
return struct
@staticmethod
def recursive_dict(struct):
mydict = struct.dict
for key, val in mydict.items():
if type(val) is Struct:
mydict[key] = Struct.recursive_dict(val)
return mydict
@classmethod
def from_keys_values(cls, keys: list, values: list):
return cls(dict(zip(keys, values)))
@classmethod
def from_keys_values_pairs(cls, my_list):
res = dict()
for k, v in my_list:
res[k] = v
return cls(res)
@classmethod
def from_names(cls, *names, default_=None): # Mimick NamedTuple and defaultdict
if default_ is None:
default_ = [None] * len(names)
return cls.from_keys_values(names, values=default_)
def get_values(self, keys):
return List([self[key] for key in keys])
@property
def clean_view(self):
class Temp:
pass
temp = Temp()
temp.__dict__ = self.__dict__
return temp
def __repr__(self):
repr_string = ""
for key in self.keys().list:
repr_string += str(key) + ", "
return "Struct: [" + repr_string + "]"
def print(self, sep=20, yaml=False):
if yaml:
self.save_yaml(P.tmp(fn="__tmp.yaml"))
txt = P.tmp(fn="__tmp.yaml").read_text()
print(txt)
return None
repr_string = ""
repr_string += "Structure, with following entries:\n"
repr_string += "Key" + " " * sep + "Item Type" + " " * sep + "Item Details\n"
repr_string += "---" + " " * sep + "---------" + " " * sep + "------------\n"
for key in self.keys().list:
key_str = str(key)
type_str = str(type(self[key])).split("'")[1]
val_str = DisplayData.get_repr(self[key])
repr_string += key_str + " " * abs(sep - len(key_str)) + " " * len("Key")
repr_string += type_str + " " * abs(sep - len(type_str)) + " " * len("Item Type")
repr_string += val_str + "\n"
print(repr_string)
def __str__(self):
mystr = str(self.__dict__)
mystr = mystr[1:-1].replace(":", " =").replace("'", "")
return mystr
def __getitem__(self, item): # allows indexing into entries of __dict__ attribute
return self.__dict__[item] # thus, gives both dot notation and string access to elements.
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getattr__(self, item): # this works better with the linter.
        try:
            return self.__dict__[item]
except KeyError:
# try:
# super(Struct, self).__getattribute__(item)
# object.__getattribute__(self, item)
# except AttributeError:
raise AttributeError(f"Could not find the attribute `{item}` in object `{self.__class__}`")
def __getstate__(self): # serialize
return self.__dict__
def __setstate__(self, state): # deserialize
self.__dict__ = state
def __iter__(self):
return iter(self.dict.items())
def save_yaml(self, path):
Save.yaml(path, self.recursive_dict(self))
@property
    def dict(self):  # allows getting dictionary version without accessing private members explicitly.
return self.__dict__
@dict.setter
def dict(self, adict):
self.__dict__ = adict
def update(self, *args, **kwargs):
"""Accepts dicts and keyworded args
"""
new_struct = Struct(*args, **kwargs)
self.__dict__.update(new_struct.__dict__)
return self
def apply(self, func):
func = self.evalstr(func)
for key, val in self.items():
self[key] = func(val)
return self
def inverse(self):
return Struct({v: k for k, v in self.dict.items()})
def append_values(self, *others, **kwargs):
""" """
        return Struct(self.concat_values(*((self.dict,) + others), **kwargs))
@staticmethod
def concat_values(*dicts, method=None, lenient=True, collect_items=False, clone=True):
if method is None:
method = list.__add__
if not lenient:
keys = dicts[0].keys()
for i in dicts[1:]:
assert i.keys() == keys
# else if lenient, take the union
if clone:
total_dict = copy.deepcopy(dicts[0]) # take first dict in the tuple
else:
total_dict = dicts[0] # take first dict in the tuple
if collect_items:
            for key, val in total_dict.items():
total_dict[key] = [val]
def method(tmp1, tmp2):
return tmp1 + [tmp2]
if len(dicts) > 1: # are there more dicts?
for adict in dicts[1:]:
for key in adict.keys(): # get everything from this dict
try: # may be the key exists in the total dict already.
total_dict[key] = method(total_dict[key], adict[key])
except KeyError: # key does not exist in total dict
if collect_items:
total_dict[key] = [adict[key]]
else:
total_dict[key] = adict[key]
return Struct(total_dict)
def keys(self):
"""Same behaviour as that of `dict`, except that is doesn't produce a generator."""
return List(self.dict.keys())
def values(self):
"""Same behaviour as that of `dict`, except that is doesn't produce a generator."""
return List(self.dict.values())
def items(self):
"""Same behaviour as that of `dict`, except that is doesn't produce a generator."""
return List(self.dict.items())
def to_dataframe(self, *args, **kwargs):
# return self.values().to_dataframe(names=self.keys())
        return pd.DataFrame(self.__dict__, *args, **kwargs)
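# Illustrative usage sketch (values below are made up, not from the source):
# s = Struct(a=1, b=2).update(c=3)       # dot and string access are equivalent: s.a == s["a"]
# squares = L([1, 2, 3]).apply(lambda x: x ** 2)  # -> List([1, 4, 9])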
# -*- coding: utf-8 -*-
"""
Calculation of inhomogeneity factor for a population of stacking sequence
@author: <NAME>
"""
import sys
sys.path.append(r'C:\LAYLA')
import numpy as np
import numpy.matlib  # required for the np.matlib.repmat calls below
import pandas as pd
from src.CLA.lampam_functions import calc_lampam
# Creation of a table of stacking sequences
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = ss
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.matlib.repmat(ss, 1, 2)
ss = np.ravel(ss)
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.matlib.repmat(ss, 1, 3)
ss = np.ravel(ss)
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.matlib.repmat(ss, 1, 4)
ss = np.ravel(ss)
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 0, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 0, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 90, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 90, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 90, 90, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 90, 90, 90, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 90, 90, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 90, 90, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([45, 45, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([-45, -45, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 45, 45])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, -45, -45])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
Pop = pd.DataFrame()
for ipop in range(sst.shape[0]):
# stacking sequence
ss = sst[ipop][0]
Pop.loc[ipop, 'ss'] = ss
ss = ss.split(' ')
ss = np.array(ss)
ss = ss.astype(int)
# lamination parameter
    lampam = calc_lampam(ss, constraints)  # NOTE: `constraints` is not defined in this script; it must be provided by the LAYLA setup before running
# inhomogeneity factor
inh = np.linalg.norm(lampam[0:4] - lampam[8:12])
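    # i.e. the Euclidean distance between the in-plane (lampam[0:4]) and
    # out-of-plane (lampam[8:12]) lamination parameters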
Pop.loc[ipop, 'inh'] = inh
Pop.loc[ipop, 'lampam[9]- lampam[1]'] = lampam[8] - lampam[0]
Pop.loc[ipop, 'lampam[11]- lampam[3]'] = lampam[10] - lampam[2]
Pop.loc[ipop, 'lampam[1]'] = lampam[0]
Pop.loc[ipop, 'lampam[2]'] = lampam[1]
Pop.loc[ipop, 'lampam[3]'] = lampam[2]
Pop.loc[ipop, 'lampam[9]'] = lampam[8]
Pop.loc[ipop, 'lampam[10]'] = lampam[9]
Pop.loc[ipop, 'lampam[11]'] = lampam[10]
Pop.loc[ipop, 'lampam[12]'] = lampam[11]
ipop += 1
print(f'The population consists of {len(Pop.index)} individuals')
## Write results in an Excel sheet
writer = pd.ExcelWriter('Inhomogeneity factors.xlsx')
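# Presumably the DataFrame is then written and the writer closed (the sheet name below is
# an assumption, not taken from the source):
Pop.to_excel(writer, sheet_name='inhomogeneity')
writer.save()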
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import numpy as np
import os
from py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher
# from neo4j import GraphDatabase
# import neo4j
import networkx as nx
import json
import datetime
import matplotlib.pyplot as plt
# from ggplot import *
from shutil import copytree
import math
# from graph_tool.all import *
import json
import random
# Choose a path for the Neo4j_Imports folder to import the data from MOD into Neo4j
# formose_MOD_exports_path = "../data/formose/Neo4j_Imports"
formose_MOD_exports_path = "../data/pyruvic_acid/Neo4j_Imports"
glucose_MOD_exports_path = "../data/glucose/Neo4j_Imports"
# exports_folder_paths = [formose_MOD_exports_path, glucose_MOD_exports_path]
EXPORT_PATHS = [glucose_MOD_exports_path]
# Set the following to False if you want to leave order of import records in
# each generation file the same; set to True to randomly shuffle the order of
# the records within each file. By shuffling the order, the order at which the
# molecules are imported into Neo4j will be randomized, and thus the start point
# at which the cycles pattern match begins is randomized each time, so we can
# get samples at different starting points in the network since it is too
# computationally intensive to match for all possible patterns in the network.
SHUFFLE_GENERATION_DATA = True
# Repeat the whole import and pattern match routine REPEAT_RUNS amount of times.
# Pair this with SHUFFLE_GENERATION_DATA so that if SHUFFLE_GENERATION_DATA
# is True, sample pattern matches on the graph REPEAT_RUNS amount of times
# starting from random points on the graph from the shuffling, where each
# run matches up to NUM_STRUCTURES_LIMIT of patterns.
REPEAT_RUNS = 10
# Filter out these molecules by smiles string from being imported into Neo4j
# for pattern match / network statistic calculations.
MOLECULE_FILTER = ['O']
# If True, will match for autocatalytic pattern matches using the pattern match
# query in graph_queries/_FINAL_QUERY_PARAMETERIZED.txt. If not, will skip this
# and just do node degree / rank calculations. (One reason you might want to disable
# pattern match query results is because this is very computationally intensive
# and takes a lot of time; so disable if you are just looking for network statistics.)
PATTERN_MATCHES = True
# Rather than disabling completely if running into performance issues, limit the
# number of patterns that can be matched so that the query stops executing as
# soon as it reaches the pattern limit, and the matches are returned.
NUM_STRUCTURES_LIMIT = 100
# Limit the range of the ring size. Note that the ring size includes molecule
# and reaction nodes, so if a ring of 3 molecules to 6 molecules is desired,
# for example, then RING_SIZE_RANGE would be (3*2, 6*2), or (6, 12)
RING_SIZE_RANGE = (6, 8) # (6, 8) is size 6-8 reaction+molecule nodes, or 3-4 molecule nodes only
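# Illustrative helper (not part of the original pipeline): matched rings alternate
# molecule and reaction nodes, so a desired molecule-ring size range converts to the
# node-count range used above by doubling it.
def molecule_ring_to_node_range(min_molecules, max_molecules):
    # e.g. molecule_ring_to_node_range(3, 4) == (6, 8) == RING_SIZE_RANGE
    return (min_molecules * 2, max_molecules * 2)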
# Limit the number of generations that each network can be imported on. If None,
# no limit--will default to the maximum number of generations generated. You may
# want to limit this to ~4 generations or less if performance is an issue; the
# network will grow exponentially, so pattern match queries might take too long
# to produce results.
GENERATION_LIMIT = 4 # None
# If NETWORK_SNAPSHOTS is True, the program gathers data on the network at each generation
# in the reaction network. If False, the program gathers data only on the state of
# the network once all generations have completely finished being loaded (snapshot
# only of the final generation).
NETWORK_SNAPSHOTS = True
# Enable this only if you want to capture network statistics (such as node degree
# plots over generation)
COLLECT_NETWORK_STATISTICS = False
# Set this to True if you want to generate a static image of the network after
# loading. Might run into Out of Memory error. Default leaving this as False
# because we generated a much nicer visualization of the full network using Gephi.
FULL_NETWORK_VISUALIZATION = False
# configure network database Neo4j
url = "bolt://neo4j:0000@localhost:7687"
graph = Graph(url)
matcher = NodeMatcher(graph)
rel_matcher = RelationshipMatcher(graph)
def get_timestamp():
return str(datetime.datetime.now()).replace(":","-").replace(" ","_").replace(".","-")
def create_molecule_if_not_exists(smiles_str, generation_formed, exact_mass=0):
"""
Create molecule in DB if not exists.
"""
molecule = matcher.match("Molecule", smiles_str = smiles_str).first()
if molecule is None:
# molecule does not exist, create node with generation information
tx = graph.begin()
new_m = Node("Molecule",
smiles_str = smiles_str,
exact_mass = round(float(exact_mass),3),
generation_formed = generation_formed)
tx.create(new_m)
tx.commit()
return new_m
return molecule
def create_reaction_if_not_exists(id, rule, generation_formed):
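    """Create reaction node in DB if it does not exist, keyed by reaction id."""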
reaction = matcher.match("Reaction", id = id).first()
if reaction is None:
tx = graph.begin()
new_rxn = Node("Reaction",
id = id,
rule = rule,
generation_formed = generation_formed)
tx.create(new_rxn)
tx.commit()
return new_rxn
return reaction
def create_reactant_rel_if_not_exists(smiles_str, rxn_id, generation_formed):
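    """Create the molecule-(REACTANT)->reaction relationship if it does not already exist."""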
molecule = matcher.match("Molecule", smiles_str = smiles_str).first()
reaction = matcher.match("Reaction", id = rxn_id).first()
match_pattern = rel_matcher.match(nodes=(molecule, reaction),
r_type="REACTANT" #,
# properties = {"generation_formed": generation_formed}
)
# if pattern does not exist in db
if len(list(match_pattern)) <= 0:
tx = graph.begin()
        # see documentation for the py2neo Relationship constructor; the argument order is:
# from node, relationship, to node, and then kwargs for relationship properties
# https://py2neo.org/v4/data.html#py2neo.data.Relationship
new_r = Relationship(molecule, "REACTANT", reaction,
generation_formed=generation_formed)
tx.create(new_r)
tx.commit()
return new_r
return match_pattern
def create_product_rel_if_not_exists(smiles_str, rxn_id, generation_formed):
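    """Create the reaction-(PRODUCT)->molecule relationship if it does not already exist."""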
molecule = matcher.match("Molecule", smiles_str = smiles_str).first()
reaction = matcher.match("Reaction", id = rxn_id).first()
match_pattern = rel_matcher.match(nodes=(reaction, molecule),
r_type="PRODUCT" #,
# properties = {"generation_formed": generation_formed}
)
# if pattern does not exist in db
if len(list(match_pattern)) <= 0:
tx = graph.begin()
        # see documentation for the py2neo Relationship constructor; the argument order is:
# from node, relationship, to node, and then kwargs for relationship properties
# https://py2neo.org/v4/data.html#py2neo.data.Relationship
new_r = Relationship(reaction, "PRODUCT", molecule,
generation_formed=generation_formed)
tx.create(new_r)
tx.commit()
return new_r
return match_pattern
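# Minimal usage sketch of the helpers above (the SMILES strings, reaction id and
# rule name here are hypothetical, not taken from the MOD export files).
def _import_single_reaction_example():
    create_molecule_if_not_exists("C=O", generation_formed=0, exact_mass=30.011)
    create_molecule_if_not_exists("OCC=O", generation_formed=1, exact_mass=60.021)
    create_reaction_if_not_exists(id="rxn_0", rule="aldol_addition", generation_formed=1)
    create_reactant_rel_if_not_exists("C=O", "rxn_0", generation_formed=1)
    create_product_rel_if_not_exists("OCC=O", "rxn_0", generation_formed=1)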
def save_query_results(generation_num, query_result, file_name, this_out_folder):
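    """Save query results as JSON, then mirror them to CSV under output/<run>/<generation>/."""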
with open(f'output/' + this_out_folder + f"/{generation_num}/{file_name}.json", 'w') as file_data_out:
json.dump(query_result, file_data_out)
data_df = pd.read_json(f'output/' + this_out_folder + f"/{generation_num}/{file_name}.json")
data_df.to_csv(f'output/' + this_out_folder + f"/{generation_num}/{file_name}.csv", index=False)
def read_query_results(file_path):
try:
df = | pd.read_csv(file_path) | pandas.read_csv |
import numpy as np
import pandas as pd
from estimagic.parameters.block_trees import block_tree_to_matrix
from estimagic.parameters.block_trees import matrix_to_block_tree
from numpy.testing import assert_array_equal
from pybaum import tree_equal
def test_matrix_to_block_tree_array_and_scalar():
t = {"a": 1.0, "b": np.arange(2)}
calculated = matrix_to_block_tree(np.arange(9).reshape(3, 3), t, t)
expected = {
"a": {"a": np.array(0), "b": np.array([1, 2])},
"b": {"a": np.array([3, 6]), "b": np.array([[4, 5], [7, 8]])},
}
assert _tree_equal_up_to_dtype(calculated, expected)
def test_matrix_to_block_tree_only_params_dfs():
tree = {
"a": pd.DataFrame(index=["a", "b"]).assign(value=[1, 2]),
"b": pd.DataFrame(index=["j", "k", "l"]).assign(value=[3, 4, 5]),
}
calculated = matrix_to_block_tree(np.arange(25).reshape(5, 5), tree, tree)
expected = {
"a": {
"a": pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]),
"b": pd.DataFrame(
[[2, 3, 4], [7, 8, 9]], columns=["j", "k", "l"], index=["a", "b"]
),
},
"b": {
"a": pd.DataFrame(
[[10, 11], [15, 16], [20, 21]],
index=["j", "k", "l"],
columns=["a", "b"],
),
"b": pd.DataFrame(
[[12, 13, 14], [17, 18, 19], [22, 23, 24]],
index=["j", "k", "l"],
columns=["j", "k", "l"],
),
},
}
assert _tree_equal_up_to_dtype(calculated, expected)
def test_matrix_to_block_tree_single_element():
tree1 = {"a": 0}
tree2 = {"b": 1, "c": 2}
block_tree = {"a": {"b": 0, "c": 1}}
matrix = np.array([[0, 1]])
calculated = matrix_to_block_tree(matrix, tree1, tree2)
assert tree_equal(block_tree, calculated)
# one params df (make sure we don't get a list back)
# dataframe and scalar
# tests against jax
def test_block_tree_to_matrix_array_and_scalar():
t1 = {"c": np.arange(3), "d": (2.0, 1)}
t2 = {"a": 1.0, "b": np.arange(2)}
expected = np.arange(15).reshape(5, 3)
block_tree = {
"c": {"a": np.array([0, 3, 6]), "b": np.array([[1, 2], [4, 5], [7, 8]])},
"d": (
{"a": np.array(9), "b": np.array([10, 11])},
{"a": np.array(12), "b": np.array([13, 14])},
),
}
calculated = block_tree_to_matrix(block_tree, t1, t2)
assert_array_equal(expected, calculated)
def test_block_tree_to_matrix_only_params_dfs():
expected = np.arange(25).reshape(5, 5)
tree = {
"a": pd.DataFrame(index=["a", "b"]).assign(value=[1, 2]),
"b": pd.DataFrame(index=["j", "k", "l"]).assign(value=[3, 4, 5]),
}
block_tree = {
"a": {
"a": | pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]) | pandas.DataFrame |
import pandas as pd
from scipy import sparse
from itertools import repeat
import pytest
import anndata as ad
from anndata.utils import import_function, make_index_unique
from anndata.tests.helpers import gen_typed_df
def test_make_index_unique():
index = pd.Index(["val", "val", "val-1", "val-1"])
with pytest.warns(UserWarning):
result = make_index_unique(index)
expected = | pd.Index(["val", "val-2", "val-1", "val-1-1"]) | pandas.Index |
#%%
import os
import glob
import itertools
import re
import regex
import numpy as np
import pandas as pd
import skbio
import collections
import git
#%%
# Import this project's library
import rnaseq_barcode as rnaseq
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define data directory
datadir = f"{homedir}/data/processed_sequencing/20210715_lacI_titration/"
# Read CSV file explaining what each file is
metadata = pd.read_csv(
f"{homedir}/data/demux_sequencing/20210715_lacI_titration/MANIFEST"
)
metadata = metadata[metadata.direction == 'forward'].reset_index()
metadata = metadata.rename(columns={'sample-id': 'id'})
# output directory
outdir = f"{homedir}/data/barcodes/20210715_lacI_titration/"
# Generate output directory if it doesn't exist
if not os.path.exists(outdir):
os.makedirs(outdir)
# read reference GFP barcodes
df_gfp = pd.read_csv('./gfp_barcode.csv', comment="#")
#%%
for i, f in enumerate(metadata.filename):
print(f)
print('loading fastq into memory')
# Use skbio to have a generator to iterate over fastq
seqs = skbio.io.read(
f'{datadir}{f}',
format="fastq",
verify="false",
variant="illumina1.8"
)
# Initialize list to save sequence objects
seq_list = list()
# Initialize counter
counter = 0
# Iterate over sequences
for seq in seqs:
if counter%100000 == 0:
print(f'read # {counter}')
# Extract sequence information
seq_id = seq.metadata["id"]
# Extract sequence
sequence = str(skbio.DNA(sequence=seq, validate=False))
# Append to list
seq_list.append([seq_id, sequence])
counter += 1
# Initialize dataframe to save sequences
names = ["id", "sequence"]
df_seq = | pd.DataFrame.from_records(seq_list, columns=names) | pandas.DataFrame.from_records |
# encoding=utf-8
'''
lb 0.2190 2 folds
'''
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
import joblib
import time
start_time=time.time()
print("Starting job at time:", time.time())
debug = False
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
y = train_df["deal_probability"]
test_df = | pd.read_csv("../input/test.csv", parse_dates=["activation_date"]) | pandas.read_csv |
from typing import Any, Callable, Iterable, List
import toolz as fp
from toolz import curry
import pandas as pd
import numpy as np
from pandas.util import hash_pandas_object
from sklearn.metrics import roc_auc_score, r2_score, mean_squared_error, log_loss, precision_score, recall_score, \
fbeta_score, brier_score_loss, mean_absolute_error
from fklearn.types import EvalFnType, EvalReturnType, PredictFnType, UncurriedEvalFnType
def generic_sklearn_evaluator(name_prefix: str, sklearn_metric: Callable[..., float]) -> UncurriedEvalFnType:
"""
    Returns an evaluator built from a metric in sklearn.metrics
Parameters
----------
name_prefix: str
The default name of the evaluator will be name_prefix + target_column.
sklearn_metric: Callable
Metric function from sklearn.metrics. It should take as parameters y_true, y_score, kwargs.
Returns
----------
eval_fn: Callable
An evaluator function that uses the provided metric
"""
def p(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None,
**kwargs: Any) -> EvalReturnType:
try:
score = sklearn_metric(test_data[target_column], test_data[prediction_column], **kwargs)
except ValueError:
# this might happen if there's only one class in the fold
score = np.nan
if eval_name is None:
eval_name = name_prefix + target_column
return {eval_name: score}
return p
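# Usage sketch (illustrative, not part of the original module): wrapping an sklearn
# metric that is not covered by the evaluators below, reusing the imported
# mean_absolute_error. The DataFrame here is made up for demonstration.
def _mae_evaluator_example() -> EvalReturnType:
    mae_eval = generic_sklearn_evaluator("mae_evaluator__", mean_absolute_error)
    scored = pd.DataFrame({"target": [1.0, 2.0, 3.0], "prediction": [1.1, 1.9, 3.2]})
    return mae_eval(scored, prediction_column="prediction", target_column="target")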
@curry
def auc_evaluator(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None) -> EvalReturnType:
"""
Computes the ROC AUC score, given true label and prediction scores.
Parameters
----------
test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.
prediction_column : Strings
The name of the column in `test_data` with the prediction scores.
target_column : String
The name of the column in `test_data` with the binary target.
eval_name : String, optional (default=None)
the name of the evaluator as it will appear in the logs.
Returns
----------
log: dict
A log-like dictionary with the ROC AUC Score
"""
eval_fn = generic_sklearn_evaluator("auc_evaluator__", roc_auc_score)
eval_data = test_data.assign(**{target_column: lambda df: df[target_column].astype(int)})
return eval_fn(eval_data, prediction_column, target_column, eval_name)
@curry
def precision_evaluator(test_data: pd.DataFrame,
threshold: float = 0.5,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None) -> EvalReturnType:
"""
Computes the precision score, given true label and prediction scores.
Parameters
----------
test_data : pandas.DataFrame
        A Pandas' DataFrame with target and prediction scores.
threshold : float
A threshold for the prediction column above which samples
will be classified as 1
prediction_column : str
The name of the column in `test_data` with the prediction scores.
target_column : str
The name of the column in `test_data` with the binary target.
eval_name : str, optional (default=None)
the name of the evaluator as it will appear in the logs.
Returns
----------
log: dict
A log-like dictionary with the Precision Score
"""
eval_fn = generic_sklearn_evaluator("precision_evaluator__", precision_score)
eval_data = test_data.assign(**{prediction_column: (test_data[prediction_column] > threshold).astype(int)})
return eval_fn(eval_data, prediction_column, target_column, eval_name)
@curry
def recall_evaluator(test_data: pd.DataFrame,
threshold: float = 0.5,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None) -> EvalReturnType:
"""
Computes the recall score, given true label and prediction scores.
Parameters
----------
test_data : pandas.DataFrame
        A Pandas' DataFrame with target and prediction scores.
threshold : float
A threshold for the prediction column above which samples
will be classified as 1
prediction_column : str
The name of the column in `test_data` with the prediction scores.
target_column : str
The name of the column in `test_data` with the binary target.
eval_name : str, optional (default=None)
the name of the evaluator as it will appear in the logs.
Returns
----------
log: dict
        A log-like dictionary with the Recall Score
"""
eval_data = test_data.assign(**{prediction_column: (test_data[prediction_column] > threshold).astype(int)})
eval_fn = generic_sklearn_evaluator("recall_evaluator__", recall_score)
return eval_fn(eval_data, prediction_column, target_column, eval_name)
@curry
def fbeta_score_evaluator(test_data: pd.DataFrame,
threshold: float = 0.5,
beta: float = 1.0,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None) -> EvalReturnType:
"""
Computes the F-beta score, given true label and prediction scores.
Parameters
----------
test_data : pandas.DataFrame
        A Pandas' DataFrame with target and prediction scores.
threshold : float
A threshold for the prediction column above which samples
will be classified as 1
beta : float
The beta parameter determines the weight of precision in the combined score.
beta < 1 lends more weight to precision, while beta > 1 favors recall
(beta -> 0 considers only precision, beta -> inf only recall).
prediction_column : str
The name of the column in `test_data` with the prediction scores.
target_column : str
The name of the column in `test_data` with the binary target.
eval_name : str, optional (default=None)
the name of the evaluator as it will appear in the logs.
Returns
----------
log: dict
        A log-like dictionary with the F-beta Score
"""
eval_data = test_data.assign(**{prediction_column: (test_data[prediction_column] > threshold).astype(int)})
eval_fn = generic_sklearn_evaluator("fbeta_evaluator__", fbeta_score)
return eval_fn(eval_data, prediction_column, target_column, eval_name, beta=beta)
@curry
def logloss_evaluator(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None) -> EvalReturnType:
"""
Computes the logloss score, given true label and prediction scores.
Parameters
----------
test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.
prediction_column : Strings
The name of the column in `test_data` with the prediction scores.
target_column : String
The name of the column in `test_data` with the binary target.
eval_name : String, optional (default=None)
the name of the evaluator as it will appear in the logs.
Returns
----------
log: dict
A log-like dictionary with the logloss score.
"""
eval_fn = generic_sklearn_evaluator("logloss_evaluator__", log_loss)
eval_data = test_data.assign(**{target_column: lambda df: df[target_column].astype(int)})
return eval_fn(eval_data, prediction_column, target_column, eval_name)
@curry
def brier_score_evaluator(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None) -> EvalReturnType:
"""
Computes the Brier score, given true label and prediction scores.
Parameters
----------
test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.
prediction_column : Strings
The name of the column in `test_data` with the prediction scores.
target_column : String
The name of the column in `test_data` with the binary target.
eval_name : String, optional (default=None)
The name of the evaluator as it will appear in the logs.
Returns
----------
log: dict
A log-like dictionary with the Brier score.
"""
eval_fn = generic_sklearn_evaluator("brier_score_evaluator__", brier_score_loss)
eval_data = test_data.assign(**{target_column: lambda df: df[target_column].astype(int)})
return eval_fn(eval_data, prediction_column, target_column, eval_name)
@curry
def expected_calibration_error_evaluator(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None,
n_bins: int = 100,
bin_choice: str = "count") -> EvalReturnType:
"""
Computes the expected calibration error (ECE), given true label and prediction scores.
See "On Calibration of Modern Neural Networks"(https://arxiv.org/abs/1706.04599) for more information.
The ECE is the distance between the actuals observed frequency and the predicted probabilities,
for a given choice of bins.
Perfect calibration results in a score of 0.
For example, if for the bin [0, 0.1] we have the three data points:
1. prediction: 0.1, actual: 0
2. prediction: 0.05, actual: 1
3. prediction: 0.0, actual 0
Then the predicted average is (0.1 + 0.05 + 0.00)/3 = 0.05, and the empirical frequency is (0 + 1 + 0)/3 = 1/3.
Therefore, the distance for this bin is::
|1/3 - 0.05| ~= 0.28.
Graphical intuition::
Actuals (empirical frequency between 0 and 1)
| *
| *
| *
______ Predictions (probabilties between 0 and 1)
Parameters
----------
test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.
prediction_column : Strings
The name of the column in `test_data` with the prediction scores.
target_column : String
The name of the column in `test_data` with the binary target.
eval_name : String, optional (default=None)
The name of the evaluator as it will appear in the logs.
n_bins: Int (default=100)
The number of bins.
This is a trade-off between the number of points in each bin and the probability range they span.
You want a small enough range that still contains a significant number of points for the distance to work.
bin_choice: String (default="count")
Two possibilities:
"count" for equally populated bins (e.g. uses `pandas.qcut` for the bins)
"prob" for equally spaced probabilities (e.g. uses `pandas.cut` for the bins),
with distance weighed by the number of samples in each bin.
Returns
-------
log: dict
A log-like dictionary with the expected calibration error.
"""
if eval_name is None:
eval_name = "expected_calibration_error_evaluator__" + target_column
if bin_choice == "count":
bins = pd.qcut(test_data[prediction_column], q=n_bins)
elif bin_choice == "prob":
bins = | pd.cut(test_data[prediction_column], bins=n_bins) | pandas.cut |
import pytest
from cellrank.tl._colors import _map_names_and_colors, _create_categorical_colors
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from matplotlib.colors import is_color_like
class TestColors:
def test_create_categorical_colors_too_many_colors(self):
with pytest.raises(ValueError):
_create_categorical_colors(1000)
def test_create_categorical_colors_no_categories(self):
c = _create_categorical_colors(0)
assert c == []
def test_create_categorical_colors_neg_categories(self):
with pytest.raises(RuntimeError):
_create_categorical_colors(-1)
def test_create_categorical_colors_normal_run(self):
colors = _create_categorical_colors(62)
assert len(colors) == 62
assert all(map(lambda c: isinstance(c, str), colors))
assert all(map(lambda c: is_color_like(c), colors))
class TestMappingColors:
def test_mapping_colors_not_categorical(self):
query = pd.Series(["foo", "bar", "baz"], dtype="str")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(TypeError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_size(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_different_index(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category", index=[2, 3, 4])
reference = pd.Series(["foo", "bar", "baz"], dtype="category", index=[1, 2, 3])
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(
reference, query, colors_reference=["red", "green", "foo"]
)
def test_mapping_colors_too_few_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query, colors_reference=["red", "green"])
def test_mapping_colors_simple_1(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"]).astype("category")
expected = pd.Series(["a_1", "a_2", "b"])
expected_index = pd.Index(["a", "b", "d"])
res = _map_names_and_colors(x, y)
assert isinstance(res, pd.Series)
np.testing.assert_array_equal(res.values, expected.values)
np.testing.assert_array_equal(res.index.values, expected_index.values)
def test_mapping_colors_simple_2(self):
query = | pd.Series(["foo", "bar", "baz"], dtype="category") | pandas.Series |
import os
import pandas as pd
from autumn.projects.covid_19.mixing_optimisation.constants import OPTI_REGIONS, PHASE_2_START_TIME
from autumn.projects.covid_19.mixing_optimisation.mixing_opti import DURATIONS, MODES
from autumn.projects.covid_19.mixing_optimisation.utils import (
get_country_population_size,
get_scenario_mapping_reverse,
)
from autumn.tools.db.load import load_uncertainty_table
from autumn.settings import BASE_PATH
FIGURE_PATH = os.path.join(
BASE_PATH,
"apps",
"covid_19",
"mixing_optimisation",
"outputs",
"plots",
"outputs",
"figures",
"tables",
)
DATA_PATH = os.path.join(
BASE_PATH,
"apps",
"covid_19",
"mixing_optimisation",
"outputs",
"pbi_databases",
"calibration_and_scenarios",
"full_immunity",
)
who_deaths = {
"by_october": {
"belgium": 10.2,
"france": 31.7,
"italy": 35.9,
"spain": 32.4,
"sweden": 5.9,
"united-kingdom": 42.1,
},
"by_january": {
"belgium": 19.6,
"france": 64.3,
"italy": 74.2,
"spain": 51.4,
"sweden": 9.7,
"united-kingdom": 73.5,
},
}
def main():
uncertainty_dfs = {}
for country in OPTI_REGIONS:
dir_path = os.path.join(DATA_PATH, country)
uncertainty_dfs[country] = load_uncertainty_table(dir_path)
for per_capita in [False, True]:
make_main_outputs_tables_new_messaging(uncertainty_dfs, per_capita=per_capita)
def get_quantile(output_df, sc_idx, quantile):
mask_scenario = output_df["scenario"] == sc_idx
masked_output_df = output_df[mask_scenario]
time_read = PHASE_2_START_TIME if sc_idx == 0 else max(masked_output_df["time"])
mask_time = masked_output_df["time"] == time_read
masked_output_df = masked_output_df[mask_time]
mask_quantile = masked_output_df["quantile"] == quantile
return float(masked_output_df[mask_quantile]["value"])
def get_uncertainty_cell_value(
country, uncertainty_df, output, mode, duration, per_capita=False, population=None
):
# output is in ["deaths_before", "deaths_unmitigated", "deaths_opti_deaths", "deaths_opti_yoll",
# "yoll_before", "yoll_unmitigated", "yoll_opti_deaths", "yoll_opti_yoll"]
# return blank if repeat row
if "_before" in output or "unmitigated" in output or "who_" in output:
if mode != MODES[0] or duration != DURATIONS[0]:
return ""
# return WHO estimate if requested
if "who_" in output:
if "_before" in output:
value = who_deaths["by_october"][country]
else:
value = who_deaths["by_january"][country]
if per_capita:
country_name = country.title() if country != "united-kingdom" else "United Kingdom"
pop = get_country_population_size(country_name)
value *= 1000 / pop * 1.0e6
value = int(value)
return value
if "deaths_" in output:
type = "accum_deaths"
elif "yoll_" in output:
type = "accum_years_of_life_lost"
else:
type = "proportion_seropositive"
mask_output = uncertainty_df["type"] == type
output_df = uncertainty_df[mask_output]
if "opti_yoll" in output:
objective = "yoll"
else:
objective = "deaths"
if "unmitigated" in output:
sc_idx = get_scenario_mapping_reverse(None, None, None)
elif "_before" in output:
sc_idx = 0
else:
sc_idx = get_scenario_mapping_reverse(mode, duration, objective)
val_025 = get_quantile(output_df, sc_idx, 0.025)
val_50 = get_quantile(output_df, sc_idx, 0.5)
val_975 = get_quantile(output_df, sc_idx, 0.975)
if output.startswith("total_"):
val_025 += get_quantile(output_df, 0, 0.025)
val_50 += get_quantile(output_df, 0, 0.5)
val_975 += get_quantile(output_df, 0, 0.975)
if per_capita:
multiplier = {
"accum_deaths": 1.0e6 / population,
"accum_years_of_life_lost": 1.0e4 / population,
"proportion_seropositive": 100,
}
rounding = {"accum_deaths": 0, "accum_years_of_life_lost": 0, "proportion_seropositive": 0}
if not per_capita:
multiplier = {
"accum_deaths": 1.0 / 1000.0,
"accum_years_of_life_lost": 1.0 / 1000.0,
"proportion_seropositive": 100,
}
rounding = {"accum_deaths": 1, "accum_years_of_life_lost": 0, "proportion_seropositive": 0}
# read the percentile
median = round(multiplier[type] * val_50, rounding[type])
lower = round(multiplier[type] * val_025, rounding[type])
upper = round(multiplier[type] * val_975, rounding[type])
if rounding[type] == 0:
median = int(median)
lower = int(lower)
upper = int(upper)
cell_content = f"{median} ({lower}-{upper})"
return cell_content
def make_main_outputs_tables(uncertainty_dfs, per_capita=False):
"""
This now combines Table 1 and Table 2
"""
countries = ["belgium", "france", "italy", "spain", "sweden", "united-kingdom"]
country_names = [c.title() for c in countries]
country_names[-1] = "United Kingdom"
column_names = [
"country",
"deaths_before",
"who_before" "deaths_unmitigated",
"deaths_opti_deaths",
"deaths_opti_yoll",
"who_by_jan",
"yoll_before",
"yoll_unmitigated",
"yoll_opti_deaths",
"yoll_opti_yoll",
"sero_before",
"sero_unmitigated",
"sero_opti_deaths",
"sero_opti_yoll",
]
table = | pd.DataFrame(columns=column_names) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
def join_cols(row):
return ''.join(list(row))
def find_favor(seq):
t = []
for m in re.finditer(seq, DNA):
t += [m.start()]
return t
DNA = np.loadtxt('./data/DNA.txt', str)
DNA = ''.join(DNA)
print('DNA Length = {} '.format(len(DNA)) )
start_idxs = []
for m in re.finditer('GTC', DNA):
start_idxs += [m.start()]
start_idxs = np.array(start_idxs)
df = pd.DataFrame()
df['loc'] = np.arange(len(DNA))
df['start_ind'] = 0
df.loc[start_idxs,'start_ind'] = 1
favor = pd.read_csv('./data/favor_seqs.csv')
gtc_loc = list(favor.iloc[0,:])[0].find('GTC')
red_idxs = []
for detsize in range(3,4):
dets = favor['seq'].str[ gtc_loc-detsize:gtc_loc + 3 + detsize]
dets = list(np.unique(dets))
detslocs = list(map(find_favor, dets))
detslocs = [x for x in detslocs if len(x) > 1]
for tlocs in detslocs:
mean_dist = np.mean(np.diff(tlocs))
median_dist = np.median(np.diff(tlocs))
if(mean_dist > 1000 and mean_dist < 6000
or
median_dist > 1000 and median_dist < 6000):
red_idxs += [tlocs]
red_idxs = [item for sublist in red_idxs for item in sublist]
plt.figure(figsize=(16,4))
plt.bar(start_idxs, [0.3]*len(start_idxs), width=64, color='black', alpha=0.8)
plt.bar(red_idxs, [1]*len(red_idxs), width=64, color='red')
plt.ylim([0,1])
plt.xlim([0,len(DNA)])
plt.xlabel('DNA nucleotide index')
plt.yticks([])
plt.xticks([])
plt.title('\"Intresting\" Sequences')
plt.legend(['GTC Locations','Intresting Frequency Locations'], facecolor=(1,1,1,1), framealpha=0.98 )
plt.savefig('./out/favor_seqs_k_3.png')
plt.show()
#%% Prim VS Primon when POLY is saturated
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def ms(t):
return t/np.max(t)
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-4]) ))
tcols = df.columns
tcols = list(tcols[:-4]) + ['poly','prim','primo','seq']
df.columns = tcols
df['primo-prim'] = df['primo'] - df['prim']
labels = ['poly','primo','prim','primo-prim']
df = df.sort_values('poly').reset_index(drop=True)
sm = 100
plt.figure(figsize=(12,8))
for i, lab in enumerate(labels):
plt.subplot(4,1,i+1)
if(i != 3):
df = df.sort_values(lab).reset_index(drop=True)
y = df[lab].copy()
if(i != 3):
y = mms( y )**0.5
y = y.rolling(sm).mean().drop(np.arange(sm)).reset_index(drop=True)
y = pd.Series(y)
plt.plot(np.arange(len(y)),y, alpha=0.8)
plt.title(lab + ' sorted by self')
plt.ylabel(' ln(score)' )
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)
#%% Collect favorite sequences
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
labels = ['poly','primo','prim']
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-3]) ))
# keep favorite sequences (1000~6000 reps)
df_test = pd.read_csv('./data/validation.csv')
df.index = df['seq']
df = df.loc[df_favor['seq'],:]
df = df.dropna(axis=0).reset_index(drop=True)
df.columns = list(df.columns[:-4]) + ['poly', 'prim', 'primo', 'seq']
# keep non test set sequences
toDrop = df_test['seq']
df.index = df['seq']
df = df.drop(toDrop, axis=0, errors='ignore')
df = df.reset_index(drop=True)
print("let's unite the data by seq and look at the mean and std of each sequence")
dfm = pd.DataFrame()
dfm['primo'] = mms(df.groupby('seq').median()['primo'])
dfm['primo_std'] = mms(df.groupby('seq').std()['primo'])#/mms( df.groupby('seq').mean()['primo'] )
dfm['prim'] = mms(df.groupby('seq').median()['prim'])
dfm['prim_std'] = mms(df.groupby('seq').std()['prim'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['poly'] = mms(df.groupby('seq').median()['poly'])
dfm['poly_std'] = mms(df.groupby('seq').std()['poly'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['seq'] = dfm.index
dfm = dfm.reset_index(drop=True)
T1 = np.percentile(dfm['primo'], 95)
T2 = np.percentile(dfm['primo_std'], 90)
T3 = np.percentile(dfm['prim'], 95)
T4 = np.percentile(dfm['prim_std'], 90)
T5 = np.percentile(dfm['poly'], 95)
T6 = np.percentile(dfm['poly_std'], 90)
print('length of dfm before outlier cleaning = {}'.format(len(dfm)) )
dfm = dfm.drop(np.where(dfm['primo'] > T1 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['primo_std'] > T2 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim'] > T3 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim_std'] > T4 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly'] > T5 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly_std'] > T6 )[0]).reset_index(drop=True)
print('length of dfm after outlier cleaning = {}'.format(len(dfm)) )
nucs = np.array(list(map(list, dfm['seq']))).copy()
nucs = pd.DataFrame(nucs.copy())
nucs = nucs.add_suffix('_nuc')
nucs = nucs.reset_index(drop=True)
dfm = pd.concat([dfm, nucs], axis=1)
dfm = dfm.reset_index(drop=True)
toKeep = [x for x in dfm.columns if 'std' not in x]
dfm = dfm.loc[:,toKeep]
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab])
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab]**0.5)
dfm.to_csv('data/chip_B_favor.csv', index=False)
#%% Heatmap of ABS Correlation
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def count_letters(df_nucs, rep_dict):
X = df_nucs.copy()
X = X.replace(rep_dict)
X = np.array(X)
X = np.sum(X,1)
return X
df = pd.read_csv('data/chip_B_favor.csv')
cols = df.columns
cols = [x for x in cols if 'nuc' in x]
df_nucs = df.loc[:,cols].copy()
df_labels = df.loc[:,['primo','prim','poly']]
df_res = pd.DataFrame()
# count appearances of each individual letter
for letter in ['A','C','G','T']:
rep_dict = {'A':0,'C':0,'G':0,'T':0}
rep_dict[letter] = 1
df_res['{}_count'.format(letter) ] = count_letters(df_nucs, rep_dict)
gtc_ind_start = ''.join( list(df_nucs.iloc[0,:]) ).find('GTC') - 5
gtc_ind_end = gtc_ind_start + 5 + 3 + 5
# extract purine and pyrimidine densities
# A,G Purines
# C,T Pyrimidines
""" =================== Left Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Left_Pur_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Left_Pry_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
""" =================== Center / Determinant Count ===================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Center_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Center_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
""" =================== Right Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Right_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Right_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
df_res = pd.concat([df_res, df_labels], axis=1)
plt.figure(figsize=(12,8))
df_corr = (df_res.corr().abs())
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
plt.figure(figsize=(12,8))
df_corr = df_corr.loc[['primo','prim','poly'],['primo','prim','poly']]
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
#%% K mers spectrum
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import entropy
NMERS = [1,2,3]
df = pd.read_csv('./data/chip_B_favor.csv')
labels = ['primo','prim','poly']
np.random.RandomState(42)
df.index = df['seq']
m2 = 'CCACCCCAAAAAACCCCGTCAAAACCCCAAAAACCA'
df.loc[m2,'primo']
im = plt.imread(r'C:\Users\Ben\Desktop/Picture1.png')
x = list(range(1,14))
y = [1,
0,
0.4,
0.6,
0.47,
0.13,
0.2,
0.3,
0.5,
0.46,
0.5,
0.67,
0.8]
x= np.array(x)
y= np.array(y)
plt.imshow(im)
plt.scatter(x,y, c='red')
#for col in labels:
#df = df.drop(np.where(df[col] > np.percentile(df[col],95))[0],axis=0).reset_index(drop=True)
#df = df.drop(np.where(df[col] < np.percentile(df[col],5))[0],axis=0).reset_index(drop=True)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
plt.figure(figsize=(18,16))
for i, N in enumerate(NMERS):
letters = ['A','C','G','T']
    combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_mer = np.sum(df_mer)
df_mer = df_mer/np.sum(df_mer)
df_mer = df_mer[(df_mer >= 0.01 )]
plt.subplot(len(NMERS),1,i+1)
plt.scatter(np.arange(len(df_mer)), df_mer, color=(['blue','red','green'])[i] )
plt.xticks(np.arange(len(df_mer)), df_mer.index, rotation=90)
#plt.legend([' Variance: {}'.format( np.var(df_mer)) ])
plt.title('{}-Mer'.format(N) )
plt.ylim([0, 0.3])
plt.ylabel('mer frequency')
#%% K-MEANS and Hierarchical clustering
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
NLIST = [5]
labels = ['poly','prim','primo']
labels = ['primo']
ShowTextOnDendogram = True
showKM = True
showHC = False
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
df_backup = df.copy()
# =============================================================================
# Hirarchical Clustering
# =============================================================================
from scipy.cluster import hierarchy
if(showHC):
#WORKS FINE
X = df_backup.drop(labels,axis=1).copy()
X = X.iloc[:,:].reset_index(drop=True)
Z = hierarchy.linkage(X, method='ward')
Z = pd.DataFrame(Z)
botline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])),-2] * 1.05
topline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])) + 1, -2] * 0.95
fig = plt.figure(figsize=(4, 6))
dn = hierarchy.dendrogram(Z, p=7, truncate_mode='level', color_threshold=40, distance_sort=True)
plt.hlines([botline, topline], xmin=0, xmax=len(Z), ls='--', alpha = 0.9 )
plt.ylabel('Ward Distance')
disticks = np.unique(np.sqrt(Z.iloc[:,-2]).astype(int))
#plt.yticks( disticks**2 , disticks)
plt.xticks([])
plt.xlabel('')
Z = hierarchy.linkage(X, method='ward')
X[labels] = df_backup[labels].copy()
thr = 40
dists = [ 20, 40, 80, 120]
fntsze = 22
thr = 40
for i, thr in enumerate(dists):
Xg = X.copy()
Xg['bin'] = hierarchy.fcluster(Z, thr, criterion='distance', depth=5, R=None, monocrit=None)
Xres = Xg.groupby('bin').sum()
Xres[labels] = Xg.groupby('bin').median()[labels]
xcount = Xg.copy()
xcount['count'] = 1
xcount = xcount.groupby('bin').sum()['count']
xcnew = [xcount.iloc[0]/2]
for j in xcount.index[1:]:
xcnew += [np.sum(xcount[:j-1]) + xcount[j]/2]
xcount = pd.Series( xcnew )
xcount.index = xcount.index + 1
#plt.subplot(4,1, i+1 )
#plt.scatter(Xres.index, Xres[labels])
toKeep = [x for x in X.drop(labels, axis=1).columns if '36' not in x]
Xres = (Xres.loc[:,toKeep])
Xres.columns = [x[-1] for x in Xres.columns]
Xres = Xres.T
Xres = Xres.groupby(Xres.index).sum()
for col in Xres.columns:
Xres[col] = Xres[col] / np.sum(Xres[col])
Xres = Xres.T
row_idx = 1
for row_idx in Xres.index:
row = Xres.loc[row_idx,:]
print(
xcount.iloc[row_idx-1]
)
accumsize = 0
for dx, lett in enumerate(row.index):
x_rng = plt.gca().get_xlim()[1]
# =============================================================================
                # # ADDING TEXT TO DENDROGRAM
# =============================================================================
if(ShowTextOnDendogram == True):
plt.text(x= xcount.iloc[row_idx-1]*x_rng/len(Xg) + accumsize,
y=thr, horizontalalignment='left',
s=lett, fontsize=np.max([fntsze*row[lett], 6]) ,
weight='normal', fontname='arial')
accumsize += np.max([fntsze*row[lett], 8]) + 36
#% TODO MAKE THIS PRETTY
from sklearn.metrics import silhouette_score
res_ss = []
xvec = [5]
for i in xvec:
X = df.copy().drop(['bin'], axis=1, errors='ignore')
X = X.drop(labels, axis=1)
tmp_ss = []
for j in range(1):
km = KMeans(i, random_state=j )
y = km.fit_predict(X)
ss = silhouette_score( X, y )
tmp_ss += [ss]
print('sil score => mean: {} | std: {}'.format(np.mean(tmp_ss), np.std(tmp_ss)) )
res_ss += [np.mean(tmp_ss)]
plt.figure()
plt.scatter(xvec,res_ss)
plt.xlabel('K-Value')
plt.ylabel('Sil Score')
plt.show()
if(showKM):
col = 'primo'
plt.figure(figsize=(6,4))
for i, Nbins in enumerate(NLIST):
df = df_backup.copy()
km = KMeans(Nbins, random_state=42 )
df['bin'] = km.fit_predict(df.drop(labels,axis=1))
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4,
4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
tdf = df.loc[:,['bin',col]]
#rep_d = {0:'A',1:'B',2:'C',3:'D',4:'E'}
rep_d = {0:2,1:3,2:0,3:1,4:4}
df['bin'] = df['bin'].replace(rep_d)
centers = list(np.array(centers)[list(rep_d.values())])
print('Mean Words:')
print(centers)
#rep_d = {'A':2,'B':3,'C':0,'D':1,'E':4}
#df['bin'] = df['bin'].replace(rep_d)
plt.subplot(len(NLIST),1,i+1)
sns.violinplot(x="bin", y=col, data=df, palette="Blues", cut=0)
plt.ylim([-0.2, 1.2])
plt.ylabel('Primase \nBinding Scores', fontsize=12)
plt.title('Scores Distribution by Cluster', fontsize=12)
"""
for tx, tcent in zip(np.arange(np.max(tdf['bin'])+1) , centers):
chunks, chunk_size = len(tcent), len(tcent)//6
stlist = [ tcent[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
tcent = '\n'.join(stlist)
t = plt.text(x=tx-0.5, y=0, s=tcent, fontsize=10, color='red', fontweight='normal', backgroundcolor='white')
t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white'))
plt.xlim([-1, Nbins-1 + 0.5])
"""
#plt.xticks( np.arange(np.max(tdf['bin'])+1)
#,centers , rotation=-90, fontsize=12)
plt.yticks( [0,0.25,0.5,0.75,1], fontsize=12 )
plt.tight_layout()
plt.savefig('./out/kmeans/forpaper_B_centroids_' + str(Nbins) + 'bins')
plt.show()
#plt.close()
#%% PCA
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
NMERS = [3]
df = pd.read_csv('./data/chip_B_favor.csv')
#labels = ['primo','prim','poly']
labels = ['primo']
np.random.RandomState(42)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
"""
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
"""
for N in NMERS:
letters = ['A','C','G','T']
    combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
pca = PCA(n_components=np.min([16,len(df_mer.columns)]), svd_solver='auto', random_state=42)
df_mer = pd.DataFrame(pca.fit_transform(df_mer.dropna(axis=1)))
df_mer = df_mer.add_prefix('pc')
#MMS -1 1
for col in df_mer.columns:
df_mer[col] = mms(df_mer[col])
for col in labels:
df_mer[col] = df[col]
np.cumsum(pca.explained_variance_ratio_)
    1/0  # intentional stop: halt execution here when running the cell interactively
# 3D scatter
for lab in labels:
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111, projection='3d')
x = df_mer['pc0']
y = df_mer['pc1']
z = df_mer['pc2']
clrs = mms( (df_mer[lab]) )
ax.scatter3D(2*x + 0.05*np.random.randn(len(x)) ,
2*y + 0.05*np.random.randn(len(y)) ,
2*z + 0.05*np.random.randn(len(z)) ,
alpha=0.6, c=clrs, cmap='bwr')
plt.xlabel('pc0')
plt.ylabel('pc1')
ax.set_zlabel('pc2')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 3D Projection """
#plt.close()
fig = plt.figure(figsize=(14,10))
x = df_mer['pc0']
y = df_mer['pc1']
plt.scatter( x-0.5, #+ 0.05*np.random.randn(len(x)) ,
y-0.5, #+ 0.05*np.random.randn(len(y)) ,
alpha=0.6, c=clrs, cmap='bwr' )
plt.xlabel('pc0')
plt.ylabel('pc1')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.savefig('./out/pca/{}_{}mer'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 2D Projection """
#plt.close()
#%% Dynamic clustering and prediction
"""
This technique involves all of our research:
by using PCA we learn the existence of 5 clusters,
by using kmeans we classify each sequence to its cluster,
by using regressors such as lasso we train a model for each cluster
and predict labels with high resolution.
We can compare results with or without dynamic clustering.
"""
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pickle
from time import clock, sleep
[plt.close() for x in plt.get_fignums()]
N = 3
with_clustering = True
stime = clock()
#labels = ['poly','prim','primo']
labels = ['primo']
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
# apply KMEANS
km = KMeans(5, random_state=42, n_init=20 )
bins_pred = km.fit_predict(df.drop(labels,axis=1))
pickle.dump(km, open('./out/regressors/models/km.sav' , 'wb') )
t = km.cluster_centers_
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4, 4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
df = pd.read_csv('./data/chip_B_favor.csv')
df['bin'] = bins_pred
"""
# Hard To Predict (HTP) Generator
htpgen = pd.DataFrame(np.random.randint(0,4,[5000, 36])).replace({0:'A',1:'C',2:'G',3:'T'})
htpgen = htpgen.add_suffix('_nuc')
htpgen = OHE(htpgen)
htpgen['bin'] = km.predict(htpgen)
# Easy To Predict (HTP) Generator
etpgen = pd.DataFrame(np.random.randint(0,4,[5000, 36])).replace({0:'A',1:'C',2:'G',3:'T'})
etpgen = etpgen.add_suffix('_nuc')
etpgen = OHE(etpgen)
etpgen['bin'] = km.predict(etpgen)
t = np.array(htpgen.iloc[:,-1])
1/0
"""
from itertools import product
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_validate
#from sklearn.linear_model import LassoLarsIC
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
import xgboost as xgb
test_df = pd.read_csv('./data/validation.csv').loc[:,['seq','toKeep','label']]
test_df = test_df.iloc[np.where(test_df['toKeep'] > 0)[0],:].reset_index(drop=True)
test_df = test_df.loc[:,['seq','label']]
splitted = pd.DataFrame(np.zeros([len(test_df),36]))
splitted = splitted.add_suffix('_nuc')
for i,seq in enumerate(test_df['seq']):
splitted.iloc[i,:] = list(seq)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
splitted = OHE(splitted)
splitted['bin'] = km.predict(splitted)
test_df['bin'] = splitted['bin']
letters = ['A','C','G','T']
combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
#Train preparation
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
df_mer['seq'] = df['seq']
# for fun: per-sequence nucleotide counts (exploratory)
ACOUNT = [ x.count('A') for x in df['seq'] ]
CCOUNT = [ x.count('C') for x in df['seq'] ]
GCOUNT = [ x.count('G') for x in df['seq'] ]
TCOUNT = [ x.count('T') for x in df['seq'] ]
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
X = df_mer.copy()
X['bin'] = df['bin']
#plt.plot( (X.sum()[:-2]).sort_values() )
#X.iloc[:,:-2] = X.iloc[:,:-2]/list(np.sum(X.iloc[:,:-2]))
train = X.copy()
y = df[labels]
"""
Drek = pd.concat([train.drop('seq',axis=1), pd.DataFrame(y)], axis=1)
Drek.iloc[:,:-1] /= Drek.iloc[:,:-1].max()
Drek = Drek.drop('GTC',axis=1, errors='ignore')
Drek = Drek.corr('spearman').abs()
plt.figure(figsize=(12,12))
sns.heatmap(Drek, cmap='bwr')
plt.show()
1/0
"""
#Test preparation
df_mer = pd.DataFrame(np.zeros([len(test_df), len(combs_list)]))
df_mer.columns = combs_list
mers = test_df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
test = df_mer.copy()
test['bin'] = test_df['bin']
y_test = test_df['label']
X_test = test.copy().reset_index(drop=True)
y_test = y_test.copy().reset_index(drop=True)
p_test = np.zeros(len(y_test))
X_train = train.copy().reset_index(drop=True)
if( with_clustering == False):
X_train['bin'] = 0
y_train = y.copy().reset_index(drop=True)
mean_mae_per_lab = []
df_results = pd.DataFrame()
res_label = []
res_tbin = []
res_mae = []
res_fi = []
res_bias = []
bin_weights = []
tstr = ''
for lab in labels:
mean_mae_per_bin = []
print("\n==============================")
print('label = {}'.format(lab) )
ber = pd.DataFrame(np.zeros([5,len(np.unique(X_train['bin']))]))
ber = ber.add_prefix('bin_')
for tbin in np.unique(X_train['bin']):
"""
drek = X_train.copy()
drek['primo'] = y_train.copy()
drek = drek.sort_values(['bin','primo']).reset_index(drop=True)
xax = []
for i in range(5):
xax += list(range(sum(drek['bin'] == i)))
drek['xax'] = xax
plt.figure(figsize=(8,8))
sns.lineplot( x='xax' ,y='primo', hue='bin', data=drek )
"""
test_strong = pd.DataFrame()
test_weak = pd.DataFrame()
yv = (y_train.loc[:,lab].iloc[np.where(X_train['bin'] == tbin)[0]])
Xv = X_train.iloc[np.where(X_train['bin'] == tbin)[0]].copy().drop(['bin','seq'],axis=1)
#plt.figaspect(1)
#h_0 = np.histogram(yv, bins=len(yv))
#cdf_0 = np.cumsum(np.sort( h_0[0]/len(yv)))
#plt.plot( [0] + list(h_0[1][1:]), [0] + list(cdf_0) )
#plt.plot( [0,1],[0,1] )
#tb = pd.concat([Xv, yv], axis=1)
#plt.plot( np.sort( 1/np.sort(h_0[0]) *yv) )
"""
Drek = pd.concat([Xv, pd.DataFrame(yv)], axis=1)
Drek.iloc[:,:-1] /= Drek.iloc[:,:-1].max()
Drek = Drek.drop('GTC',axis=1)
Drek = Drek.corr().abs()
plt.figure()
sns.heatmap(Drek, cmap='bwr')
plt.show()
continue
"""
print(len(Xv))
tst_idxs = np.where(X_test['bin'] == tbin)[0]
tst_idxs = np.array(list(tst_idxs))
if( len(tst_idxs) != 0 ):
yt = y_test.iloc[tst_idxs].copy()
#initiate Test Set
test_strong = X_test.iloc[yt[yt==1].index].drop('bin',axis=1)
test_weak = X_test.iloc[yt[yt==0].index].drop('bin',axis=1)
#reg = LassoLarsIC('bic', max_iter=200, fit_intercept=False, positive=True)
#reg = LassoLarsIC('bic', max_iter=200, normalize=False, fit_intercept=False, positive=True)
#reg = xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,
# max_depth = 8, alpha = 10, n_estimators = 10)
# Regression Fitting
from copy import deepcopy
regs = []
tmp_preds = []
for rs in range(5):
""" We are going to test several regressors:
KNN, RBF-SVM, Linear-SVM, RF, XGBOOST
"""
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
#reg = RandomForestRegressor(max_depth = 8, random_state=rs)
#reg = LassoLarsIC('aic', max_iter=200, normalize=False, fit_intercept=False, positive=True)
#reg = KNeighborsRegressor(n_neighbors=2)
#reg = Lasso(alpha=0.00025, normalize=False, fit_intercept=True, positive=False)
reg = Lasso(alpha=0.00025, normalize=True, fit_intercept=False, positive=True) # This is the model we actually use
#reg = KNeighborsRegressor(15)
#reg = SVR(kernel='rbf')
#reg = SVR(kernel='linear')
#reg = RandomForestRegressor()
#reg = xgb.XGBRegressor()
idxs_pool = list(Xv.index)
train_idxs = np.random.choice( idxs_pool, size=4*len(idxs_pool)//5, replace=False )
train_idxs = np.sort(train_idxs)
tX = Xv.loc[train_idxs,:].copy()
ty = yv.loc[train_idxs].copy()
pX = Xv.drop(labels=train_idxs).copy()
py = yv.drop(labels=train_idxs).copy()
tX = tX
pX = pX
reg.fit(tX, ty)
pred = reg.predict(pX)
from sklearn.metrics import mean_absolute_error
print('K-Fold Iter: {}, MAE: {:2.3f}'.format(rs, mean_absolute_error(py,pred)) )
tmp_preds += [pred.copy()]
regs += [deepcopy(reg)]
ber.iloc[rs,tbin] = mean_absolute_error(py,pred)
#plt.plot( np.arange(len(py)), pd.Series(np.abs(py - pred)).expanding().mean() )
from sklearn.metrics import mean_squared_error
print('RMSE: {:2.3f}'.format( np.sqrt(mean_squared_error(py,pred)) ) )
print('BER: {:2.3f}'.format(np.mean(ber.iloc[:,tbin])) )
print('==================\nTotal BER: {:2.3f}'.format(np.mean(np.mean(ber))) )
reg = regs[np.argmin(np.array(ber.iloc[:,tbin]))]
pred = tmp_preds[np.argmin(np.array(ber.iloc[:,tbin]))]
if(with_clustering == False):
plt.scatter(py,pred, alpha=0.8, s=4, zorder=2 )
plt.plot([0,1],[0,1])
plt.xlabel('True')
plt.ylabel('Prediction')
plt.gca().set_aspect('equal')
"""
else:
plt.scatter(py,pred, alpha=0.8, s=4, zorder=2 )
if(tbin == 4):
plt.plot([0,1],[0,1], color='black', ls='--', zorder = 1, alpha=0.8)
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.gca().set_aspect('equal')
plt.legend( ['y=x'] + list(centers), fontsize='x-small')
plt.xlabel('true')
plt.ylabel('pred')
"""
"""
res = cross_validate(reg, Xv , y=yv, groups=None,
scoring='neg_mean_absolute_error', cv=5, n_jobs=5, verbose=0,
fit_params=None, return_estimator=True)
best_estimator = res['estimator'][np.argmax(res['test_score'])]
"""
best_estimator = reg
ber['test_score'] = -ber.iloc[:,tbin].copy()
res = ber.copy()
mean_estimator_mae = -np.mean(res['test_score'])
mean_estimator_std = np.std(res['test_score'])
print('\033[1m cv mean: {:2.3f} | cv std: {:2.3f} \033[0m'.format(mean_estimator_mae, mean_estimator_std) )
# Save best model and collect resutls
pickle.dump(best_estimator, open('./out/regressors/models/{}_{}.sav'.format(lab, tbin) , 'wb') )
tmp_err = np.min(-res['test_score'])
#mean_mae_per_bin += [ tmp_err*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_bin += [ tmp_err ]
#print(lab + ' => bin: ' + str(tbin) + ' | MAE: {:2.3f}'.format(tmp_err) )
tstr = tstr + lab + ' => bin: ' + str(tbin) + ' | MAE: {:2.3f}\n'.format(tmp_err)
if(len(test_strong) > 0):
p_test[test_strong.index] = list(best_estimator.predict(test_strong))
if(len(test_weak) > 0):
p_test[test_weak.index] = list(best_estimator.predict(test_weak))
res_label += [lab]
res_tbin += [tbin]
res_mae += [ np.round(mean_mae_per_bin[-1], 3)]
if( 'Lasso' in str(reg.__repr__)[:60]):
res_fi += [
list(zip(np.array(best_estimator.coef_), Xv.columns)) + [(best_estimator.intercept_,'Bias')]
]
else:
res_fi += [[0]]
mean_mae_per_bin[-1] = mean_mae_per_bin[-1]#*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)
bin_weights += [len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_lab += [np.sum(mean_mae_per_bin) ]
print("=================\nMean Label MAE = {:2.3f} | STD MAE = {:2.3f}".format( np.mean(mean_mae_per_bin), np.std(mean_mae_per_bin) ) )
strong_pred = p_test[y_test == 1]
weak_pred = p_test[y_test == 0]
plt.figure(figsize=(8,4))
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=10, weights=[1/len(y_train)]*len(y_train) )
plt.barh(y=bns[:-1] + 0.05, width=freqs*10, height=0.1, alpha=0.4, zorder=1)
plt.xlim([-1, len(strong_pred)+1])
sns.distplot(y, hist=False, color='black', bins=len(y), kde_kws={'cut':3})
sns.distplot(weak_pred, hist=False, color='blue')
t = sns.distplot(strong_pred, hist=False, color='red')
plt.close()
def isclose(a, b, abs_tol=0.001):
return abs(a-b) <= abs_tol
colors = ['black', 'blue', 'red']
labs = ['Train', 'Test - Weak', 'Test - Strong']
plt.figure()
for cc, unnor in enumerate(t.get_lines()):
newy = (unnor.get_ydata())/np.sum(unnor.get_ydata())
plt.plot(unnor.get_xdata(), newy, color=colors[cc], label=labs[cc])
if(cc == 1):
tnewy = []
newx = unnor.get_xdata()
for twp in weak_pred:
cands = (np.where([ isclose(tx, twp, 0.005) for tx in newx])[0])
tnewy.append(cands[len(cands)//2])
plt.scatter(weak_pred, newy[tnewy], color=colors[cc], label=None)
if(cc == 2):
tnewy = []
newx = unnor.get_xdata()
for twp in strong_pred:
cands = (np.where([ isclose(tx, twp, 0.005) for tx in newx])[0])
tnewy.append(cands[len(cands)//2])
plt.scatter(strong_pred, newy[tnewy], color=colors[cc], label=None)
plt.ylim([0,0.04])
plt.xlim([0,1])
plt.title('Binding Scores Approximated Distributions', fontsize=14)
plt.legend()
plt.xlabel('Binding Score', fontsize=12)
plt.ylabel('$Probability(Score)$', fontsize=12)
1/0  # deliberate stop used during development runs (raises ZeroDivisionError)
"""
1/0
def d2r(d):
return d*3.14/180
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=64, weights=[1/len(y_train)]*len(y_train) )
sns.distplot( y_train.loc[:,lab], bins=8, hist=True,norm_hist=True, kde=False )
plt.scatter(strong_pred, strong_pred)
ax = plt.subplot(111, projection='polar')
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=64, weights=[1/len(y_train)]*len(y_train) )
sns.distplot(y_train.loc[:,lab]*d2r(360), bins=8, hist=True, norm_hist=True, kde=False , ax=ax)
ax.set_xlabel('$P(V>v)$')
#tfr = 1-freqs.cumsum()
#tfr
plt.xticks( [d2r(x) for x in np.arange(0,360,45)], ['A{}'.format(x) for x in np.arange(0,360,45)] )
#plt.scatter( freqs[(10*strong_pred).astype(int)]*(360), strong_pred )
#.plt.scatter( freqs[(10*strong_pred).astype(int)]*(360), strong_pred )
plt.scatter( strong_pred*d2r(360), strong_pred/2 )
plt.scatter( weak_pred*d2r(360), weak_pred/2, zorder=10 )
#ax.bar( bns[1:]*360,freqs , width=0.2, alpha=0.4 )
spr = (np.round(strong_pred*100)//10).astype(int)
wpr = (np.round(weak_pred*100)//10).astype(int)
fr = np.round(freqs*100)
frcs = 1-freqs.cumsum()
frcs = np.concatenate( [[1], frcs[1:-1], [0]] )
plt.plot(frcs)
plt.scatter( fr[spr], strong_pred )
plt.scatter( fr[wpr], weak_pred )
ax = plt.subplot(111, projection='polar')
ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(strong_pred)]*36, lw=8, alpha=0.2, color='red')
ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(weak_pred)]*36, lw=8, alpha=0.2, color='blue')
thetas = [d2r(x) for x in np.linspace(0,360,8+1)[:-1]]
#ax.plot(thetas, strong_pred, 'r^', color='red')
#ax.plot(thetas, weak_pred, 'rv', color='blue')
ax.plot(thetas + [0], list(strong_pred) + [strong_pred[0]], '', color='red')
ax.plot(thetas + [0], list(weak_pred) + [weak_pred[0]], '', color='blue')
ax.set_rlabel_position(0)
#ax.set_rticks( [0,1],[2,'b'])#['']*len(np.arange(0,1.2,0.2)))
#ax.set_thetagrids([90,270])
#ax.set_rgrids()
#ax.set_yticks([])
#ax.set_ylim([0,1.1])
ax.set_xticks([])
_ = [ax.plot([d2r(x) for x in np.linspace(0,360,36)], [v]*36, alpha=0.1, color='black') for v in np.arange(0,1,0.1)]
ax = plt.subplot(111, projection='polar')
#ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(strong_pred)]*36, lw=8, alpha=0.2, color='red')
#ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(weak_pred)]*36, lw=8, alpha=0.2, color='blue')
thetas = [d2r(x) for x in np.linspace(0,360,8+1)[:-1]]
#ax.plot(thetas, strong_pred, 'r^', color='red')
#ax.plot(thetas, weak_pred, 'rv', color='blue')
ax.plot(thetas + [0], list(strong_pred) + [strong_pred[0]], '', color='red')
ax.plot(thetas + [0], list(weak_pred) + [weak_pred[0]], '', color='blue')
ax.set_rlabel_position(0)
ax.set_rticks( [0,1],[2,'b'])#['']*len(np.arange(0,1.2,0.2)))
#ax.set_thetagrids([90,270])
#ax.set_rgrids()
ax.set_yticks([])
ax.set_xticks(np.arange(10), 1-np.cumsum(freqs))
#ax.set_ylim([0,1.1])
ax.set_xticks([])
[ax.plot([d2r(x) for x in np.linspace(0,360,36)], [v]*36, alpha=0.1, color='black') for v in np.arange(0,1,0.1)]
tmp_df_for_show = pd.DataFrame()
tmp_df_for_show['theta'] = list(thetas)*2
tmp_df_for_show['val'] = np.round(list(strong_pred) + list(weak_pred),3)
tmp_df_for_show['set'] = [1]*8 + [0]*8
#sns.FacetGrid(data=tmp_df_for_show, col="theta", hue="set", height="val")
g = sns.FacetGrid(tmp_df_for_show,subplot_kws=dict(projection='polar'), height=4.5,
sharex=False, sharey=False, despine=False)
g.map(sns.scatterplot,data=tmp_df_for_show, x='theta', y='val', hue='set')
#ax.bar(bns[:-1], freqs)
"""
plt.xticks([])
plt.savefig('./out/regressors/{}_{}_{}'.format(N, y_test.name, 'LassoIC') )
plt.show()
#plt.close()
print(tstr)
etime = clock()
print('Runtime: {:5.2f} [Seconds]'.format(etime-stime) )
df_results['label'] = res_label
df_results['tbin'] = res_tbin
df_results['fi'] = res_fi
df_results['mae'] = res_mae
#df_results['w_mae'] = np.array([ [mean_mae_per_lab[0]]*5, [mean_mae_per_lab[1]]*5, [mean_mae_per_lab[2]]*5]).reshape(-1)
df_results['w_mae'] = np.multiply(mean_mae_per_bin,bin_weights )
df_results.to_csv('./out/regressors/weighted_lasso.csv',index=False)
cv_res = pd.DataFrame({'MAE':np.mean(ber), 'STD':np.std(ber)})
print(centers)
#print(cv_res)
#print( 'Final WMAE = {:2.3f}'.format( np.sum(cv_res.iloc[:-1,0]*bin_weights) ) )
print( 'Final WMAE = {:2.3f}'.format( np.sum(df_results['w_mae']) ) )
1/0  # deliberate stop used during development runs (raises ZeroDivisionError)
lofi = []
for tbin in range(len(res_fi)):
ltz = np.where(np.array(res_fi)[tbin][:,0].astype(float) != 0)[0]
ifs = np.array(res_fi)[tbin][ltz,:]
ifs = [ [x[1], x[0]] for x in list(map(list, ifs))]
ifs = [ [x[0], np.round(float(x[1]),4) ] for x in ifs]
ifs = list(np.array(ifs)[np.argsort( np.abs(np.array(ifs)[:,1].astype(float)) )[-1::-1]])
ifs = list(map(list, ifs))
lofi += [ifs]
toPrint = list((dict(ifs).items()))[:5]
print(tbin, ' => ', toPrint)
df_results['fi'] = lofi
df_results.to_csv('./out/regressors/light_weighted_lasso.csv',index=False)
#%
fi_per_bin = df_results['fi'].copy()
combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
df_fi = pd.DataFrame(np.zeros([5,len(combs_list)]))
df_fi.columns = combs_list
for i in range(len(fi_per_bin)):
tf = fi_per_bin[i]
df_fi.loc[i, list(np.array(tf)[:,0])] = (np.array(tf)[:,1]).astype(float)
zero_importance = list(df_fi.columns[np.where(df_fi.sum() == 0)])
zero_importance.remove('GTC')
sorted_imp = (df_fi.replace({0:np.nan}).median().sort_values())
sorted_imp = sorted_imp.fillna(0)
sorted_imp = sorted_imp[sorted_imp > 0]
sorted_imp = sorted_imp.sort_values()
sorted_imp = sorted_imp[-10:]
plt.figure()
plt.subplot(2,1,1)
sns.scatterplot(x=sorted_imp.index, y=sorted_imp)
plt.xticks(sorted_imp.index, rotation=60)
plt.title('Kmers Median Coefficients')
plt.ylim([-0.01, 0.2])
plt.subplot(2,1,2)
sns.scatterplot(x=zero_importance, y=[0]*len(zero_importance))
plt.xticks(zero_importance, rotation=60)
plt.title('Kmers Non Important')
plt.ylim([-0.01, 0.2])
plt.tight_layout()
plt.show()
#%% IGNORE 20.4.20
1/0  # deliberate stop: the exploratory cell below is ignored (see header above)
#PLOTTER - Dynamic clustering and prediction
"""
This technique involves all of our research:
using PCA we learn the existence of 5 clusters,
using KMeans we classify each sequence to its cluster,
and using regressors such as Lasso we train a model for each cluster
and predict labels with high resolution.
We can compare results with or without dynamic clustering.
"""
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
from itertools import product
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pickle
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
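# Usage sketch for OHE (illustrative only; the toy frame below is an assumption,
# not data from this project):
#   toy = pd.DataFrame([list('ACGT' * 9)])                  # one 36-nucleotide sequence
#   toy.columns = [str(i + 1) + '_nuc' for i in range(36)]
#   onehot = OHE(toy)   # -> 144 binary columns, '1_nuc_A' ... '36_nuc_T'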
for ClusterSize in [0,1]:
for KMER in [1,2,3,4]:
print('\n========================================================')
print('========================================================')
print( ['Without Clustering','With Clustering'][ClusterSize] )
print( '{}-Mer'.format(KMER) )
N = KMER
#labels = ['poly','prim','primo']
labels = ['primo']
with_clustering = True
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
# apply KMEANS
km = KMeans(5, random_state=42 )
bins_pred = km.fit_predict(df.drop(labels,axis=1))
pickle.dump(km, open('./out/regressors/models/km.sav' , 'wb') )
df = pd.read_csv('./data/chip_B_favor.csv')
df['bin'] = ClusterSize*bins_pred
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LassoLarsIC
test_df = pd.read_csv('./data/validation.csv').loc[:,['seq','toKeep','label']]
test_df = test_df.iloc[np.where(test_df['toKeep'] > 0)[0],:].reset_index(drop=True)
test_df = test_df.loc[:,['seq','label']]
splitted = pd.DataFrame(np.zeros([len(test_df),36]))
splitted = splitted.add_suffix('_nuc')
for i,seq in enumerate(test_df['seq']):
splitted.iloc[i,:] = list(seq)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
splitted = OHE(splitted)
splitted['bin'] = km.predict(splitted)
test_df['bin'] = splitted['bin']
letters = ['A','C','G','T']
combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
#Train preparation
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
df_mer['seq'] = df['seq']
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
X = df_mer.copy()
X['bin'] = df['bin']
train = X.copy()
y = df[labels]
#Test preparation
df_mer = pd.DataFrame(np.zeros([len(test_df), len(combs_list)]))
df_mer.columns = combs_list
mers = test_df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, N) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
test = df_mer.copy()
test['bin'] = test_df['bin']
y_test = test_df['label']
X_test = test.copy().reset_index(drop=True)
y_test = y_test.copy().reset_index(drop=True)
p_test = np.zeros(len(y_test))
X_train = train.copy().reset_index(drop=True)
if( with_clustering == False):
X_train['bin'] = 0
y_train = y.copy().reset_index(drop=True)
mean_mae_per_lab = []
df_results = pd.DataFrame()
res_label = []
res_tbin = []
res_mae = []
res_fi = []
res_bias = []
bin_weights = []
for lab in labels:
mean_mae_per_bin = []
for tbin in np.unique(X_train['bin']):
test_strong = pd.DataFrame()
test_weak = pd.DataFrame()
yv = (y_train.loc[:,lab].iloc[np.where(X_train['bin'] == tbin)[0]])
Xv = X_train.iloc[np.where(X_train['bin'] == tbin)[0]].copy().drop(['bin','seq'],axis=1)
tst_idxs = np.where(X_test['bin'] == tbin)[0]
tst_idxs = np.array(list(tst_idxs))
if( len(tst_idxs) != 0 ):
yt = y_test.iloc[tst_idxs].copy()
#initiate Test Set
test_strong = X_test.iloc[yt[yt==1].index].drop('bin',axis=1)
test_weak = X_test.iloc[yt[yt==0].index].drop('bin',axis=1)
"""
# drop zero cols
keepCols = np.where(np.sum(Xv) > 0)[0]
Xv = Xv.iloc[:,keepCols]
test_strong = test_strong.iloc[:,keepCols]
test_weak = test_weak.iloc[:,keepCols]
"""
#reg = LassoLarsIC('bic', fit_intercept=False, positive=True)
reg = LassoLarsIC('bic')
# LassoIC Regression Fitting
res = cross_validate(reg, Xv , y=yv, groups=None,
scoring='neg_mean_absolute_error', cv=5, n_jobs=6, verbose=0,
fit_params=None, return_estimator=True)
best_estimator = res['estimator'][np.argmax(res['test_score'])]
# Save best model and collect results
pickle.dump(best_estimator, open('./out/regressors/models/{}_{}.sav'.format(lab, tbin) , 'wb') )
tmp_err = np.min(-res['test_score'])
#mean_mae_per_bin += [ tmp_err*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_bin += [ tmp_err ]
print( str(tbin) + ' ' + lab + ' lasso -> ', tmp_err )
if(len(test_strong) > 0):
p_test[test_strong.index] = list(best_estimator.predict(test_strong))
if(len(test_weak) > 0):
p_test[test_weak.index] = list(best_estimator.predict(test_weak))
res_label += [lab]
res_tbin += [tbin]
res_mae += [ np.round(mean_mae_per_bin[-1], 3)]
res_fi += [
list(zip(np.array(best_estimator.coef_), Xv.columns)) + [(np.round(best_estimator.intercept_, 3), 'Bias')]
]
mean_mae_per_bin[-1] = mean_mae_per_bin[-1]#*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)
bin_weights += [len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_lab += [ np.sum(np.multiply(mean_mae_per_bin,bin_weights)) ]
print("Mean MAE = {}".format(mean_mae_per_lab[-1]) )
strong_pred = p_test[y_test == 1]
weak_pred = p_test[y_test == 0]
plt.figure(figsize=(8,4))
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=10, weights=[1/len(y_train)]*len(y_train) )
plt.barh(y=bns[:-1] + 0.05, width=freqs*10, height=0.1, alpha=0.4, zorder=1)
plt.xlim([-1, len(strong_pred)+1])
plt.scatter( x=np.arange(len(strong_pred)), y=strong_pred, color='red' , zorder=2)
plt.scatter( x=np.arange(len(weak_pred)), y=weak_pred , color='blue', zorder=3)
plt.legend(['Allegedly Strong Bonding', 'Allegedly Weak Bonding'])
plt.xlabel('Sample Index')
plt.title('Lasso - {0} distribution\nModel MAE = {1:2.3f}'.format(lab, (np.min(-res['test_score'])) ),
fontsize=16, fontname='Arial')
yticks = freqs
yticks = np.round(yticks,2)
yticks = list((yticks*100).astype(int).astype(str))
yticks = [ x + '%' for x in yticks]
plt.yticks( bns+0.05 , yticks)
plt.ylabel("Bin Probability",fontsize=12)
ax = plt.gca().twinx()
ax.yaxis.tick_right()
plt.yticks(np.arange(0,1.1,0.1))
ax.set_ylabel("Relative Bonding Strength",fontsize=12)
plt.xticks([])
#plt.savefig('./out/regressors/{}_{}_{}'.format(N, y_test.name, 'LassoIC') )
plt.show()
plt.close()
df_results['label'] = res_label
df_results['tbin'] = res_tbin
df_results['fi'] = res_fi
df_results['mae'] = res_mae
#df_results['w_mae'] = np.array([ [mean_mae_per_lab[0]]*5, [mean_mae_per_lab[1]]*5, [mean_mae_per_lab[2]]*5]).reshape(-1)
df_results['w_mae'] = np.multiply(mean_mae_per_bin,bin_weights )
lofi = []
for tbin in range(len(res_fi)):
ltz = np.where(np.array(res_fi)[tbin][:,0].astype(float) != 0)[0]
ifs = np.array(res_fi)[tbin][ltz,:]
ifs = [ [x[1], x[0]] for x in list(map(list, ifs))]
ifs = [ [x[0], np.round(float(x[1]),4) ] for x in ifs]
ifs = list(np.array(ifs)[np.argsort(np.array(ifs)[:,1])[-1::-1]])
ifs = list(map(list, ifs))
lofi += [ifs]
#print(tbin, '\n', dict(ifs), '\n')
df_results['fi'] = lofi
#df_results.to_csv('./out/regressors/light_weighted_lasso.csv',index=False)
print('========================================================')
print('========================================================\n')
#%% Exp sequences Generator - VERY HEAVY - DO NOT RUN UNLESS U HAVE TIME
df_results.index = df_results['label']
df_gen = X_train.loc[:,['seq','bin']].reset_index(drop=True)
df_gen['primo'] = y_train['primo'].copy()
#df_gen = df_gen.groupby('bin').mean().sort_values('primo').reset_index()
# REF seqs
seq_max = X_train.iloc[np.where(y_train['primo'] == 1)[0],:]['seq']
seq_min = X_train.iloc[np.where(y_train['primo'] == 0)[0],:]['seq']
seq_max = list(seq_max)[0]
seq_min = list(seq_min)[0]
"""
For Each Bin:
choose min seq
find similar seq which is not in the training
predict its' bin and score
choose max seq
find similar seq which is not in the training
predict its' bin and score
"""
exp_bins = ['max','min']
exp_seqs = [seq_max, seq_min]
exp_pred = [1,0]
N = 1
for tbin in np.unique(df_gen['bin']):
mdl = pickle.load(open('./out/regressors/models/primo_{}.sav'.format(tbin), 'rb') )
tdf = df_gen.iloc[np.where(df_gen['bin'] == tbin)[0],:]
tdf = tdf.iloc[np.where(tdf['primo'] > 0)[0],:]
tdf = tdf.iloc[np.where(tdf['primo'] < 1)[0],:]
#sort
tdf = tdf.sort_values('primo').reset_index()
# =============== MIN SEQ HANDLE =====================
tminseq = tdf.iloc[0,:]['seq']
cands_seqs = []
cands_scre = []
#find similar seq
letters = ['A','C','G','T']
newseq = tminseq
for i in range(len(newseq)):
for j in range(4):
if(i >= tminseq.find('GTC') and i < tminseq.find('GTC')+3):
continue
else:
newseq = tminseq[:i] + letters[j] + tminseq[i+1:]
seq_exists = [ x for x in tdf['seq'] if newseq == x ]
if( len(seq_exists) > 0):
continue
else:
df_newseq = pd.DataFrame(list(newseq))
df_newseq = df_newseq.T.add_suffix('_nuc')
df_newseq = OHE(df_newseq)
pbin = km.predict(df_newseq)[0]
if(pbin != tbin):
continue
else:
df_newseq = pd.DataFrame()
df_newseq['seq'] = pd.Series(newseq)
from cafcoding.tools import etl
from cafcoding.tools import meteo
from cafcoding.tools import log
from cafcoding import constants
from pandarallel import pandarallel
import pandas as pd
import srtm
import numpy as np
import logging
logger = logging.getLogger(constants.LOGGER_ID)
pandarallel.initialize()
ETL_VERSION = "1.2.2"
ABS_COLUMNS = ['TCU1_Axle1Speed','TCU1_Axle2Speed','TCU1_ElecEffApp',
'TCU2_Axle1Speed','TCU2_Axle2Speed','TCU2_ElecEffApp',
'TCU3_Axle1Speed','TCU3_Axle2Speed','TCU3_ElecEffApp',
'TCU4_Axle1Speed','TCU4_Axle2Speed','TCU4_ElecEffApp']
SHIFT_COLUMNS = ['TCU1_LinePowerConsumed','TCU1_LinePowerDissipated', 'TCU1_LinePowerReturned',
'TCU2_LinePowerConsumed','TCU2_LinePowerDissipated', 'TCU2_LinePowerReturned',
'TCU3_LinePowerConsumed','TCU3_LinePowerDissipated', 'TCU3_LinePowerReturned',
'TCU4_LinePowerConsumed','TCU4_LinePowerDissipated', 'TCU4_LinePowerReturned',
'TCU1_DCBusVoltage','TCU2_DCBusVoltage','TCU3_DCBusVoltage','TCU4_DCBusVoltage',
'PLC_TRACTION_BRAKE_COMMAND','PLC_Speed','EPAC1_WSP_Acting','EPAC2_WSP_Acting',
'TCU1_Axle1Speed_abs','TCU1_Axle2Speed_abs','TCU1_ElecEffApp_abs',
'TCU2_Axle1Speed_abs','TCU2_Axle2Speed_abs','TCU2_ElecEffApp_abs',
'TCU3_Axle1Speed_abs','TCU3_Axle2Speed_abs','TCU3_ElecEffApp_abs',
'TCU4_Axle1Speed_abs','TCU4_Axle2Speed_abs','TCU4_ElecEffApp_abs']
TRAIN_REMOVE_AT_STOP_WINDOW = 5
@log.log_decorator
def process_etl(df, df_meteo, del_stopped_train=True, columns_to_abs=ABS_COLUMNS, columns_to_shift=SHIFT_COLUMNS, fill_delta=False):
df= prepare_data_time(df)
date_range = {'min': df['ts_date'].min(), 'max': df['ts_date'].max()}
logger.info('Filter dates: {} - {}'.format(date_range["min"],date_range["max"]))
df_meteo, stations_list = process_meteo_dataframe(df_meteo,date_range)
df = etl.fill_dataframe_by_ut(df)
df = generate_new_columns_from_gps_data(df, stations_list)
df = merge_with_meteo(df, df_meteo)
df, delta = create_index_ts_date(df)
if not fill_delta:
delta = None
# Careful: we run fill_dataframe_by_ut here applying delta
df = generate_auxiliar_columns(df,delta)
if del_stopped_train:
df = delete_stopped_train(df)
# Physical-context feature functions
df = etl.column_to_absolute(df,columns_to_abs)
df = etl.create_shifts(df,columns_to_shift)
df = etl.create_differences(df,columns_to_shift)
df = create_categorical(df)
df = etl.fill_dataframe_by_ut(df)
return df
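# Usage sketch (illustrative only; file names are assumptions, not part of this module):
#   signals = pd.read_csv('train_signals.csv')   # raw unit/train sensor data
#   meteo = pd.read_csv('meteo.csv')             # raw meteo station data
#   df_clean = process_etl(signals, meteo, del_stopped_train=True)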
@log.log_decorator
def prepare_data_time(df):
df = df.sort_values(by=['ut','ts_date'])
df['ts_date'] = pd.to_datetime(df.ts_date, format="%Y/%m/%d %H:%M:%S.%f")
# -*- coding: utf-8 -*-
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Properties
----------
platform
'omni'
name
'hro'
tag
Select time between samples, one of {'1min', '5min'}
inst_id
None supported
Note
----
Files are stored by the first day of each month. When downloading use
omni.download(start, stop, freq='MS') to only download days that could possibly
have data. 'MS' gives a monthly start frequency.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Warnings
--------
- Currently no cleaning routine. Though the CDAWEB description indicates that
these level-2 products are expected to be ok.
- Module not written by OMNI team.
Custom Functions
----------------
time_shift_to_magnetic_poles
Shift time from bowshock to intersection with one of the magnetic poles
calculate_clock_angle
Calculate the clock angle and IMF mag in the YZ plane
calculate_imf_steadiness
Calculate the IMF steadiness using clock angle and magnitude in the YZ plane
calculate_dayside_reconnection
Calculate the dayside reconnection rate
"""
import datetime as dt
import functools
import numpy as np
import pandas as pds
import scipy.stats as stats
import warnings
from pysat import logger
from pysat.instruments.methods import general as mm_gen
from pysatNASA.instruments.methods import cdaweb as cdw
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'omni'
name = 'hro'
tags = {'1min': '1-minute time averaged data',
'5min': '5-minute time averaged data'}
inst_ids = {'': [tag for tag in tags.keys()]}
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'1min': dt.datetime(2009, 1, 1),
'5min': dt.datetime(2009, 1, 1)}}
# ----------------------------------------------------------------------------
# Instrument methods
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
ackn_str = ''.join(('For full acknowledgement info, please see: ',
'https://omniweb.gsfc.nasa.gov/html/citing.html'))
self.acknowledgements = ackn_str
self.references = ' '.join(('<NAME> and <NAME>, Solar',
'wind spatial scales in and comparisons',
'of hourly Wind and ACE plasma and',
'magnetic field data, J. Geophys. Res.,',
'Vol. 110, No. A2, A02209,',
'10.1029/2004JA010649.'))
logger.info(ackn_str)
return
def clean(self):
""" Cleaning function for OMNI data
Note
----
'clean' - Replace default fill values with NaN
'dusty' - Same as clean
'dirty' - Same as clean
'none' - Preserve original fill values
"""
for key in self.data.columns:
if key != 'Epoch':
fill = self.meta[key, self.meta.labels.fill_val][0]
idx, = np.where(self[key] == fill)
# Set the fill values to NaN
self[idx, key] = np.nan
# Replace the old fill value with NaN and add this to the notes
fill_notes = "".join(["Replaced standard fill value with NaN. ",
"Standard value was: {:}".format(
self.meta[key,
self.meta.labels.fill_val])])
notes = '\n'.join([str(self.meta[key, self.meta.labels.notes]),
fill_notes])
self.meta[key, self.meta.labels.notes] = notes
self.meta[key, self.meta.labels.fill_val] = np.nan
return
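# Example usage (sketch, not part of the original module; assumes pysat >= 3.0
# with the pysatNASA instruments registered):
#   import pysat
#   omni = pysat.Instrument(platform='omni', name='hro', tag='1min')
#   omni.download(dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 31), freq='MS')
#   omni.load(date=dt.datetime(2009, 1, 1))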
# ----------------------------------------------------------------------------
# Instrument functions
#
# Use the default CDAWeb and pysat methods
# Set the list_files routine
fname = ''.join(['omni_hro_{tag:s}_{{year:4d}}{{month:02d}}{{day:02d}}_',
'v{{version:02d}}.cdf'])
supported_tags = {inst_id: {tag: fname.format(tag=tag) for tag in tags.keys()}
for inst_id in inst_ids.keys()}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags,
file_cadence=pds.DateOffset(months=1))
import pandas as pd
import requests
from io import StringIO
######################################### Items DF ########################################
def get_items():
'''
This function obtains the items data from the base url,
loops through items pages,
makes items df,
and writes the df to a csv, and
returns the data in a pandas dataframe
'''
items_list = []
response = requests.get('https://python.zach.lol/api/v1/items')
data = response.json()
n = data['payload']['max_page']
for i in range(1, n+1):
url = 'https://python.zach.lol/api/v1/items?page=' + str(i)
response = requests.get(url)
data = response.json()
page_items = data['payload']['items']
items_list += page_items
#create df
items = pd.DataFrame(items_list)
#make items df as a csv
items.to_csv('items.csv')
return items
######################################### Stores DF #######################################
def get_stores():
'''
This function obtains the stores data from the base url,
loops through stores pages,
makes stores df,
and writes the df to a csv, and
returns the data in a pandas dataframe
'''
stores_list = []
response = requests.get('https://python.zach.lol/api/v1/stores')
data = response.json()
n = data['payload']['max_page']
for i in range(1, n+1):
url = 'https://python.zach.lol/api/v1/stores?page=' + str(i)
response = requests.get(url)
data = response.json()
page_stores = data['payload']['stores']
stores_list += page_stores
#create df
stores = pd.DataFrame(stores_list)
#make items df as a csv
stores.to_csv('stores.csv')
return stores
######################################### Sales DF ########################################
def get_sales():
'''
This function obtains the sales data from the base url,
loops through sales pages,
makes sales df,
and writes the df to a csv, and
returns the data in a pandas dataframe
'''
sales_list = []
response = requests.get('https://python.zach.lol/api/v1/sales')
data = response.json()
n = data['payload']['max_page']
for i in range(1, n+1):
url = 'https://python.zach.lol/api/v1/sales?page=' + str(i)
response = requests.get(url)
data = response.json()
page_sales = data['payload']['sales']
sales_list += page_sales
#create df
sales = pd.DataFrame(sales_list)
#make items df as a csv
sales.to_csv('sales.csv')
return sales
####################################### Complete DF ########################################
def get_complete_data():
'''
This function takes in the previous 3 functions above,
merges the sales, stores, and items dfs,
writes the df to a csv, and
returns the data in a pandas dataframe
'''
items = pd.read_csv('items.csv')
stores = pd.read_csv('stores.csv')
sales= pd.read_csv('sales.csv')
#merge sales w/ stores
combined_df = sales.merge(stores, left_on='store', right_on='store_id')
#merge sales and stores on items
combined_df = combined_df.merge(items, left_on='item', right_on='item_id')
#make complete dataframe to a csv
combined_df.to_csv('combined_df.csv')
return combined_df
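# Usage sketch (the call order below is an illustration, not part of the original module):
#   get_items(); get_stores(); get_sales()   # write the three CSVs first
#   df = get_complete_data()                 # then merge them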
######################################### OPSD Germany DF ########################################
def get_germany_data():
'''
This function gets Open Power Systems Data for Germany,
reads data as a csv, and
returns the data in a pandas dataframe
'''
url = 'https://raw.githubusercontent.com/jenfly/opsd/master/opsd_germany_daily.csv'
germany_data = pd.read_csv(url)
return germany_data
######################################### alternate functions ########################################
def _create_df_from_payloads(endpoint, max_pages, target_key_name):
"""
Helper function that loops through the pages returned from the Zach API,
adds the information to a list, and then converts it to a single dataframe.
"""
page_list = []
for i in range(1, max_pages + 1):
response = requests.get(endpoint + "?page=" + str(i))
data = response.json()
page_items = data['payload'][target_key_name]
page_list += page_items
return pd.DataFrame(page_list)
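# Usage sketch (illustrative; the max_pages value is an assumption):
#   items_df = _create_df_from_payloads('https://python.zach.lol/api/v1/items',
#                                       max_pages=3, target_key_name='items')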
# -*- coding: utf-8 -*-
"""
This module is EXPERIMENTAL, that means that tests are missing.
The reason is that the coastdat2 dataset is deprecated and will be replaced by
the OpenFred dataset from Helmholtz-Zentrum Geesthacht. It should work though.
This module is designed for use with the coastdat2 weather data set
of the Helmholtz-Zentrum Geesthacht.
A description of the coastdat2 data set can be found here:
https://www.earth-syst-sci-data.net/6/147/2014/
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
# Python libraries
import os
import datetime
import logging
from collections import namedtuple
import calendar
# External libraries
import requests
import pandas as pd
import pvlib
from shapely.geometry import Point
from windpowerlib.wind_turbine import WindTurbine
# Internal modules
from reegis import tools
from reegis import feedin
from reegis import config as cfg
from reegis import powerplants as powerplants
from reegis import geometries
from reegis import bmwi
def download_coastdat_data(filename=None, year=None, url=None,
test_only=False, overwrite=True):
"""
Download coastdat data set from internet source.
Parameters
----------
filename : str
Full path with the filename, where the downloaded file will be stored.
year : int or None
Year of the weather data set. If a url is passed this value will be
ignored because it is used to create the default url.
url : str or None
Own url can be used if the default url does not work an one found an
alternative valid url.
test_only : bool
If True the the url is tested but the file will not be downloaded
(default: False).
overwrite : bool
If True the file will be downloaded even if it already exist.
(default: True)
Returns
-------
str or None : If the url is valid the filename is returned otherwise None.
Examples
--------
>>> download_coastdat_data(year=2014, test_only=True)
'coastDat2_de_2014.h5'
>>> print(download_coastdat_data(url='https://osf.io/url', test_only=True))
None
>>> download_coastdat_data(filename='w14.hd5', year=2014) # doctest: +SKIP
"""
if url is None:
url_ids = cfg.get_dict("coastdat_url_id")
url_id = url_ids.get(str(year), None)
if url_id is not None:
url = cfg.get("coastdat", "basic_url").format(url_id=url_id)
if url is not None and not test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
msg = "Downloading the coastdat2 file of {0} from {1} ..."
logging.info(msg.format(year, url))
if filename is None:
headers = response.headers["Content-Disposition"]
filename = (
headers.split("; ")[1].split("=")[1].replace('"', "")
)
tools.download_file(filename, url, overwrite=overwrite)
return filename
else:
raise ValueError("URL not valid: {0}".format(url))
elif url is not None and test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
headers = response.headers["Content-Disposition"]
filename = headers.split("; ")[1].split("=")[1].replace('"', "")
else:
filename = None
return filename
else:
raise ValueError("No URL found for {0}".format(year))
def fetch_id_by_coordinates(latitude, longitude):
"""
Get nearest weather data set to a given location.
Parameters
----------
latitude : float
longitude : float
Returns
-------
int : coastdat id
Examples
--------
>>> fetch_id_by_coordinates(53.655119, 11.181475)
1132101
"""
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
location = Point(longitude, latitude)
cid = coastdat_polygons[coastdat_polygons.contains(location)].index
if len(cid) == 0:
msg = "No id found for latitude {0} and longitude {1}."
logging.warning(msg.format(latitude, longitude))
return None
elif len(cid) == 1:
return cid[0]
def fetch_data_coordinates_by_id(coastdat_id):
"""
Returns the coordinates of the weather data set.
Parameters
----------
coastdat_id : int or str
ID of the coastdat weather data set
Returns
-------
namedtuple : Fields are latitude and longitude
Examples
--------
>>> location=fetch_data_coordinates_by_id(1132101)
>>> round(location.latitude, 3)
53.692
>>> round(location.longitude, 3)
11.351
"""
coord = namedtuple("weather_location", "latitude, longitude")
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
c = coastdat_polygons.loc[int(coastdat_id)].geometry.centroid
return coord(latitude=c.y, longitude=c.x)
def fetch_coastdat_weather(year, coastdat_id):
"""
Fetch weather one coastdat weather data set.
Parameters
----------
year : int
Year of the weather data set
coastdat_id : numeric
ID of the coastdat data set.
Returns
-------
pd.DataFrame : Weather data set.
Examples
--------
>>> coastdat_id=fetch_id_by_coordinates(53.655119, 11.181475)
>>> fetch_coastdat_weather(2014, coastdat_id)['v_wind'].mean().round(2)
4.39
"""
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(filename=weather_file_name, year=year)
key = "/A{0}".format(int(coastdat_id))
return pd.DataFrame(pd.read_hdf(weather_file_name, key))
def adapt_coastdat_weather_to_pvlib(weather, loc):
"""
Adapt the coastdat weather data sets to the needs of the pvlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
loc : pvlib.location.Location
The coordinates of the weather data point.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> c=fetch_data_coordinates_by_id(cd_id)
>>> location=pvlib.location.Location(**getattr(c, '_asdict')())
>>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location)
>>> 'ghi' in cd_weather.columns
False
>>> 'ghi' in pv_weather.columns
True
"""
w = pd.DataFrame(weather.copy())
w["temp_air"] = w.temp_air - 273.15
w["ghi"] = w.dirhi + w.dhi
clearskydni = loc.get_clearsky(w.index).dni
w["dni"] = pvlib.irradiance.dni(
w["ghi"],
w["dhi"],
pvlib.solarposition.get_solarposition(
w.index, loc.latitude, loc.longitude
).zenith,
clearsky_dni=clearskydni,
)
return w
def adapt_coastdat_weather_to_windpowerlib(weather, data_height):
"""
Adapt the coastdat weather data sets to the needs of the pvlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
data_height : dict
The data height for each weather data column.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> data_height=cfg.get_dict('coastdat_data_height')
>>> wind_weather=adapt_coastdat_weather_to_windpowerlib(
... cd_weather, data_height)
>>> cd_weather.columns.nlevels
1
>>> wind_weather.columns.nlevels
2
"""
weather = pd.DataFrame(weather.copy())
cols = {
"v_wind": "wind_speed",
"z0": "roughness_length",
"temp_air": "temperature",
}
weather.rename(columns=cols, inplace=True)
dh = [(key, data_height[key]) for key in weather.columns]
weather.columns = pd.MultiIndex.from_tuples(dh)
return weather
def normalised_feedin_for_each_data_set(
year, wind=True, solar=True, overwrite=False
):
"""
Loop over all weather data sets (regions) and calculate a normalised time
series for each data set with the given parameters of the power plants.
This file could be more elegant and shorter but it will be rewritten soon
with the new feedinlib features.
year : int
The year of the weather data set to use.
wind : boolean
Set to True if you want to create wind feed-in time series.
solar : boolean
Set to True if you want to create solar feed-in time series.
Returns
-------
"""
# Get coordinates of the coastdat data points.
data_points = pd.read_csv(
os.path.join(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_centroid"),
),
index_col="gid",
)
pv_sets = None
wind_sets = None
# Open coastdat-weather data hdf5 file for the given year or try to
# download it if the file is not found.
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(year=year, filename=weather_file_name)
weather = pd.HDFStore(weather_file_name, mode="r")
# Fetch coastdat data heights from ini file.
data_height = cfg.get_dict("coastdat_data_height")
# Create basic file and path pattern for the resulting files
coastdat_path = os.path.join(cfg.get("paths_pattern", "coastdat"))
feedin_file = os.path.join(
coastdat_path, cfg.get("feedin", "file_pattern")
)
# Fetch coastdat region-keys from weather file.
key_file_path = coastdat_path.format(year="", type="")[:-2]
key_file = os.path.join(key_file_path, "coastdat_keys.csv")
if not os.path.isfile(key_file):
coastdat_keys = weather.keys()
if not os.path.isdir(key_file_path):
os.makedirs(key_file_path)
pd.Series(coastdat_keys).to_csv(key_file)
else:
coastdat_keys = pd.read_csv(
key_file, index_col=[0], squeeze=True, header=None
)
txt_create = "Creating normalised {0} feedin time series for {1}."
hdf = {"wind": {}, "solar": {}}
if solar:
logging.info(txt_create.format("solar", year))
# Add directory if not present
os.makedirs(
coastdat_path.format(year=year, type="solar"), exist_ok=True
)
# Create the pv-sets defined in the solar.ini
pv_sets = feedin.create_pvlib_sets()
# Open a file for each main set (subsets are stored in columns)
for pv_key, pv_set in pv_sets.items():
filename = feedin_file.format(
type="solar", year=year, set_name=pv_key
)
if not os.path.isfile(filename) or overwrite:
hdf["solar"][pv_key] = pd.HDFStore(filename, mode="w")
if wind:
logging.info(txt_create.format("wind", year))
# Add directory if not present
os.makedirs(
coastdat_path.format(year=year, type="wind"), exist_ok=True
)
# Create the pv-sets defined in the wind.ini
wind_sets = feedin.create_windpowerlib_sets()
# Open a file for each main set (subsets are stored in columns)
for wind_key, wind_set in wind_sets.items():
for subset_key, subset in wind_set.items():
wind_sets[wind_key][subset_key] = WindTurbine(**subset)
filename = feedin_file.format(
type="wind", year=year, set_name=wind_key
)
if not os.path.isfile(filename) or overwrite:
hdf["wind"][wind_key] = pd.HDFStore(filename, mode="w")
# Define basic variables for time logging
remain = len(coastdat_keys)
done = 0
start = datetime.datetime.now()
# Loop over all regions
for coastdat_key in coastdat_keys:
# Get weather data set for one location
local_weather = weather[coastdat_key]
# Adapt the coastdat weather format to the needs of pvlib.
# The expression "len(list(hdf['solar'].keys()))" returns the number
# of open hdf5 files. If no file is open, there is nothing to do.
if solar and len(list(hdf["solar"].keys())) > 0:
# Get coordinates for the weather location
local_point = data_points.loc[int(coastdat_key[2:])]
# Create a pvlib Location object
location = pvlib.location.Location(
latitude=local_point["lat"], longitude=local_point["lon"]
)
# Adapt weather data to the needs of the pvlib
local_weather_pv = adapt_coastdat_weather_to_pvlib(
local_weather, location
)
# Create one DataFrame for each pv-set and store into the file
for pv_key, pv_set in pv_sets.items():
if pv_key in hdf["solar"]:
hdf["solar"][pv_key][coastdat_key] = feedin.feedin_pv_sets(
local_weather_pv, location, pv_set
)
# Create one DataFrame for each wind-set and store into the file
if wind and len(list(hdf["wind"].keys())) > 0:
local_weather_wind = adapt_coastdat_weather_to_windpowerlib(
local_weather, data_height
)
for wind_key, wind_set in wind_sets.items():
if wind_key in hdf["wind"]:
hdf["wind"][wind_key][
coastdat_key
] = feedin.feedin_wind_sets(local_weather_wind, wind_set)
# Start- time logging *******
remain -= 1
done += 1
if divmod(remain, 10)[1] == 0:
elapsed_time = (datetime.datetime.now() - start).seconds
remain_time = elapsed_time / done * remain
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=remain_time
)
msg = "Actual time: {:%H:%M}, estimated end time: {:%H:%M}, "
msg += "done: {0}, remain: {1}".format(done, remain)
logging.info(msg.format(datetime.datetime.now(), end_time))
# End - time logging ********
for k1 in hdf.keys():
for k2 in hdf[k1].keys():
hdf[k1][k2].close()
weather.close()
logging.info(
"All feedin time series for {0} are stored in {1}".format(
year, coastdat_path.format(year=year, type="")
)
)
def store_average_weather(
data_type,
weather_path=None,
years=None,
keys=None,
out_file_pattern="average_data_{data_type}.csv",
):
"""
Get average wind speed over all years for each weather region. This can be
used to select the appropriate wind turbine for each region
(strong/low wind turbines).
Parameters
----------
data_type : str
The data_type of the coastdat weather data: 'dhi', 'dirhi', 'pressure',
'temp_air', 'v_wind', 'z0'.
keys : list or None
List of coastdat keys. If None all available keys will be used.
years : list or None
List of one or more years to calculate the average data from. You
have to make sure that the weather data files for the given years
exist in the weather path.
weather_path : str
Path to folder that contains all needed files. If None the default
path defined in the config file will be used.
out_file_pattern : str or None
Name of the results file with a placeholder for the data type
(e.g. ``average_data_{data_type}.csv``). If None, no file will be written.
Examples
--------
>>> store_average_weather('temp_air', years=[2014, 2013]) # doctest: +SKIP
>>> v=store_average_weather('v_wind', years=[2014],
... out_file_pattern=None, keys=[1132101])
>>> float(v.loc[1132101].round(2))
4.39
"""
logging.info("Calculating the average wind speed...")
weather_pattern = cfg.get("coastdat", "file_pattern")
if weather_path is None:
weather_path = cfg.get("paths", "coastdat")
# Finding existing weather files.
data_files = os.listdir(weather_path)
# Possible time range for coastdat data set (reegis: 1998-2014).
check = True
if years is None:
years = range(1948, 2017)
check = False
used_years = []
for year in years:
if weather_pattern.format(year=year) in data_files:
used_years.append(year)
elif check is True:
msg = "File not found: {0}".format(weather_pattern.format(year=year))
raise FileNotFoundError(msg)
# Loading coastdat-grid as shapely geometries.
coastdat_polygons = pd.DataFrame(
geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
)
coastdat_polygons.drop("geometry", axis=1, inplace=True)
# Opening all weather files
weather = dict()
# open hdf files
for year in used_years:
weather[year] = pd.HDFStore(
os.path.join(weather_path, weather_pattern.format(year=year)),
mode="r",
)
if keys is None:
keys = coastdat_polygons.index
n = len(list(keys))
logging.info("Remaining: {0}".format(n))
for key in keys:
data_type_avg = pd.Series()
n -= 1
if n % 100 == 0:
logging.info("Remaining: {0}".format(n))
hdf_id = "/A{0}".format(key)
for year in used_years:
ws = weather[year][hdf_id][data_type]
data_type_avg = data_type_avg.append(ws, verify_integrity=True)
# calculate the average wind speed for one grid item
coastdat_polygons.loc[
key, "{0}_avg".format(data_type)
] = data_type_avg.mean()
# Close hdf files
for year in used_years:
weather[year].close()
if keys is not None:
coastdat_polygons.dropna(inplace=True)
# write results to csv file
if out_file_pattern is not None:
filename = out_file_pattern.format(data_type=data_type)
fn = os.path.join(weather_path, filename)
logging.info("Average {0} saved to {1}".format(data_type, fn))
coastdat_polygons.to_csv(fn)
return coastdat_polygons
def spatial_average_weather(
year, geo, parameter, name, outpath=None, outfile=None
):
"""
Calculate the mean value of a parameter over all data sets within each
region for one year.
Parameters
----------
year : int
Select the year you want to calculate the average temperature for.
geo : geometries.Geometry object
Polygons to calculate the average parameter for.
outpath : str
Place to store the outputfile.
outfile : str
Set your own name for the outputfile.
parameter : str
Name of the item (temperature, wind speed,... of the weather data set.
name : str
Name of the regions table to be used as a column name.
Returns
-------
str : Full file name of the created file.
Example
-------
>>> germany_geo=geometries.load(
... cfg.get('paths', 'geometry'),
... cfg.get('geometry', 'germany_polygon'))
>>> fn=spatial_average_weather(2012, germany_geo, 'temp_air', 'deTemp',
... outpath=os.path.expanduser('~')
... )# doctest: +SKIP
>>> temp=pd.read_csv(fn, index_col=[0], parse_dates=True, squeeze=True
... )# doctest: +SKIP
>>> round(temp.mean() - 273.15, 2)# doctest: +SKIP
8.28
>>> os.remove(fn)# doctest: +SKIP
"""
logging.info(
"Getting average {0} for {1} in {2} from coastdat2.".format(
parameter, name, year
)
)
name = name.replace(" ", "_")
# Create a Geometry object for the coastdat centroids.
coastdat_geo = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
coastdat_geo["geometry"] = coastdat_geo.centroid
# Join the tables to create a list of coastdat id's for each region.
coastdat_geo = geometries.spatial_join_with_buffer(
coastdat_geo, geo, name=name, limit=0
)
# Fix regions with no matches (no matches if a region is too small).
fix = {}
for reg in set(geo.index) - set(coastdat_geo[name].unique()):
reg_point = geo.representative_point().loc[reg]
coastdat_poly = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
fix[reg] = coastdat_poly.loc[
coastdat_poly.intersects(reg_point)
].index[0]
# Open the weather file
weather_file = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file):
download_coastdat_data(year=year, filename=weather_file)
weather = pd.HDFStore(weather_file, mode="r")
# Calculate the average temperature for each region with more than one id.
avg_value = pd.DataFrame()
for region in geo.index:
cd_ids = coastdat_geo[coastdat_geo[name] == region].index
number_of_sets = len(cd_ids)
tmp = pd.DataFrame()
logging.debug((region, len(cd_ids)))
for cid in cd_ids:
try:
cid = int(cid)
except ValueError:
pass
if isinstance(cid, int):
key = "A" + str(cid)
else:
key = cid
tmp[cid] = weather[key][parameter]
if len(cd_ids) < 1:
key = "A" + str(fix[region])
avg_value[region] = weather[key][parameter]
else:
avg_value[region] = tmp.sum(1).div(number_of_sets)
weather.close()
# Create the name an write to file
regions = sorted(geo.index)
if outfile is None:
out_name = "{0}_{1}".format(regions[0], regions[-1])
outfile = os.path.join(
outpath,
"average_{parameter}_{type}_{year}.csv".format(
year=year, type=out_name, parameter=parameter
),
)
avg_value.to_csv(outfile)
logging.info("Average {0} saved to {1}".format(parameter, outfile))
return outfile
def federal_state_average_weather(year, parameter):
"""
Example for spatial_average_weather() with federal states polygons.
Parameters
----------
year
parameter
Returns
-------
"""
federal_states = geometries.get_federal_states_polygon()
filename = os.path.join(
cfg.get("paths", "coastdat"),
"average_{0}_BB_TH_{1}.csv".format(parameter, year),
)
if not os.path.isfile(filename):
spatial_average_weather(
year, federal_states, parameter, "federal_states", outfile=filename
)
return pd.read_csv(
filename,
index_col=[0],
parse_dates=True,
date_parser=lambda col: pd.to_datetime(col, utc=True),
)
def aggregate_by_region_coastdat_feedin(
pp, regions, year, category, outfile, weather_year=None
):
"""
Aggregate wind and pv feedin time series for each region defined by
a geoDataFrame with region polygons.
Parameters
----------
pp : pd.DataFrame
Power plant table.
regions : geopandas.geoDataFrame
Table with the polygons.
year : int
Year for the power plants and for the weather data if weather_year is
None.
category : str
Feed-in category: 'wind' or 'solar'
outfile : str
Name of the output file.
weather_year : int or None
If None the year parameter will be used for the weather year.
"""
cat = category.lower()
logging.info("Aggregating {0} feed-in for {1}...".format(cat, year))
if weather_year is None:
weather_year = year
weather_year_str = ""
else:
logging.info("Weather data taken from {0}.".format(weather_year))
weather_year_str = " (weather: {0})".format(weather_year)
# Define the path for the input files.
coastdat_path = os.path.join(cfg.get("paths_pattern", "coastdat")).format(
year=weather_year, type=cat
)
# Do normalized timeseries exist? If not, create
if os.path.isdir(coastdat_path):
if len(os.listdir(coastdat_path)) == 0:
normalised_feedin_for_each_data_set(weather_year)
else:
normalised_feedin_for_each_data_set(weather_year)
# Prepare the lists for the loops
set_names = []
set_name = None
pwr = dict()
columns = dict()
replace_str = "coastdat_{0}_{1}_".format(weather_year, category)
for file in os.listdir(coastdat_path):
if file[-2:] == "h5":
set_name = file[:-3].replace(replace_str, "")
set_names.append(set_name)
pwr[set_name] = pd.HDFStore(os.path.join(coastdat_path, file))
columns[set_name] = pwr[set_name]["/A1129087"].columns
# Create DataFrame with MultiColumns to take the results
my_index = pwr[set_name]["/A1129087"].index
my_cols = pd.MultiIndex(
levels=[[], [], []],
codes=[[], [], []],
names=["region", "set", "subset"],
)
feed_in = pd.DataFrame(index=my_index, columns=my_cols)
# Loop over all aggregation regions
# Sum up time series for one region and divide it by the
# capacity of the region to get a normalised time series.
for region in regions:
try:
coastdat_ids = pp.loc[(category, region)].index
except KeyError:
coastdat_ids = []
number_of_coastdat_ids = len(coastdat_ids)
logging.info(
"{0}{3} - {1} ({2})".format(
year, region, number_of_coastdat_ids, weather_year_str
)
)
logging.debug("{0}".format(coastdat_ids))
# Loop over all sets that have been found in the coastdat path
if number_of_coastdat_ids > 0:
for name in set_names:
# Loop over all sub-sets that have been found within each file.
for col in columns[name]:
temp = pd.DataFrame(index=my_index)
# Loop over all coastdat ids, that intersect with the
# actual region.
for coastdat_id in coastdat_ids:
# Create a tmp table for each coastdat id.
coastdat_key = "/A{0}".format(int(coastdat_id))
pp_inst = float(
pp.loc[
(category, region, coastdat_id),
"capacity_{0}".format(year),
]
)
temp[coastdat_key] = pwr[name][coastdat_key][col][
:8760
].multiply(pp_inst)
# Sum up all coastdat columns to one region column
colname = "_".join(col.split("_")[-3:])
feed_in[region, name, colname] = temp.sum(axis=1).divide(
float(
pp.loc[
(category, region), "capacity_{0}".format(year)
].sum()
)
)
feed_in.to_csv(outfile)
for name_of_set in set_names:
pwr[name_of_set].close()
def aggregate_by_region_hydro(pp, regions, year, outfile_name):
"""Aggregate hydro power plants by region."""
hydro = bmwi.bmwi_re_energy_capacity()["water"]
hydro_capacity = pp.loc["Hydro", "capacity_{0}".format(year)].sum()
full_load_hours = hydro.loc[year, "energy"] / hydro_capacity * 1000
hydro_path = os.path.abspath(os.path.join(*outfile_name.split("/")[:-1]))
if not os.path.isdir(hydro_path):
os.makedirs(hydro_path)
idx = pd.date_range(
start="{0}-01-01 00:00".format(year),
end="{0}-12-31 23:00".format(year),
freq="H",
tz="Europe/Berlin",
)
feed_in = pd.DataFrame(columns=regions, index=idx)
feed_in[feed_in.columns] = full_load_hours / len(feed_in)
feed_in.to_csv(outfile_name)
# https://shop.dena.de/fileadmin/denashop/media/Downloads_Dateien/esd/
# 9112_Pumpspeicherstudie.pdf
# S. 110ff
def aggregate_by_region_geothermal(regions, year, outfile_name):
"""Aggregate hydro power plants by region."""
full_load_hours = cfg.get("feedin", "geothermal_full_load_hours")
hydro_path = os.path.abspath(os.path.join(*outfile_name.split("/")[:-1]))
if not os.path.isdir(hydro_path):
os.makedirs(hydro_path)
idx = pd.date_range(
start="{0}-01-01 00:00".format(year),
end="{0}-12-31 23:00".format(year),
freq="H",
tz="Europe/Berlin",
)
feed_in = pd.DataFrame(columns=regions, index=idx)
feed_in[feed_in.columns] = full_load_hours / len(feed_in)
feed_in.to_csv(outfile_name)
def load_feedin_by_region(
year, feedin_type, name, region=None, weather_year=None
):
"""
Parameters
----------
year
feedin_type
name
region
weather_year
Returns
-------
"""
feedin_path = os.path.join(cfg.get("paths", "feedin"), name, str(year))
# Create pattern for the name of the resulting files.
if weather_year is None:
feedin_region_outfile_name = os.path.join(
feedin_path,
cfg.get("feedin", "region_file_pattern").format(
year=year, type=feedin_type, name=name
),
)
else:
feedin_path = os.path.join(feedin_path, "weather_variations")
feedin_region_outfile_name = os.path.join(
feedin_path,
cfg.get("feedin", "region_file_pattern_var").format(
year=year, type=feedin_type, name=name, var=weather_year
),
)
# Add any federal state to get its normalised feed-in.
if feedin_type in ["solar", "wind"]:
fd_in = pd.read_csv(
feedin_region_outfile_name, index_col=[0], header=[0, 1, 2]
)
elif feedin_type in ["hydro", "geothermal"]:
fd_in = pd.read_csv(
feedin_region_outfile_name, index_col=[0], header=[0]
)
else:
fd_in = None
if region is not None and fd_in is not None:
fd_in = fd_in[region]
return fd_in
def windzone_region_fraction(pp, name, year=None, dump=False):
"""
Parameters
----------
pp : pd.DataFrame
year : int
name : str
dump : bool
Returns
-------
Examples
--------
>>> my_fn=os.path.join(cfg.get('paths', 'powerplants'),
... cfg.get('powerplants', 'reegis_pp'))
>>> my_pp=pd.DataFrame(pd.read_hdf(my_fn, 'pp')) # doctest: +SKIP
>>> wz=windzone_region_fraction(my_pp, 'federal_states', 2014,
... dump=False) # doctest: +SKIP
>>> round(float(wz.loc['NI', 1]), 2) # doctest: +SKIP
0.31
"""
pp = pp.loc[pp.energy_source_level_2 == "Wind"]
if year is None:
capacity_col = "capacity"
else:
capacity_col = "capacity_{0}".format(year)
path = cfg.get("paths", "geometry")
filename = "windzones_germany.geojson"
gdf = geometries.load(path=path, filename=filename)
gdf.set_index("zone", inplace=True)
geo_path = cfg.get("paths", "geometry")
geo_file = cfg.get("coastdat", "coastdatgrid_polygon")
coastdat_geo = geometries.load(path=geo_path, filename=geo_file)
coastdat_geo["geometry"] = coastdat_geo.centroid
points = geometries.spatial_join_with_buffer(coastdat_geo, gdf, "windzone")
wz = pd.DataFrame(points["windzone"])
pp = pd.merge(pp, wz, left_on="coastdat2", right_index=True)
pp["windzone"].fillna(0, inplace=True)
pp = pp.groupby([name, "windzone"]).sum()[capacity_col]
wz_regions = pp.groupby(level=0).apply(lambda x: x / float(x.sum()))
if dump is True:
filename = "windzone_{0}.csv".format(name)
fn = os.path.join(cfg.get("paths", "powerplants"), filename)
wz_regions.to_csv(fn, header=False)
return wz_regions
def scenario_feedin(year, name, weather_year=None, feedin_ts=None):
"""
Load solar, wind, hydro and geothermal feed-in for all regions into one
MultiIndex table.
year : int
name : str
weather_year : int or None
feedin_ts : pd.DataFrame or None
"""
if feedin_ts is None:
cols = pd.MultiIndex(levels=[[], []], codes=[[], []])
feedin_ts = pd.DataFrame(columns=cols)
hydro = load_feedin_by_region(
year, "hydro", name, weather_year=weather_year
).reset_index(drop=True)
for region in hydro.columns:
feedin_ts[region, "hydro"] = hydro[region]
geothermal = load_feedin_by_region(
year, "geothermal", name, weather_year=weather_year
).reset_index(drop=True)
for region in geothermal.columns:
feedin_ts[region, "geothermal"] = geothermal[region]
if calendar.isleap(year) and weather_year is not None:
if not calendar.isleap(weather_year):
feedin_ts = feedin_ts.iloc[:8760]
feedin_ts = scenario_feedin_pv(
year, name, feedin_ts=feedin_ts, weather_year=weather_year
)
feedin_ts = scenario_feedin_wind(
year, name, feedin_ts=feedin_ts, weather_year=weather_year
)
return feedin_ts.sort_index(1)
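# Example (sketch): build the 2014 feed-in table for the federal states; the
# region-set name 'federal_states' is an assumption based on the other examples.
#   fs_feedin = scenario_feedin(2014, 'federal_states')
#   fs_feedin.columns   # MultiIndex of (region, technology)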
def scenario_feedin_wind(
year, name, regions=None, feedin_ts=None, weather_year=None
):
"""
Parameters
----------
year
name
regions
feedin_ts
weather_year
Returns
-------
"""
# Get fraction of windzone per region
wz = pd.read_csv(
os.path.join(
cfg.get("paths", "powerplants"), "windzone_{0}.csv".format(name)
),
index_col=[0, 1],
header=None,
)
# Get normalised feedin time series
wind = load_feedin_by_region(
year, "wind", name, weather_year=weather_year
).reset_index(drop=True)
if weather_year is not None:
if calendar.isleap(weather_year) and not calendar.isleap(year):
wind = wind.iloc[:8760]
# Rename columns and remove obsolete level
wind.columns = wind.columns.droplevel(2)
cols = wind.columns.get_level_values(1).unique()
rn = {c: c.replace("coastdat_{0}_wind_".format(year), "") for c in cols}
wind.rename(columns=rn, level=1, inplace=True)
    wind.sort_index(axis=1, inplace=True)
# Get wind turbines by wind zone
wind_types = {float(k): v for (k, v) in cfg.get_dict("windzones").items()}
wind_types = pd.Series(wind_types).sort_index()
if regions is None:
regions = wind.columns.get_level_values(0).unique()
if feedin_ts is None or len(feedin_ts.index) == 0:
cols = pd.MultiIndex(levels=[[], []], codes=[[], []])
feedin_ts = pd.DataFrame(index=wind.index, columns=cols)
for region in regions:
frac = (
pd.merge(
wz.loc[region],
| pd.DataFrame(wind_types) | pandas.DataFrame |
from strava_segment_rank.util.strava_api.strava_api_helpers import compute_athlete_segment_frequency
from strava_segment_rank.util.strava_selenium.strava_selenium_helpers import strava_scrape_segment_leaderboard
from strava_segment_rank.util.strava_selenium.strava_selenium_helpers import strava_login
from strava_segment_rank.util.strava_api.strava_api_helpers import authenticate
from config import (
strava_login_url,
strava_segment_leaderboard_url,
)
from stravaio import StravaIO
from selenium import webdriver
import os
import pandas
import datetime
def strava_segment_rank(start_date, end_date, top_k):
"""
:param start_date: MM/DD/YYYY
:type start_date: str
:param end_date: MM/DD/YYYY
:type end_date: str
:param top_k: top k attempted segments
:type top_k: int
:return: Pandas DataFrame
"""
driver = webdriver.Chrome(os.environ['CHROMEDRIVER_PATH'])
strava_login(
driver,
strava_login_url,
os.environ['STRAVA_USERNAME'],
os.environ['STRAVA_PASSWORD']
)
client = StravaIO(
access_token=authenticate(
os.environ['STRAVA_CLIENT_ID'],
os.environ['STRAVA_CLIENT_SECRET']
)
)
start_date = datetime.datetime(
int(start_date.split('/')[2]),
int(start_date.split('/')[0]),
int(start_date.split('/')[1]),
).timestamp()
end_date = datetime.datetime(
int(end_date.split('/')[2]),
int(end_date.split('/')[0]),
int(end_date.split('/')[1]),
).timestamp()
segment_frequencies = compute_athlete_segment_frequency(client, int(start_date), int(end_date))
segment_frequencies_df = pandas.DataFrame(
{
'segment_id': segment_frequencies.keys(),
'frequency': segment_frequencies.values()
}
)
segment_leadboard_datas = []
for segment_id, frequency in segment_frequencies.items():
        print('Scraping segment ', str(segment_id))
segment_leaderboard_data = strava_scrape_segment_leaderboard(
driver,
segment_id,
strava_segment_leaderboard_url
)
segment_leadboard_datas.append(segment_leaderboard_data)
segment_leaderboard_df = | pandas.DataFrame(segment_leadboard_datas) | pandas.DataFrame |
import os
import argparse
import tables as h5
import pandas as pd
import numpy as np
import gvar as gv
import matplotlib as mpl
import matplotlib.pyplot as plt
# emcee module for the Madras-Sokal (integrated) autocorrelation time
from emcee import autocorr
# Figure formatting for paper
fig_width = 6.75 # in inches, 2x as wide as APS column
gr = 1.618034333 # golden ratio
fig_size = (fig_width, fig_width / gr)
fig_size2 = (fig_width, fig_width * 1.6)
plt_axes = [0.14,0.14,0.85,0.85]
axes1 = [0.13,0.13,0.69,0.85]
axes2 = [0.821,0.13,0.12,0.85]
fs_text = 18 # font size of text
fs_leg = 14 # legend font size
mrk_size = '5' # marker size
tick_size = 14 # tick size
lw = 1 # line width
# saving as rc params
mpl.rcParams['figure.figsize'] = fig_size
mpl.rcParams['lines.linewidth'] = lw
mpl.rcParams['lines.markersize'] = mrk_size
mpl.rcParams['font.size'] = fs_text
mpl.rcParams['legend.fontsize'] = fs_leg
mpl.rcParams['axes.labelsize'] = fs_text
mpl.rcParams['xtick.labelsize'] = tick_size
mpl.rcParams['ytick.labelsize'] = tick_size
mpl.rcParams['backend'] = 'Agg'
# streams and cfg offsets for the 3 ensembles
streams = {
'a15m135XL_s':['b','c','d','e'],
'a09m135_s' :['a','b'],
'a06m310L_s' :['b','c'],
}
## offset in units of MDTU for visualization
offset = {
'a15m135XL_s':[0, 2700, 5400, 8100],
'a09m135_s' :[0, 3422],
'a06m310L_s' :[0, 3200],
}
## stitching of a15 pbp
stitching = {
'a15m135XL_s': [250,5],
'a09m135_s' : [0,0],
'a06m310L_s' : [0,0],
}
## measurement frequency in Len.
freq = {
'a15m135XL_s':25,
'a09m135_s' :4,
'a06m310L_s' :3,
}
pbp_reps = 5 # number of stochastic point sources for PBP
labels = {'a15m135XL_s':'a15m135XL', 'a09m135_s':'a09m135', 'a06m310L_s':'a06m310L'}
lattices = {'a15m135XL':'a15', 'a09m135':'a09', 'a06m310L':'a06'}
colors_a = {'a15':'#ec5d57', 'a12':'#70bf41', 'a09':'#51a7f9', 'a06':'#00FFFF'}
# we have a total of 6 observables
## The first 4 are measured every MDTU
## The last 2 are measured every TRAJ
obs_all = ['deltaS','pbp_c','pbp_s','pbp_l','acc_rej','plaq']
# forget about deltaS because it's complicated with a15m135
obs_a15 = ['pbp_c','pbp_s','pbp_l','acc_rej','plaq']
# we use MDTU and TRAJ as indices for observables
index_all = ['mdt','traj']
# for a15m135 pbp there is a separate index!!
index_a15 = ['mdt_pbp','traj']
####
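# Hedged sketch (added; not part of the original script): the emcee.autocorr
# module imported above is typically used like this to estimate the integrated
# (Madras-Sokal) autocorrelation time of a single observable history; the
# helper name and tol=0 are illustrative choices.
def _integrated_autocorr_time_sketch(series):
    # integrated_time returns one estimate per dimension; we pass a 1-d history
    return float(autocorr.integrated_time(np.asarray(series, dtype=float), tol=0)[0])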
def plot_pbp(ensemble,dataset,column):
colors = ['r','orange','g','b','m']
if len(streams[ensemble]) == 1:
cs = [colors_a[lattices[labels[ensemble]]]]
else:
cs = colors[::int(len(colors)/len(streams[ensemble]))]
gs = mpl.gridspec.GridSpec(1, 2,width_ratios=[5, 1])
gs.update(wspace=0.00)
#---
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
#---
binned_data = np.array([])
for i,s in enumerate(streams[ensemble]):
# print every saved config
idx = np.concatenate((np.arange(0,stitching[ensemble][0]),
np.arange(stitching[ensemble][0],dataset.loc[s].shape[0],freq[ensemble])))
data = dataset.loc[s].iloc[idx][column].values
index = np.concatenate((dataset.loc[s].iloc[idx].index[:stitching[ensemble][0]].values,
dataset.loc[s].iloc[idx].index[stitching[ensemble][0]:].values+stitching[ensemble][1]))+offset[ensemble][i]
avg = np.mean(data)
#---
ax0.plot(index,data,label=s,color=cs[i],marker='s',mfc='None',alpha=.5)
ax0.hlines(y=avg,xmin=index.min(),xmax=index.max(),color=cs[i],linestyles='dashed',linewidths=2)
#---
binned_data = np.append(binned_data,data)
#---
binned_data = binned_data.reshape((-1,len(streams[ensemble])),order='F')
bins = np.linspace(start = binned_data.min()-binned_data.std(), stop = binned_data.max()+binned_data.std(), num = int(binned_data.shape[0]/10))
ax1.hist(binned_data,bins=bins,color=cs[0:len(streams[ensemble])],orientation='horizontal',histtype='bar',align='left', stacked=True, fill=True,alpha=0.7)
# Remove the inner label numbers of the histograms
nullfmt = mpl.ticker.NullFormatter()
ax1.yaxis.set_major_formatter(nullfmt)
# Remove the inner ticks on the y axis
nulllct= mpl.ticker.NullLocator()
ax1.yaxis.set_major_locator(nulllct)
ax1.set_ylim(bins.min(),bins.max())
#---
ax0.set_ylim(bins.min(),bins.max())
ax0.set_xlabel('HMC trajectory')
ax0.text(0.05,0.1,labels[ensemble],transform=ax0.transAxes,bbox=dict(boxstyle="round",facecolor='None'),verticalalignment='center')
ax0.legend(loc=2,ncol=4)
plt.tight_layout()
figname = os.path.join('figures',ensemble+'_'+column+'.pdf')
print('Saving to {}'.format(figname))
plt.savefig(figname,transparent=True)
######
def parsing_args():
parser = argparse.ArgumentParser(description='Extract HMC observable for an ensemble')
parser.add_argument('--ens', type=str, default='a06m310L_s', help='pick an ensemble [%(default)s]')
parser.add_argument('--autocorr', default=False, action='store_const', const=True, help='Compute autocorr times? [%(default)s]')
parser.add_argument('--plots', default=False, action='store_const', const=True, help='Plot HMC histories? [%(default)s]')
### Setup
args = parser.parse_args()
ensemble = args.ens
acorr = args.autocorr
plot = args.plots
return ensemble, acorr, plot
def main():
ensemble, acorr, plot = parsing_args()
filename_obs = 'hmc_obs_'+ensemble+'.h5'
print('Getting data from {}'.format(filename_obs))
if (not os.path.isfile(filename_obs)):
print('File {} not found'.format(filename_obs))
return
print('Extract HMC observable for {}'.format(ensemble))
prex = labels[ensemble]
print('This ensemble has {} streams: {}'.format(len(streams[ensemble]),streams[ensemble]))
### treat a15 separately because of pbp measurements
if ensemble == 'a15m135XL_s':
obs = obs_a15
index = index_a15
else:
obs = obs_all
index = index_all
### Get data
dataset_mdt = dict()
dataset_trj = dict()
dset_mdt = dict()
dset_trj = dict()
## Open file
f = h5.open_file(filename_obs)
for i,s in enumerate(streams[ensemble]):
node_name = "/{}_{}/{}".format(prex,s,index[0])
index_mdt = f.get_node(node_name).read()
node_name = "/{}_{}/{}".format(prex,s,index[1])
index_trj = f.get_node(node_name).read()
for o in obs:
node_name = "/{}_{}/{}".format(prex,s,o)
data = f.get_node(node_name).read()
if o in ['pbp_c','pbp_s','pbp_l']:
data = np.reshape(data,(-1,pbp_reps*2))
dataset_mdt[o] = data.mean(axis=1)
if o in ['deltaS']:
dataset_mdt[o] = data
if o in ['acc_rej']:
dataset_trj[o] = data[:,1]
if o in ['plaq']:
dataset_trj[o] = data.mean(axis=1)/3.
dset_mdt[s] = pd.DataFrame(data=dataset_mdt,index=index_mdt)
dset_trj[s] = | pd.DataFrame(data=dataset_trj,index=index_trj) | pandas.DataFrame |
import jax.numpy as np
import qtensornetwork.components
import qtensornetwork.circuit
import qtensornetwork.ansatz
import qtensornetwork.util
import qtensornetwork.optimizer
from qtensornetwork.gate import *
from jax.config import config
config.update("jax_enable_x64", True)
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
def generate_binary_mnist(f_label, s_label, train_num, test_num, width, height):
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
df = pd.DataFrame(columns=["label"])
df["label"] = y_train.reshape([-1])
list_f = df.loc[df.label==f_label].sample(n=train_num)
list_s = df.loc[df.label==s_label].sample(n=train_num)
label_list = pd.concat([list_f, list_s])
label_list = label_list.sort_index()
label_idx = label_list.index.values
train_label = label_list.label.values
x_train = x_train[label_idx]
    y_train = train_label
y_train = np.array([[0, 1] if i==f_label else [1, 0] for i in y_train])
df = | pd.DataFrame(columns=["label"]) | pandas.DataFrame |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import pymysql.cursors
import os
import io
import math
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
from pandas.io import sql
#from sqlalchemy import create_engine
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # plt.hist was commented out for the web server; np.histogram yields the same bin edges
        n, pint = np.histogram(x, num_bins)
        n, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
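        # Added note: the OLS above fits
        #   qty = intercept
        #         + diffpriceprodvscomp_param * (price - comp_price)
        #         + promo1_param * promo1 + promo2_param * promo2
        #         + week_param * log(week_index)
        # and the coefficients are unpacked into the globals above for reuse below.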
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
                function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas= | pd.DataFrame(reshapedf) | pandas.DataFrame |
# We use word2vec instead of GloVe embeddings in this file
# The word2vec model is self-trained on our own corpus below
import argparse
import json
import os
import pickle
from itertools import chain
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from gensim.models import Word2Vec
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Bidirectional, Embedding, Dropout, SpatialDropout1D, Dense, LSTM, \
BatchNormalization
from tensorflow.python.keras.optimizer_v2.adam import Adam
from tensorflow.python.keras.utils.vis_utils import plot_model
from tensorflow.python.ops.init_ops import Constant
from tqdm import trange
# Set up a argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=300, required=False)
parser.add_argument("--bs", type=int, default=64, required=False)
parser.add_argument("--lr", type=float, default=0.001, required=False)
parser.add_argument("--model", type=str, choices=["lstm_bilstm", "bilstm", "bilstm_bilstm"], default="bilstm_bilstm",
required=False, help="The model to train the NER")
parser.add_argument("--layers", type=int, default=2, required=False, help="The number of BiLSTM layers you want to try")
args = parser.parse_args()
print(args)
# Set up some parameter we can use
epochs = args.epoch
BS = args.bs
LR = args.lr
# Load the data for three splits
train_dict = pickle.load(open("./data/train.pkl", 'rb'))
val_dict = pickle.load(open("./data/val.pkl", 'rb'))
test_dict = pickle.load(open("./data/test.pkl", 'rb'))
total_list = train_dict["word_seq"] + val_dict["word_seq"] + test_dict["word_seq"]
model = Word2Vec(total_list, size=300, window=5, min_count=1, workers=4)
# model.train(val_dict["word_seq"], total_examples=2950, epochs=5)
# model.train(test_dict["word_seq"], total_examples=2950, epochs=5)
print("Train word2vec model down.")
# Give every word in our corpus its word2vec embedding; randomly initialize vectors for missing words
encoded_dict = {}
count = 0
total = 0
word2vec_keys = model.wv.vocab.keys()
dimension = 300
for i in [train_dict, val_dict, test_dict]:
for j in trange(len(i['word_seq'])):
for word in i['word_seq'][j]:
if word not in word2vec_keys:
encoded_dict[word] = np.random.rand(1, dimension)[0]
count += 1
total += 1
else:
encoded_dict[word] = model.wv[word]
total += 1
# Check how many words were found in the word2vec vocabulary and how many were randomly initialized
print("words not found {}".format(count))
print("words total {}".format(total))
print(len(encoded_dict))
if not os.path.exists('./word2vec'):
os.mkdir('./word2vec/')
np.save("./word2vec/encoded_dict_{}d.npy".format(dimension), encoded_dict)
# Build a dict that records the word to a single unique integer, and our encoded matrix for word embedding
encoded_word2id = {}
encoded_matrix = np.zeros((len(encoded_dict.keys()), dimension), dtype=float)
for i, word in enumerate(encoded_dict.keys()):
encoded_word2id[word] = i
encoded_matrix[i] = encoded_dict[word]
print(encoded_matrix.shape)
np.save("./word2vec/encoded_matrix_{}d.npy".format(dimension), encoded_matrix)
# Build the tag <--> index dictionary and add PAD tag into it
tag_list = list(set(chain(*train_dict["tag_seq"])))
tag_to_index_dict = {t: i for i, t in enumerate(tag_list)}
index_to_tag_dict = {i: t for i, t in enumerate(tag_list)}
# save out dictionary for generation
if not os.path.exists('./lstm_model'):
os.mkdir('./lstm_model/')
if not os.path.exists('./lstm_results'):
os.mkdir('./lstm_results')
np.save("./lstm_model/model_tag2id_e{}_bs{}.npy".format(epochs, BS), tag_to_index_dict)
np.save("./lstm_model/model_id2tag_e{}_bs{}.npy".format(epochs, BS), index_to_tag_dict)
# Load some parameters for deep learning
embedding_dim = dimension
num_words = len(encoded_dict)
input_length = 128
n_tags = len(tag_to_index_dict)
print(embedding_dim, num_words, input_length, n_tags)
# Set our model
def get_bi_lstm_model():
model = Sequential()
model.add(
Embedding(num_words, embedding_dim, embeddings_initializer=Constant(encoded_matrix), input_length=input_length,
trainable=True))
model.add(SpatialDropout1D(0.2))
model.add(BatchNormalization())
if args.model == "lstm_bilstm":
model.add(LSTM(128, return_sequences=True))
elif args.model == "bilstm_bilstm":
for _ in range(args.layers):
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(BatchNormalization())
model.add(Dropout(0.15))
adam = Adam(lr=LR, beta_1=0.9, beta_2=0.999)
model.add(Dense(units=n_tags, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.summary()
return model
# Define the function to train our model
def train_model(X, y, val_X, val_y, model):
hist = model.fit(X, y, batch_size=BS, verbose=1, epochs=epochs, validation_data=(val_X, val_y), shuffle=True)
return hist
# build our model and print the summary
model_bi_lstm_lstm = get_bi_lstm_model()
try:
plot_model(model_bi_lstm_lstm, show_shapes=True)
except ImportError:
pass
# Use the dict we've prepared before to do the embedding and transformation
train_input = np.array(
[[encoded_word2id[word] for word in train_dict['word_seq'][i]] for i in range(len(train_dict['word_seq']))])
val_input = np.array(
[[encoded_word2id[word] for word in val_dict['word_seq'][i]] for i in range(len(val_dict['word_seq']))])
test_input = np.array(
[[encoded_word2id[word] for word in test_dict['word_seq'][i]] for i in range(len(test_dict['word_seq']))])
train_output = np.array(
[[tag_to_index_dict[tag] for tag in train_dict['tag_seq'][i]] for i in range(len(train_dict['tag_seq']))])
val_output = np.array(
[[tag_to_index_dict[tag] for tag in val_dict['tag_seq'][i]] for i in range(len(val_dict['tag_seq']))])
# Check the shape of our input, their first dimension must be the same
print(train_input.shape, val_input.shape, test_input.shape)
print(train_output.shape, val_output.shape)
# Train our model and save the loss recording
history = train_model(train_input, train_output, val_input, val_output, model_bi_lstm_lstm)
# Do some visualization
sns.set_style(style="darkgrid")
sns.set(font_scale=1.75)
plt.rcParams["figure.figsize"] = (30, 15)
mpl.use('Agg')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
plt.savefig(
'./lstm_results/accuracy_BS{}E{}LR{}_{}d_{}layer.png'.format(BS, epochs, LR, dimension,
args.layers))
plt.clf()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
plt.savefig(
'./lstm_results/model_loss_BS{}E{}LR{}_{}d_{}layers.png'.format(BS, epochs, LR,
dimension, args.layers))
print("save images down.")
# Save the validation accuracy for us to find the best model trained
np.save(
'./lstm_results/model_results_val_BS{}E{}LR{}_{}d_{}layers.npy'.format(BS, epochs, LR,
dimension, args.layers),
history.history['val_accuracy'])
print("save history validation data down.")
# Save our trained model and open up a answer csv, initialize all the id
# try:
# model_bi_lstm_lstm.save(
# './lstm_model/model_BS{}E{}LR{}_{}d_{}layers.pkl'.format(BS, epochs, LR, dimension, args.layers))
# except:
# pass
answer = | pandas.DataFrame(columns=['id', 'labels']) | pandas.DataFrame |
###
# This code provides a way to approximate the probability of
# finding two features together using the von Neumann diffusion kernel.
# It also plots a cluster heatmap of the normalized von Neumann diffusion kernel.
#
# by: <NAME>, 09/15/2016
#
# required modules:
# scipy, numpy, matplotlib, pandas, seaborn
#
#
# How it works:
# make your dataset a comma-separated file with features as columns and samples as rows
# include the feature names at the top of each column
# then run:
# python pairProbabilty.py {path/to/dataset}.csv
#
###
from scipy.linalg import inv, eigvals
import numpy as np
from matplotlib import pylab as pl
import pandas as pd # for data frame handling
import seaborn as sns # for pretty plotting
from optparse import OptionParser # to run in the terminal
usage = 'usage: %prog [options] <path_to_data>'
parser = OptionParser(usage)
(options,args) = parser.parse_args()
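# Illustrative sketch (added; not part of the original script): one common way
# to build a normalized von Neumann diffusion kernel K = (I - alpha*A)^-1 from
# a binary sample-by-feature matrix X. The helper name, the co-occurrence
# construction and the 0.9 safety factor are assumptions for illustration.
def _von_neumann_kernel_sketch(X, alpha_frac=0.9):
    X = np.asarray(X, dtype=float)
    A = X.T.dot(X)                      # feature co-occurrence matrix
    np.fill_diagonal(A, 0.0)            # ignore self co-occurrence
    rho = max(abs(eigvals(A)))          # spectral radius keeps the Neumann series convergent
    K = inv(np.eye(A.shape[0]) - (alpha_frac / rho) * A)
    d = np.sqrt(np.diag(K))
    return K / np.outer(d, d)           # normalize so diagonal entries are 1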
def main():
df = | pd.read_csv(args[0]) | pandas.read_csv |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
        self.assertFalse(DataframeType({"value": df}).not_contains_all({
            "target": "var1",
            "comparator": "var2",
        }))
        self.assertTrue(DataframeType({"value": df}).not_contains_all({
            "target": "var2",
            "comparator": "var1",
        }))
        self.assertFalse(DataframeType({"value": df}).not_contains_all({
            "target": "var2",
            "comparator": ["test", "value"],
        }))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
                        .equals(pandas.Series([True, True, True, True, True])))
"""Filter copy number segments."""
import functools
import logging
import numpy as np
import pandas as pd
import hashlib
from .descriptives import weighted_median
def require_column(*colnames):
"""Wrapper to coordinate the segment-filtering functions.
Verify that the given columns are in the CopyNumArray the wrapped function
takes. Also log the number of rows in the array before and after filtration.
"""
if len(colnames) == 1:
msg = "'{}' filter requires column '{}'"
else:
msg = "'{}' filter requires columns " + \
", ".join(["'{}'"] * len(colnames))
def wrap(func):
@functools.wraps(func)
def wrapped_f(segarr):
filtname = func.__name__
if any(c not in segarr for c in colnames):
raise ValueError(msg.format(filtname, *colnames))
result = func(segarr)
logging.info("Filtered by '%s' from %d to %d rows",
filtname, len(segarr), len(result))
return result
return wrapped_f
return wrap
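# Illustrative sketch (not part of the original module): how `require_column`
# is meant to wrap a segment filter. `toy_filter` and its 'weight' requirement
# are made up for demonstration only.
#
#   @require_column('weight')
#   def toy_filter(segarr):
#       """Keep only rows carrying positive weight."""
#       return segarr[segarr['weight'] > 0]
#
# Calling toy_filter(segarr) on an array lacking a 'weight' column raises
# ValueError; otherwise the row counts before and after filtering are logged.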
def squash_by_groups(cnarr, levels, by_arm=False):
"""Reduce CopyNumArray rows to a single row within each given level."""
# Enumerate runs of identical values
change_levels = enumerate_changes(levels)
assert (change_levels.index == levels.index).all()
assert cnarr.data.index.is_unique
assert levels.index.is_unique
assert change_levels.index.is_unique
if by_arm:
# Enumerate chromosome arms
arm_levels = []
for i, (_chrom, cnarm) in enumerate(cnarr.by_arm()):
arm_levels.append(np.repeat(i, len(cnarm)))
change_levels += np.concatenate(arm_levels)
else:
# Enumerate chromosomes
chrom_names = cnarr['chromosome'].unique()
chrom_col = (cnarr['chromosome']
.replace(chrom_names, np.arange(len(chrom_names))))
change_levels += chrom_col
data = cnarr.data.assign(_group=change_levels)
groupkey = ['_group']
if 'cn1' in cnarr:
# Keep allele-specific CNAs separate
data['_g1'] = enumerate_changes(cnarr['cn1'])
data['_g2'] = enumerate_changes(cnarr['cn2'])
groupkey.extend(['_g1', '_g2'])
data = (data.groupby(groupkey, as_index=False, group_keys=False, sort=False)
.apply(squash_region)
.reset_index(drop=True))
return cnarr.as_dataframe(data)
def enumerate_changes(levels):
"""Assign a unique integer to each run of identical values.
Repeated but non-consecutive values will be assigned different integers.
"""
return levels.diff().fillna(0).abs().cumsum().astype(int)
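# Worked example (illustration only, assuming a plain pandas Series as input):
#   enumerate_changes(pd.Series([1, 1, 0, 0, 1, 1]))
# yields 0, 0, 1, 1, 2, 2 -- the second run of 1s gets a new integer because
# the cumulative sum of absolute differences increases at every value change.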
def squash_region(cnarr):
"""Reduce a CopyNumArray to 1 row, keeping fields sensible.
Most fields added by the `segmetrics` command will be dropped.
"""
assert 'weight' in cnarr
out = {'chromosome': [cnarr['chromosome'].iat[0]],
'start': cnarr['start'].iat[0],
'end': cnarr['end'].iat[-1],
}
region_weight = cnarr['weight'].sum()
if region_weight > 0:
out['log2'] = np.average(cnarr['log2'], weights=cnarr['weight'])
else:
out['log2'] = np.mean(cnarr['log2'])
out['gene'] = ','.join(cnarr['gene'].drop_duplicates())
out['probes'] = cnarr['probes'].sum() if 'probes' in cnarr else len(cnarr)
out['weight'] = region_weight
if 'depth' in cnarr:
if region_weight > 0:
out['depth'] = np.average(cnarr['depth'], weights=cnarr['weight'])
else:
out['depth'] = np.mean(cnarr['depth'])
if 'baf' in cnarr:
if region_weight > 0:
out['baf'] = np.average(cnarr['baf'], weights=cnarr['weight'])
else:
out['baf'] = np.mean(cnarr['baf'])
if 'cn' in cnarr:
if region_weight > 0:
out['cn'] = weighted_median(cnarr['cn'], cnarr['weight'])
else:
out['cn'] = np.median(cnarr['cn'])
if 'cn1' in cnarr:
if region_weight > 0:
out['cn1'] = weighted_median(cnarr['cn1'], cnarr['weight'])
else:
out['cn1'] = np.median(cnarr['cn1'])
out['cn2'] = out['cn'] - out['cn1']
if 'p_bintest' in cnarr:
# Only relevant for single-bin segments, but this seems safe/conservative
out['p_bintest'] = cnarr['p_bintest'].max()
return pd.DataFrame(out)
@require_column('cn')
def ampdel(segarr):
"""Merge segments by amplified/deleted/neutral copy number status.
Follow the clinical reporting convention:
- 5+ copies (2.5-fold gain) is amplification
- 0 copies is homozygous/deep deletion
- CNAs of lesser degree are not reported
This is recommended only for selecting segments corresponding to
actionable, usually focal, CNAs. Any real and potentially informative but
lower-level CNAs will be dropped.
"""
levels = np.zeros(len(segarr))
levels[segarr['cn'] == 0] = -1
levels[segarr['cn'] >= 5] = 1
# or: segarr['log2'] >= np.log2(2.5)
cnarr = squash_by_groups(segarr, pd.Series(levels))
return cnarr[(cnarr['cn'] == 0) | (cnarr['cn'] >= 5)]
@require_column('depth')
def bic(segarr):
"""Merge segments by Bayesian Information Criterion.
See: BIC-seq (Xi 2011), doi:10.1073/pnas.1110574108
"""
return NotImplemented
@require_column('ci_lo', 'ci_hi')
def ci(segarr):
"""Merge segments by confidence interval (overlapping 0).
Segments with lower CI above 0 are kept as gains, upper CI below 0 as
losses, and the rest with CI overlapping zero are collapsed as neutral.
"""
levels = np.zeros(len(segarr))
# if len(segarr) < 10:
# logging.warning("* segarr :=\n%s", segarr)
# logging.warning("* segarr['ci_lo'] :=\n%s", segarr['ci_lo'])
# logging.warning("* segarr['ci_lo']>0 :=\n%s", segarr['ci_lo'] > 0)
levels[segarr['ci_lo'].values > 0] = 1
levels[segarr['ci_hi'].values < 0] = -1
    return squash_by_groups(segarr, pd.Series(levels))
from manifesto_data import get_manifesto_texts
import warnings,json,gzip,re
import os, glob
from scipy.sparse import hstack, vstack
import scipy as sp
import pandas as pd
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer, CountVectorizer
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score, classification_report, confusion_matrix
from scipy.special import logit
label2rightleft = {
'right': [104,201,203,305,401,402,407,414,505,601,603,605,606],
'left': [103,105,106,107,403,404,406,412,413,504,506,701,202]
}
EXPERIMENT_RESULT_FILENAME = "active_learning_curves_50_reps_margin.csv"
def print_classification_report(true,pred,fn='report.txt'):
    s = classification_report(true, pred) + \
        "\n\n" + "\n".join([",".join(map(str, l)) for l in confusion_matrix(true, pred)])
    open(fn, 'w').write(s)
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
def model_selection(X,y):
'''
Runs model selection, returns fitted classifier
'''
# turn off warnings, usually there are some labels missing in the training set
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# TODO: pipeline parameters (hashing vectorizer dimensionalities etc.) should also be searchable here
text_clf = SGDClassifier(loss="log", max_iter=10)
parameters = {'alpha': (np.logspace(-6,-3,6)).tolist()}
# perform gridsearch to get the best regularizer
gs_clf = GridSearchCV(text_clf, parameters, cv=2, n_jobs=-1,verbose=1)
gs_clf.fit(X, y)
# report(gs_clf.cv_results_)
return gs_clf.best_estimator_
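# Usage sketch (illustration only; X and y stand for any scipy-sparse feature
# matrix and label vector, e.g. the outputs of load_data below):
#   clf = model_selection(X, y)
#   predicted = clf.predict(X)
# The returned object is the SGDClassifier refit with the best regularizer
# found by the grid search over `alpha`.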
def load_data_bow(folder = "../data/manifesto",
min_label_count = 1000,
left_right = False,
max_words = int(1e6)
):
df = pd.concat([pd.read_csv(fn) for fn in glob.glob(os.path.join(folder,"*.csv"))]).dropna(subset=['cmp_code','content'])
df = df.sample(frac=1.0)
if left_right:
# replace_with = ['left', 'right', 'neutral']
replace_with = [-1, 1, 0]
label_to_replace = [
label2rightleft['left'],
label2rightleft['right'],
list(set(df.cmp_code.unique()) - set(label2rightleft['left'] + label2rightleft['right']))
]
for rep, label in zip(replace_with, label_to_replace):
df.cmp_code.replace(label, rep, inplace = True)
label_hist = df.cmp_code.value_counts()
valid_labels = label_hist[label_hist > min_label_count].index
df = df[df.cmp_code.isin(valid_labels)]
vect = CountVectorizer(max_features=max_words,binary=True).fit(df.content.values)
    return vect.transform(df.content.values), df.cmp_code.apply(int).to_numpy(), vect.vocabulary_, df.content.values, vect
def load_data(folder = "../data/manifesto",
min_label_count = 1000,
left_right = False):
df = pd.concat([pd.read_csv(fn) for fn in glob.glob(os.path.join(folder,"*.csv"))]).dropna(subset=['cmp_code','content'])
if left_right:
# replace_with = ['left', 'right', 'neutral']
replace_with = [-1, 1, 0]
label_to_replace = [
label2rightleft['left'],
label2rightleft['right'],
list(set(df.cmp_code.unique()) - set(label2rightleft['left'] + label2rightleft['right']))
]
for rep, label in zip(replace_with, label_to_replace):
df.cmp_code.replace(label, rep, inplace = True)
label_hist = df.cmp_code.value_counts()
valid_labels = label_hist[label_hist > min_label_count].index
df = df[df.cmp_code.isin(valid_labels)]
vect = HashingVectorizer()
    return vect.transform(df.content.values), df.cmp_code.apply(int).to_numpy()
def smooth_probas(label_probas, eps = 1e-9):
# smoothing probabilities to avoid infs
return np.minimum(label_probas + eps, 1.)
def prioritize_samples(label_probas, strategy="margin_sampling", include=[]):
'''
Some sampling strategies as in Settles' 2010 Tech Report
'''
if len(include) > 0:
exclude = list(set(range(label_probas.shape[-1]))-set(include))
excluded_max = label_probas[:, exclude].max(axis=1)
included = label_probas[:, include]
included.sort(axis=1)
included_margin = np.diff(included,axis=1)[:,-1]
priorities = (included_margin + excluded_max).argsort()
else:
if strategy == "entropy_sampling":
entropy = -(label_probas * np.log(label_probas)).sum(axis=1)
priorities = entropy.argsort()[::-1]
elif strategy == "margin_sampling":
label_probas.sort(axis=1)
priorities = (label_probas[:,-1] - label_probas[:,-2]).argsort()
elif strategy == "uncertainty_sampling":
uncertainty_sampling = 1 - label_probas.max(axis=1)
priorities = uncertainty_sampling.argsort()[::-1]
elif strategy == "random":
priorities = np.random.permutation(label_probas.shape[0])
return priorities
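# Worked example of margin sampling (illustration only, fabricated probabilities):
#   probas = np.array([[0.50, 0.45, 0.05],   # margin 0.05 -> most uncertain
#                      [0.90, 0.05, 0.05]])  # margin 0.85 -> least uncertain
#   prioritize_samples(probas.copy(), strategy="margin_sampling")
# returns array([0, 1]): the row with the smallest gap between its two highest
# class probabilities is labelled first. Note the function sorts `label_probas`
# in place, hence the defensive .copy() here.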
def compute_active_learning_curve(
X_tolabel,
y_tolabel,
X_validation,
y_validation,
percentage_samples=[1,2,5,10,15,30,50,75,100],
strategies = ["random", 'margin_sampling']):
'''
Emulate active learning with annotators:
for a given training, test and validation set, get the validation error by
training on training data only, then the score when trained on training and
test data and then the increasing validation score when adding more labelled
data, either with random selection or with active learning. The results are
the increase in scores with the respective sampling policy
'''
print('Computing active learning curve:')
clf_trained = model_selection(X_tolabel, y_tolabel)
baseline_high = accuracy_score(y_validation, clf_trained.predict(X_validation))
print('\tBaseline on 100% of data {}'.format(baseline_high))
learning_curves = []
for strategy in strategies:
# initially use random priorities, as we don't have a trained model
priorities = np.random.permutation(X_tolabel.shape[0]).tolist()
N = X_tolabel.shape[0]
all_indices = set(range(N))
labelled = []
for percentage in percentage_samples:
n_training_samples = int((percentage/100.) * N) - len(labelled)
labelled += priorities[:n_training_samples]
X_labelled = X_tolabel[labelled,:]
y_labelled = y_tolabel[labelled]
clf_current = model_selection(X_labelled, y_labelled)
# get the not yet labeled data point indices
to_label = list(all_indices - set(labelled))
current_score = accuracy_score(y_validation, clf_current.predict(X_validation))
learning_curves.append(
{
'percentage_samples': percentage,
'strategy': strategy,
'score': current_score
}
)
print('\t(Strategy {}) Trained on {} samples ({}%) from test set - reached {} ({}%)'.format(strategy, n_training_samples, percentage, current_score, np.round(100.0*(current_score - learning_curves[0]['score'])/(baseline_high-learning_curves[0]['score']))))
if len(to_label) > 0:
# prioritize the not yet labeled data points
priorities = prioritize_samples(clf_current.predict_proba(X_tolabel[to_label,:]), strategy)
# get indices in original data set for not yet labeled data
priorities = [to_label[idx] for idx in priorities]
return pd.DataFrame(learning_curves)
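# Usage sketch (illustration only): the intended call pattern is to hold out a
# validation split and let the function emulate annotation of the rest, e.g.
#   X, y = load_data(left_right=True)
#   X_pool, X_val, y_pool, y_val = train_test_split(X, y, test_size=0.2)
#   curves = compute_active_learning_curve(X_pool, y_pool, X_val, y_val)
#   curves.to_csv(EXPERIMENT_RESULT_FILENAME, index=False)
# `curves` is a DataFrame with one row per (strategy, percentage) pair.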
def compute_informed_active_learning_curve(
X_tolabel,
y_tolabel,
X_validation,
y_validation,
percentage_samples=[1,30,50,75,100],
strategies = ['random', 'margin_sampling', 'informed'],
include_labels = [-1,1]):
'''
Emulate active learning with annotators, but neglect some classes during sampling:
'''
def evaluate(y,yhat,included=[-1,0,1]):
true, predicted = zip(*[(t,p) for t,p in zip(y, yhat) if t in included])
return accuracy_score(true, predicted)
labels = np.unique(y_tolabel)
include = [idx for idx, label in enumerate(labels) if label in include_labels]
exclude = [idx for idx, label in enumerate(labels) if label not in include_labels]
print('Computing active learning curve:')
clf_trained = model_selection(X_tolabel, y_tolabel)
baseline_high = evaluate(y_validation, clf_trained.predict(X_validation))
print('\tBaseline on 100% of data {}'.format(baseline_high))
learning_curves = []
for strategy in strategies:
# initially use random priorities, as we don't have a trained model
priorities = np.random.permutation(X_tolabel.shape[0]).tolist()
N = X_tolabel.shape[0]
all_indices = set(range(N))
labelled = []
for percentage in percentage_samples:
n_training_samples = int((percentage/100.) * N) - len(labelled)
labelled += priorities[:n_training_samples]
X_labelled = X_tolabel[labelled,:]
y_labelled = y_tolabel[labelled]
clf_current = model_selection(X_labelled, y_labelled)
# get the not yet labeled data point indices
to_label = list(all_indices - set(labelled))
y_validation_predicted = clf_current.predict(X_validation)
current_score = evaluate(y_validation, y_validation_predicted)
learning_curves.append(
{
'percentage_samples': percentage,
'strategy': strategy,
'score': current_score,
'confusion_matrix': confusion_matrix(y_validation, y_validation_predicted)
}
)
            print('\t(Strategy {}) Trained on {} samples ({}%) from test set - reached {} ({}%)'.format(strategy, n_training_samples, percentage, current_score, np.round(100.0*(current_score - learning_curves[0]['score'])/(baseline_high-learning_curves[0]['score']))))
if len(to_label) > 0:
# prioritize the not yet labeled data points
if strategy=='informed':
priorities = prioritize_samples(clf_current.predict_proba(X_tolabel[to_label,:]), include=include)
else:
priorities = prioritize_samples(clf_current.predict_proba(X_tolabel[to_label,:]), strategy)
# get indices in original data set for not yet labeled data
priorities = [to_label[idx] for idx in priorities]
    return pd.DataFrame(learning_curves)
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests
class BaseGroupbyTests(BaseExtensionTests):
"""Groupby-specific tests."""
def test_grouping_grouper(self, data_for_grouping):
df = pd.DataFrame(
{"A": ["B", "B", None, None, "A", "A", "B", "C"], "B": data_for_grouping}
)
gr1 = df.groupby("A").grouper.groupings[0]
gr2 = df.groupby("B").grouper.groupings[0]
tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("B", as_index=as_index).A.mean()
_, index = pd.factorize(data_for_grouping, sort=True)
index = pd.Index(index, name="B")
expected = pd.Series([3.0, 1.0, 4.0], index=index, name="A")
if as_index:
self.assert_series_equal(result, expected)
else:
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
def test_groupby_agg_extension(self, data_for_grouping):
# GH#38980 groupby agg on extension type fails for non-numeric types
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
expected = df.iloc[[0, 2, 4, 7]]
expected = expected.set_index("A")
result = df.groupby("A").agg({"B": "first"})
self.assert_frame_equal(result, expected)
result = df.groupby("A").agg("first")
self.assert_frame_equal(result, expected)
result = df.groupby("A").first()
self.assert_frame_equal(result, expected)
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
_, index = pd.factorize(data_for_grouping, sort=False)
        index = pd.Index(index, name="B")
        expected = pd.Series([1.0, 3.0, 4.0], index=index, name="A")
        self.assert_series_equal(result, expected)
#! /usr/bin/env python3
'''
HERO - Highways Enumerated by Recombination Observations
Author - <NAME>
'''
from argparse import ArgumentParser
from Bio.SeqIO import parse as BioParse
from itertools import product
import math
import multiprocessing
import os
import pandas as pd
from plotnine import *
from random import randint
import subprocess
import time
start_time = time.time()
def get_args():
parser = ArgumentParser(description='HERO - Highways Elucidated by Recombination Observations',
usage='hero.py --hero_table [table] --groups [groups_file] [options]')
parser.add_argument('--hero_table', required=True, help='HERO input table')
parser.add_argument('--groups', required=True, help='Tab-deliminated file with genomes in 1st column and groups in 2nd')
parser.add_argument('-o', '--outdir', default='hero_results', type=str, help='Output directory [hero_results]')
parser.add_argument('-c', '--cpus', default=1, type=int, help='CPUs to use [1]')
parser.add_argument('-l', '--length', default=0, type=int, help='Minimum length required to process recomb event [0]')
parser.add_argument('-b', '--bayes', default=1, type=float, help='Minimum bayes factor required to process recomb event [1]')
return parser.parse_args()
def parse_metadata(metadata_file):
''' Parse metadata into a dictionary '''
groups = {} # Key = Genome [1st column], Value = Group [2nd column]
try:
with open(metadata_file, 'r') as metafile:
for line in metafile:
line = line.strip().split()
groups[line[0]] = line[1].lower()
except FileNotFoundError:
print('Groups file {0} could not be opened. Ensure filepath is correct'.format(metadata_file))
exit(1)
return groups
def parse_table(hero_table):
'''
Parse HERO table into list of arguments
Sanity check all paths
'''
genes = []
with open(hero_table, 'r') as infile:
for line in infile:
line = line.strip().split()
if os.path.exists(line[1]) and os.path.exists(line[2]):
genes.append(line)
else:
print('Gene {0} has a bad filepath. Skipping.'.format(line[0]), flush = True)
return genes
def unpack_arguments(arg_list):
''' Unpack arguments and parse recombination '''
return parse_fastgear(arg_list[0], arg_list[1], arg_list[2])
def parse_fastgear(gene_name, fasta_path, fastgear_path):
t0 = time.time()
''' Parse recent recombination events from fastgear run '''
# Find FASTA file to parse sequence info
if not any(BioParse(fasta_path, 'fasta')):
print('{0} fasta file is bad. Removing from analysis.'.format(gene_name), flush=True)
return gene_name
# Parse FASTA file into dict
seqs_dict = {}
for record in BioParse(fasta_path, 'fasta'):
seqs_dict[record.id] = record.seq
# Setup genome class
class Genome:
strain_to_genome = {} # Key: Strain name, Value: Genome class ID
lineages = {} # Key: Lineage, Value: Strain name
def __init__(self, sequence, lineage, name):
self.name = name
self.lineage = lineage
self.sequence = sequence
# Update class dictionaries
Genome.lineages.setdefault(lineage, [])
Genome.lineages[lineage].append(name)
Genome.strain_to_genome[name] = self
# Parse lineage file and update Genome Class Dicts
try:
with open('{0}/output/lineage_information.txt'.format(fastgear_path), 'r') as fg_file:
next(fg_file)
for line in fg_file:
line = line.strip().split()
try:
seq = seqs_dict[line[3]]
except KeyError:
print('{0} could not match a sequence to ID {1}. Removing from analysis.'.format(fastgear_path, line[3]), flush=True)
return gene_name
# Add genome to Genome class
Genome(seq, line[1], line[3])
except FileNotFoundError:
return gene_name
# Parse recombination
return parse_recombination(fastgear_path, Genome, gene_name)
def parse_recombination(fastgear_run, Genome, gene_name):
''' Parse recent recombination and filter events '''
def add_event(d_lineage, s_idx, e_idx, recipient):
''' Update pair with new event and condense overlapping events '''
# Make sure pair exists
pair = d_lineage
donor_lineages.setdefault(pair, [])
# Append new index to list of events
donor_lineages[pair].append([s_idx, e_idx, [recipient]])
# Then condense events by index pairs
donor_lineages[pair].sort(key = lambda x: x[0])
merged_pairs = [] # final array to hold merged intervals
merged_recipients = []
start = -1
end = -1
for idx in range(len(donor_lineages[pair])):
cur_event = donor_lineages[pair][idx]
if cur_event[0] > end:
if idx != 0:
merged_pairs.append([start, end, merged_recipients])
merged_recipients = []
end = cur_event[1]
start = cur_event[0]
merged_recipients.extend(cur_event[2])
elif cur_event[1] >= end:
end = cur_event[1]
merged_recipients.extend(cur_event[2])
if end != -1 and [start,end] not in merged_pairs:
merged_pairs.append([start, end, merged_recipients])
donor_lineages[pair] = merged_pairs
# Open recent recomb file
try:
recomb_file = open('{0}/output/recombinations_recent.txt'.format(fastgear_run), 'r')
next(recomb_file)
next(recomb_file)
except FileNotFoundError:
print('{0} has an incomplete fastgear run. Removing from analysis.'.format(fastgear_run), flush=True)
return gene_name
# Find external donor lineage num for gene for filtering
external_donor = str(max([int(x) for x in Genome.lineages]) + 1)
# Logs all lineage pairs and tracks unique events
donor_lineages = {} # Key: donor lineage
# Value: List of unique events
# Get event info
for line in recomb_file:
line = line.strip().split()
s_idx, e_idx = int(line[0])-1, int(line[1]) # fastGEAR includes s_idx in the sequence, so subtract one for indexing
d_lineage, strain_name = line[2], line[5]
logbf = float(line[4])
# If minimum length or bayes not met, move on (length/bayes are global vars)
# If donor lineage is external donor, move on
fragment_len = e_idx - s_idx # fastGEAR includes the start position in its len
if fragment_len < length or (math.e**logbf) < bayes or d_lineage == external_donor:
continue
# Add event to lineage pair in dict
add_event(d_lineage, s_idx, e_idx, strain_name)
recomb_file.close() # Close recomb file
# For each unique event, find the most likely donor(s).
# Then for each unique metadata group in recipients, log an event
events = set() # All recombination events
for d_lineage in donor_lineages:
for event in donor_lineages[d_lineage]:
start, end = int(event[0]), int(event[1])
sample_recipient = event[2][0]
# All genome are expected to be roughly equal. So take the first genome
recip_seq = Genome.strain_to_genome[sample_recipient].sequence[start:end]
donor_group = find_pair(start, end, d_lineage, recip_seq, Genome)
# Fit donor group to all unique recip groups
if donor_group:
for recipient in event[2]:
recip_group = metadata[recipient]
recip_strains = [strain for strain in event[2] if metadata[strain] == recip_group]
#final_info = (donor_group, recip_group, end-start, gene_name, ','.join(recip_strains))
final_info = (donor_group, recip_group, start+1, end, gene_name, ','.join(recip_strains))
events.add(final_info)
return list(events)
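# Illustration of the interval-condensing step inside add_event (fabricated
# coordinates): three events logged for one donor lineage,
#   [0, 100], [50, 150], [200, 250]
# are merged into [0, 150] and [200, 250], with the recipient lists of the two
# overlapping events concatenated. Each merged interval is then treated as a
# single unique recombination event when donors are resolved by find_pair.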
def find_pair(s_idx, e_idx, d_lineage, recip_seq, Genome):
''' Try to find a metadata pair that is linked by this recombination event '''
# Step 1: See if all donors in d_lineage are from same metadata group
# NOTE:Lots of checking metadata groups here.
# I always default to other in case genome wasn't established in
# metadata parsing.
# Test donors for total consistency
donors = Genome.lineages[d_lineage]
metadata.setdefault(donors[0], 'other')
metagroup = metadata[donors[0]] # metadata dict is a global var
for donor in donors[1:]:
metadata.setdefault(donor, 'other')
cur_group = metadata[donor]
if cur_group != metagroup: # Not all donors are the same
break
else: # All donors from same group! We can move on.
return metagroup
# Step 2: Not all donors fit
# Get distance of recip seq to donor recomb fragments
# Get distance of recip seq to all donor seqs
shortest = None
viable_donors = []
for donor in donors:
donor_frag = str(Genome.strain_to_genome[donor].sequence[s_idx:e_idx])
# Calculate distance between donor and recip fragment
dist = 0
for idx, nuc in enumerate(donor_frag):
if recip_seq[idx] != nuc:
dist += 1
# Compare dist to current best dist
if not shortest: # This is the first comparison
shortest = dist
viable_donors.append(donor)
continue
# All other tests
if dist < shortest:
shortest = dist
viable_donors = [donor]
elif dist == shortest:
viable_donors.append(donor)
# Step 3 (2b?): If all likely donors from same metagroup, we win.
# Otherwise, discard the event.
metagroup = metadata[viable_donors[0]]
if len(viable_donors) > 1: # If multiple donors, check for consistency
for donor in viable_donors[1:]:
if metadata[donor] != metagroup:
return None # If two metagroups exist, kill the search
# We found a good metagroup! Send the event back
return metagroup
def parse_events(recombination_events):
''' Parse events from multithreaded event finding '''
good_events = []
bad_genes = []
for gene in recombination_events:
if isinstance(gene, list): # Good events are lists, bad genes are str
for event in gene:
good_events.append(event)
else:
bad_genes.append(gene)
return good_events, bad_genes
def calculate_highway(events, unique_groups):
'''
Calculate the theshold for highways of recombination
highway = 3*IQR + Q3
IQR = Interquartile range
Q3 = Third quartile of the data
'''
recomb_events = {x:0 for x in unique_groups}
# Get all unique combinations of group pairs
for event in events:
pair = (event[0], event[1])
recomb_events.setdefault(pair, 0)
recomb_events[pair] += 1
# Calculate IQRs
recomb_counts = list(recomb_events.values())
recomb_df = pd.DataFrame({'Events': recomb_counts})
q3 = recomb_df.quantile(q=0.75)['Events']
q1 = recomb_df.quantile(q=0.25)['Events']
IQR = q3 - q1
significance_limit = q3 + (3*IQR)
return recomb_events, significance_limit
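# Worked example of the threshold (illustration only, fabricated counts): if the
# per-pair event counts are [1, 2, 2, 3, 10], then Q1 = 2, Q3 = 3, IQR = 1 and
# the highway cutoff is 3 + 3*1 = 6, so only the pair with 10 events would be
# flagged as a highway.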
class Metagroup:
'''
Each metadata group will be given an instance.
Tracks recombination stats for each group
'''
metagroup_dict = {} # Key: metagroup string name
# Value: Metagroup object instance
def __init__(self, name):
# Recombination variables
self.name = name
self.donations = 0 # Number of donations
self.receipts = 0 # Number of receipts
self.group_stats = {} # Key: Other metagroup object string name
# Value: [donations_to, receipts_from]
# Plotting variables
self.d_pos = 0 # Number of donations already plotted
self.r_pos = 0 # Number of receipts already plotted
def total_events(self):
return self.donations + self.receipts
def add_event(event):
'''
Parse event to add donor and recipt credit
to each metagroup in event
'''
donor, recipient = event[0], event[1]
# Make object instance for each group if not already exists
Metagroup.metagroup_dict.setdefault(donor, Metagroup(donor))
Metagroup.metagroup_dict.setdefault(recipient, Metagroup(recipient))
# Add donor/recipient credit to each group
d_group = Metagroup.metagroup_dict[donor]
r_group = Metagroup.metagroup_dict[recipient]
d_group.donations += 1
r_group.receipts += 1
d_group.group_stats.setdefault(recipient, [0, 0])
r_group.group_stats.setdefault(donor, [0, 0])
d_group.group_stats[recipient][0] += 1 # Add donor credit
r_group.group_stats[donor][1] += 1 # Add recip credit
def make_circos(events, outdir):
''' Write circos files given events and list of genomes w/ metadata '''
# Log all events in Metagroup class
for event in events:
Metagroup.add_event(event)
# Write karyotype file for circos
with open('{0}/circos_karyotype.txt'.format(outdir), 'w') as k_file:
# Get random color for each group chunk
rand_colors = random_colors(len(Metagroup.metagroup_dict.keys()))
# Write color and group to karyotype file
for idx, group in enumerate(Metagroup.metagroup_dict.values()):
color = rand_colors[idx]
k_file.write('chr - {0} {0} 0 {1} {0}\n'.format(group.name.lower(), group.total_events()))
# Write link file
with open('{0}/circos_links.txt'.format(outdir), 'w') as l_file:
# Create links by the donor
for d_group in Metagroup.metagroup_dict.values():
donor = d_group.name
# Get recipient from group_stats variable
# If donor is in the list of recipients,
# Put it on the end so it looks cleaner
recipients = list(d_group.group_stats.keys())
recipients.sort(key=donor.__eq__)
for recipient in d_group.group_stats:
donations = d_group.group_stats[recipient][0]
r_group = Metagroup.metagroup_dict[recipient]
## Write link to file
# Get donor plot range and update donor positions
d_start = d_group.d_pos
d_end = d_start + donations
d_group.d_pos += donations
# Get recipient range and update recipient positions
# All receipts should be plotted away from donations
r_start = r_group.donations + r_group.r_pos
r_end = r_start + donations
r_group.r_pos += donations
# Write to file
link = donor + ' ' + str(d_start) + ' ' + str(d_end) + ' '
link += recipient + ' ' + str(r_start) + ' ' + str(r_end) + '\n'
l_file.write(link)
# Write config_file
# Tutorial to understanding circos config file can be found at:
# circos.ca/documentation/tutorials/quick_start/
with open('{0}/circos.conf'.format(outdir), 'w') as c_file:
file_contents = 'karyotype = {0}/circos_karyotype.txt\n'.format(outdir)
# Global color scheme
file_contents += '# Global color scheme\n'
file_contents += '<colors>\n'
for idx, name in enumerate(Metagroup.metagroup_dict.keys()):
file_contents += '{0}* = {1}\n'.format(name, rand_colors[idx])
file_contents += '</colors>\n'
# Basic required content (karyotype file location, ideogram creation)
file_contents += '<ideogram>\n\n<spacing>\n'
file_contents += 'default = 0.005r # Spacing between out ring chunks\n'
file_contents += '</spacing>\n\n'
# Ideogram layout details
file_contents += '# Ideogram layout details\n'
file_contents += 'radius = 0.9r # Size of radius for outer ring\n'
file_contents += 'thickness = 80p # Thickness of outer ring\n'
file_contents += 'fill = yes # Fill chunks with color?\n'
file_contents += 'stroke_color = dgrey # Color of chunk outline\n'
file_contents += 'stroke_thickness = 2p # Thickness of outline\n\n'
# Ideogram label details
file_contents += '# Ideogram label details\n'
file_contents += 'show_label = yes # Show chunk labels?\n'
file_contents += 'label_font = default # Font of the labels\n'
file_contents += 'label_radius = 1r + 75p # Where to place labels\n'
file_contents += 'label_size = 50 # Size of the label\n'
file_contents += 'label_parallel = yes # Set label parallel to chunks\n'
file_contents += '</ideogram>\n\n'
# Tick details
# << SKIPPED FOR NOW >>
# Link details
file_contents += '# Links... The actual connections\n'
file_contents += '<links>\n<link>\n'
file_contents += 'file = {0}/circos_links.txt # The file with links to draw\n'.format(outdir)
file_contents += 'ribbon = yes # Turn links into fancy ribbons\n'
file_contents += 'flat = yes # Flatten ribbons\n'
file_contents += 'z = 1 # importance for ribbon plotting\n'
file_contents += 'radius1 = 0.8r # Push donor end of ribbon inward\n'
file_contents += 'color = eval(var(chr2)) # Default link color\n'
file_contents += 'radius = 0.98r # Where links will stop at\n'
file_contents += 'bezier_radius = 0.1r # How far from center the curves are drawn\n'
file_contents += 'thickness = 5 # Default thickness\n'
# Establish rule to color links by donor chunk
file_contents += '\n<rules>\n'
file_contents += '\nflow = continue\n\n'
file_contents += '<rule>\n'
file_contents += 'condition = 1\n'
file_contents += 'color = eval(var(chr1))\n'
file_contents += '</rule>\n<rule>\n'
file_contents += 'condition = var(interchr)\n'
file_contents += 'z = 2\n'
file_contents += '</rule>\n'
file_contents += '</rules>\n\n'
file_contents += '</link>\n</links>\n\n'
# Default circos distributions to include
file_contents += '# Default circos distributions to include\n'
file_contents += '<image>\n<<include etc/image.conf>>\n</image>\n'
file_contents += '<<include etc/colors_fonts_patterns.conf>>\n'
file_contents += '<<include etc/housekeeping.conf>>\n'
c_file.write(file_contents)
def make_highway_circos(highway, outdir):
'''
Create 2nd circos.conf file which filters the color
of ribbons below the highway_definition threshold
'''
try:
with open('{0}/circos.conf'.format(outdir), 'r') as circos_file, open('{0}/highway_circos.conf'.format(outdir), 'w') as outfile:
for line in circos_file:
if line == '</rules>\n':
outfile.write('<rule>\n')
outfile.write('condition = (var(end1) - var(start1)) < {0}\n'.format(highway))
outfile.write('color = grey\n')
outfile.write('z = 1\n')
outfile.write('</rule>\n')
outfile.write(line)
except IOError:
print('Could not make highway circos file. Check circos.conf', flush=True)
def random_colors(num_colors):
''' Generate num_colors random colors '''
# Current optimum maximum number of groups: 51 (255//5)
colors = {k:[] for k in 'rgb'} # Dict of all R/G/B values
for color in range(num_colors): # Make each color
        temp = {k: randint(0,255) for k in 'rgb'} # Get random RGB values
        for k in temp:
            # For each value, make sure it is at least 5 points
            # different from all other values in same position
while True:
c = temp[k]
t = set(j for j in range(c-5, c+5) if 0 <= j <= 255)
if t.intersection(colors[k]):
temp[k] = randint(0,255)
else:
break
colors[k].append(temp[k])
# Format final colors
final_colors = []
for i in range(num_colors):
final_colors.append( '{0},{1},{2}'.format(colors['r'][i], colors['g'][i], colors['b'][i]))
return final_colors
def write_individual_stats(outdir, events):
'''
Write useful text files and plots for individual genome recomb data
1) Histogram of recombination fragment sizes
2) Histogram of recombination per gene
3) Histogram of recombination per recipient
'''
# Step 1: Write out fragment data and collect gene/recipient data
fragments = open('{0}/fragment_sizes.txt'.format(outdir), 'w')
recipient_counts = {}
gene_counts = {}
fragments.write('Size\n')
for event in events:
# Write out fragment now
fragments.write(str(event[3] - event[2])+'\n')
# Add 1 to the count for the gene
gene_counts.setdefault(event[4], 0)
gene_counts[event[4]] += 1
# Each genome gets 1 to its recipient count
for genome in event[5].split(','):
recipient_counts.setdefault(genome, 0)
recipient_counts[genome] += 1
fragments.close()
# Write out recipient/gene data
genes = open('{0}/gene_counts.txt'.format(outdir), 'w')
genes.write('Gene\tEvents\n')
for gene, count in gene_counts.items():
genes.write('{0}\t{1}\n'.format(str(gene), str(count)))
genes.close()
recipients = open('{0}/recipient_counts.txt'.format(outdir), 'w')
recipients.write('Recipient\tEvents\n')
for r, count in recipient_counts.items():
recipients.write('{0}\t{1}\n'.format(str(r), str(count)))
recipients.close()
# Step 2: Make each histogram
make_histogram('{0}/gene_counts.txt'.format(outdir), 'gene', '{0}/gene_counts'.format(outdir))
make_histogram('{0}/recipient_counts.txt'.format(outdir), 'recipient', '{0}/recipient_counts'.format(outdir))
make_histogram('{0}/fragment_sizes.txt'.format(outdir), 'fragment', '{0}/fragment_sizes'.format(outdir))
def make_histogram(file_loc, plot_type, filename):
'''
Make a histogram given a file location and plot type
'''
# Load in each filetype properly
if plot_type == 'gene':
datas = pd.read_csv(file_loc, header=0, sep='\t')
x_lab = '# of events per gene'
histogram = (ggplot(datas, aes(x='Events'))
+ geom_histogram() + xlab(x_lab))
elif plot_type == 'recipient':
datas = pd.read_csv(file_loc, header=0, sep='\t')
x_lab = '# of events per recipient'
histogram = (ggplot(datas, aes(x='Events'))
+ geom_histogram() + xlab(x_lab))
elif plot_type == 'fragment':
        datas = pd.read_csv(file_loc, header=0)
        x_lab = 'Recombination fragment size (bp)'
        histogram = (ggplot(datas, aes(x='Size'))
                     + geom_histogram() + xlab(x_lab))
    # Persist the histogram (assumed final step: the filename argument carries
    # no extension, so PNG output via plotnine's save() is an editor assumption)
    histogram.save('{0}.png'.format(filename), verbose=False)
#Version 2.0
#Version 1.1.3
#--Updated from development version: 6/24/21
#Description:
#Module toolkit used for the gridded temperature map production and post-processing
#Development notes:
#2021-06-24
#--Updated version to 1.1
#--Deprecated 1.0 versions of removeOutlier, get_predictors, and makeModel
#--Added new functions: get_temperature_date, select_stations, extract_data
#2021-07-02
#Updated to version 1.1.1
#--Fixed high elevation gap-fill indexing bug in get_temperature_date and select_stations
#--Set default mixHighAlt to 2150 instead of None (i.e. always include mixed island high elev stations)
#2021-07-09
#--Updated version to 1.1.2:
#--Added new function: get_std_error
#2021-07-12
#--Updated version to 1.1.3:
#--Added new function: lr_temp_gapfill
#--Adjusted select_stations. Restored mixHighAlt default to None. Value determined based on island.
#--Clim gapfill incorporated as last fallback for lr_temp_gapfill
#--Hardcoded constants declared at start of module. Edit as needed.
#2021-08-11
#--Minor patch: Corrected divide by zero case in cross-validation function. metrics(...) cannot run when validation station too low with respect to n_params
#--Tmax gapfill stations added
#from attr import field
#import pylab as py
import pandas as pd
import numpy as np
import statsmodels.api as sm
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
# In[ ]:
#SET MODULE CONSTANTS
#Consolidate file names, index names, and directory names here to avoid hardcoding
STN_IDX_NAME = 'SKN'
ELEV_IDX_NAME = 'ELEV.m.'
MASTER_DIR = r'/home/hawaii_climate_products_container/preliminary/'
DEP_MASTER_DIR = MASTER_DIR + r'air_temp/daily/dependencies/'
GP_DATA_DIR = DEP_MASTER_DIR + r'gapfill_models/'
CLIM_DATA_DIR = DEP_MASTER_DIR + r'clim/'
META_MASTER_FILE = r'https://raw.githubusercontent.com/ikewai/hawaii_wx_station_mgmt_container/main/Hawaii_Master_Station_Meta.csv'
TARG_STN_LIST = ['39.0','339.6','885.7','1075.0']
#GAPFILL_PREF = ''
#GAPFILL_INF = ''
GAPFILL_SUF = '_20140101-20181231.csv'
#CLIM_PREF = ''
#CLIM_INF = ''
CLIM_SUF = '_stn_clim.csv'
PRED_STNS_MIN = {'39.0':['39.2','98.2','869.0','828.0','43.3','728.2','499.9','1036.0','94.0','107.4'],
'339.6':['39.2','113.2','946.0','43.3','499.12','1036.0','499.9','885.7','499.13','63.0'],
'885.7':['869.0','799.0','499.12','679','828.0','1074.0','1069.0','842.9','841.2','538.0'],
'1075.0':['1069.0','1036.0','1074.0','538.0','858.0','842.8','828.0','752.6','842.9','742.4']}
PRED_STNS_MAX = {'39.0':['39.2','107.4','1069.0','855.3','311.2','48.0','95.6','1029.0','499.13','946.0'],
'339.6':['39.2','107.4','267.8','499.13','129.0','75.1','266.0','147.2','752.6','1029.0'],
'885.7':['869.0','828.0','499.12','909.0','742.4','752.6','858.0','541.2','911.1','842.7'],
'1075.0':['499.6','3.9','266.0','43.3','63.0','499.8','1036.0','147.2','499.9','101.1']}
TMIN_STN_EXCLUDE = [728.2] #[728.2] Put this back
TMAX_STN_EXCLUDE = [728.2]
#END MODULE CONSTANTS--------------------------------------------------------------
def get_clim_file(varname):
#Change according to file naming convention needs
clim_name = CLIM_DATA_DIR + varname + CLIM_SUF
return clim_name
def linear(x, a, b):
return a * x + b
def bilinear(x, a, b, c):
left = a * x + b
right = c * (x - 2150) + (a * 2150 + b)
try:
y = np.asarray([left[i] if x[i] <= 2150 else right[i]
for i in range(len(x))])
return y
except BaseException:
if x <= 2150:
return left
else:
return right
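# Quick illustration (made-up coefficients): with slope a below the assumed
# 2150 m inversion and a separate slope c above it,
#   bilinear(1000.0, -0.006, 25.0, -0.002) -> 19.0  (uses a*x + b)
#   bilinear(3000.0, -0.006, 25.0, -0.002) -> 10.4  (uses the c branch,
#       anchored at the breakpoint value a*2150 + b = 12.1)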
# calculate bic for regression
def calculate_bic(n, mse, num_params):
BIC = n * np.log(mse) + num_params * np.log(n)
return BIC
# calculate aic for regression
def calculate_aic(n, mse, num_params):
# for the linear regression, assuming that errors are normally distributed
AIC = n * np.log(mse) + 2 * num_params
AICc = AIC + 2 * num_params * (num_params + 1.) / (n - num_params - 1.)
return AIC, AICc
def lr_temp_gapfill(isl_df,varname,stn_date):
"""
Description: Helper function specific to linear gap-fill of temperature min/max
Patch notes:
--[10/5/21] Function breaks for new input where nan-row stations are dropped from file
-- First checks index list to see if all gapfilled stations exist in index
-- If not present, they are gapfilled automatically
Development notes:
--Iteratively checks all target stations (specified in module constants)
--If target missing data, chooses predictor model based on highest correlation (specified)
--If no predictor stations available, fill with climatological value at target station
"""
if varname == 'Tmin':
predictor_stations = PRED_STNS_MIN
elif varname == 'Tmax':
predictor_stations = PRED_STNS_MAX
#Get list of all critical stations for gapfilling
#Ensures an index exists for donor and target stations for gapfill check
master_meta = pd.read_csv(META_MASTER_FILE)
master_meta = master_meta.set_index('SKN')
critical_stns = TARG_STN_LIST + [item for sublist in [predictor_stations[key] for key in predictor_stations.keys()] for item in sublist]
critical_stns = [float(crit) for crit in critical_stns]
non_exist_crits = np.setdiff1d(np.array(critical_stns),isl_df.index.values)
non_exist_meta = master_meta.loc[non_exist_crits]
new_inds = list(non_exist_crits) + list(isl_df.index.values)
new_isl_df = pd.DataFrame(index=new_inds)
new_isl_df.index.name = 'SKN'
new_isl_df.loc[isl_df.index,isl_df.columns] = isl_df
new_isl_df.loc[non_exist_crits,varname] = np.nan
new_isl_df.loc[non_exist_crits,non_exist_meta.columns] = non_exist_meta
#Check if target stations for gapfilling are part of the input dataset
#Then run gapfill as normal
for target in TARG_STN_LIST:
if np.isnan(new_isl_df.at[float(target),varname]):
#iteratively check the regression parameters
fill_file = GP_DATA_DIR + varname + '_target' + STN_IDX_NAME + target + GAPFILL_SUF
fill_model_df = pd.read_csv(fill_file, skiprows=3)
fill_model_df = fill_model_df.set_index(STN_IDX_NAME)
pred_stn_list = predictor_stations[target]
for pred in pred_stn_list:
#check if avail, if yes, predict and fill
#if not, pass to next
lr_fill_flag = False
if np.isnan(new_isl_df.at[float(pred),varname]):
#Station not available. Move on.
pass
else:
beta0 = fill_model_df.at[float(pred),'beta0']
beta1 = fill_model_df.at[float(pred),'beta1']
pred_x = new_isl_df.at[float(pred),varname]
targ_est = linear(pred_x,beta1,beta0)
                    new_isl_df.at[float(target),varname] = targ_est
lr_fill_flag = True
break
#if no linear regression was used, fill target with climo
if not lr_fill_flag:
clim_file = get_clim_file(varname)
clim_df = pd.read_csv(clim_file)
mon = stn_date.month - 1
new_isl_df.at[float(target),varname] = clim_df.at[mon,target]
return new_isl_df
# In[ ]:
def removeOutlier(X,y,threshold=2.5):
X = X.flatten()
fit, cov = curve_fit(bilinear, X, y, sigma=y * 0 + 1)
model = bilinear(X, fit[0], fit[1], fit[2])
stdev = np.std(model - y) # 1-sigma scatter of residuals
indx, = np.where(np.abs(model - y) < threshold * stdev)
# repeating the process one more time to clip outliers based
# on a more robust model
fit, cov = curve_fit(
bilinear, X[indx], y[indx], sigma=y[indx] * 0 + 1)
model = bilinear(X, fit[0], fit[1], fit[2])
stdev = np.std(model - y)
indx, = np.where(np.abs(model - y) < threshold * stdev)
return indx
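# Usage sketch (illustration only): X is a column vector of station elevations
# and y the matching temperatures; the function returns the indices that survive
# two rounds of 2.5-sigma clipping around the piecewise-linear fit, e.g.
#   keep = removeOutlier(elev.reshape(-1, 1), temps)
#   elev_clean, temps_clean = elev[keep], temps[keep]
# where `elev` and `temps` are hypothetical numpy arrays of equal length.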
# In[ ]:
def select_stations(vars,varname,iCode,stn_date,min_stn=10,mixHighAlt=None):
"""
Description: Primarily internal function to progressively sample stations from outer islands //
as needed to meet minimum regression sample size
Development notes:
--Currently specifies distinct selection hierarchy for each island
--Pulls high elevation stations from all islands as long as inversion height is specified
--Replaces highest elevation station with climatological value if no high elevation data available
Patch 2021-07-02:
--Fixed indexing bug for high elevation climatological gap-fill
Update 2021-07-12:
--Introduced linear regression gap-filling
Future patches:
"""
#Input is already filtered by date. Single day station dataset, all islands
#Sets decision algorithm for handling corner cases. May need to consider wrapping this
#Filter temps based on iCode, check length, re-filter or return
#Set exclusions
if varname == 'Tmin':
excl_list = TMIN_STN_EXCLUDE
elif varname == 'Tmax':
excl_list = TMAX_STN_EXCLUDE
#Defining search hierarchy for each island (Add more or change order here as desired)
all_iCodes = ['BI','MA','KO','MO','LA','OA','KA']
ka_hier = ['OA','MA','All']
oa_hier = ['KA','MA','All']
ma_hier = ['BI','OA','All']
bi_hier = ['MA','OA','All']
mn_hier = ['MA','BI','OA','All']
#Set original baseline island list
if (iCode == 'MN'):
isl_list = ['MA','MO','KO','LA']
hierarchy = ma_hier
elif iCode == 'BI':
isl_list = [iCode]
hierarchy = bi_hier
elif iCode == 'MA':
isl_list = [iCode]
hierarchy = ma_hier
elif iCode == 'OA':
isl_list = [iCode]
hierarchy = oa_hier
elif iCode == 'KA':
isl_list = [iCode]
hierarchy = ka_hier
elif iCode in ['MO','KO','LA']:
isl_list = [iCode]
hierarchy = mn_hier
else:
return None
#As long as inversion height is set by mixHighAlt parameter, automatically include all available
#Automatically gapfill all pre-selected target stations
var_isl = lr_temp_gapfill(vars,varname,stn_date)
if mixHighAlt is not None:
var_isl = var_isl[(var_isl['Island'].isin(isl_list) | (var_isl[ELEV_IDX_NAME] > mixHighAlt))]
else:
var_isl = var_isl[var_isl['Island'].isin(isl_list)]
#Iteratively check number of available stations. Progressively add outer island stations until minimum requirement is met
var_isl = var_isl[~var_isl[varname].isna()]
#Exclude any stations in exclusion list
var_isl = var_isl.loc[~var_isl.index.isin(excl_list)]
while ((var_isl.shape[0] < min_stn) & (set(isl_list) != set(all_iCodes))):
next_isl = [hierarchy.pop(0)]
if next_isl == ['All']:
next_isl = list(set(all_iCodes) - set(isl_list))
isl_list = isl_list + next_isl
var_isl = pd.concat([var_isl,vars[vars['Island'].isin(next_isl)]])
var_isl = var_isl[~var_isl[varname].isna()]
#Exclude any additional stations in the exclusion list
var_isl = var_isl.loc[~var_isl.index.isin(excl_list)]
var_isl = var_isl[~var_isl.index.duplicated(keep='first')]
return var_isl
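# --- Illustrative sketch (editor-added, not part of the original pipeline) -------------
# A minimal, self-contained toy version of the progressive island-expansion idea used by
# select_stations above: keep appending islands from a search hierarchy until the minimum
# station count is met. The station table, island codes, and threshold below are invented
# for demonstration only; the real function also applies gap-filling, exclusion lists, and
# the high-elevation (mixHighAlt) rule.
def _demo_progressive_selection():
    import pandas as pd
    stations = pd.DataFrame({
        'Island': ['KA', 'KA', 'OA', 'OA', 'OA', 'MA'],
        'Tmin':   [18.2, 14.1, 17.5, 16.9, 15.8, 6.3],
    })
    isl_list, hierarchy, min_stn = ['KA'], ['OA', 'MA'], 4
    selected = stations[stations['Island'].isin(isl_list)]
    while (selected.shape[0] < min_stn) and hierarchy:
        isl_list = isl_list + [hierarchy.pop(0)]      # expand to the next island in the hierarchy
        selected = stations[stations['Island'].isin(isl_list)]
    return selected                                   # 5 rows here: both KA stations plus the OA ones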
def extract_dataset(varname,dataloc='',predictors=True,pred_name=None,predloc=None):
"""
Description: Simple dataset extraction. No data processing performed.
Development notes:
--Currently allows retrieval of data and related predictors. Will later need to generalize this functionality.
--Really, should only output one specified dataset at a time and keep data processing in other specified functions.
Future patches:
--Remove hardcoded file suffixes or at least create more dynamic options
--Either allow for other file types or specify this function is for csv extraction
"""
#Extracts full dataset based on specified varname
#Option to continuously add new variable handling
if varname == 'Tmax':
var_file = dataloc+varname+'_QC.csv'
elif varname == 'Tmin':
var_file = dataloc+varname+'_QC.csv'
elif varname =='RF':
var_file = dataloc+'2_Partial_Fill_Daily_RF_mm_1990_2020.csv'
var_df = pd.read_csv(var_file, encoding="ISO-8859-1", engine='python')
if predictors == True:
if predloc is None:
predloc = dataloc
if pred_name is None:
pred_name = varname
pred_file = predloc+pred_name+'_predictors.csv'
pred_df = pd.read_csv(pred_file, encoding="ISO-8859-1",engine='python')
return var_df, pred_df
else:
return var_df
# Need a process_archival function to convert non-standardized format data
def extract_predictors(filename,param_list):
pred_df = pd.read_csv(filename,encoding="ISO-8859-1",engine='python')
pred_df = pred_df.set_index(STN_IDX_NAME)
return (pred_df,pred_df[param_list])
def extract_temp_input(filename,meta_col_n=12,get_decomp=True):
"""
Reads the temperature input data for a specified date
Processes it according to the date standard, outputs a meta-only dataframe (SKN-sorted),
and a temp-only dataframe (SKN-sorted)
"""
temp_df = pd.read_csv(filename,encoding="ISO-8859-1",engine='python')
temp_df = temp_df.set_index(STN_IDX_NAME)
df_cols = temp_df.columns
meta_cols = df_cols[:meta_col_n]
temp_cols = df_cols[meta_col_n:]
meta_df = temp_df[list(meta_cols)]
temp_data = temp_df[list(temp_cols)]
#Convert keys into datetime keys for easier time indexing
temp_cols = [dt.split('X')[1] for dt in list(temp_cols)]
dt_keys = pd.to_datetime(list(temp_cols))
temp_data.columns = dt_keys
temp_df = meta_df.join(temp_data,how='left')
if get_decomp:
return (temp_df,meta_df,temp_data)
else:
return temp_df
def get_temperature_date(temp_data,meta_data,iCode,stn_date,varname=None,climloc='',dateFmt=None,mixHighAlt=None,min_stn=10,naive_select=False):
#Updated to take in a station-indexed temperature dataframe, should already be set_index(SKN)
iCode = iCode.upper()
if isinstance(stn_date,str):
if dateFmt == None:
stn_date = pd.to_datetime(stn_date)
else:
stn_date = pd.to_datetime(stn_date,format=dateFmt)
temp_day = temp_data[[stn_date]].rename(columns={stn_date:varname})
temp_day = meta_data.join(temp_day,how='left')
#Send islands and temp_day into select_stations.
#Outputs temp stn data of appropriate size
#if mixHighAlt not specified, set mixHighAlt based on the target island
#if mixHighAlt is specified, then force it to be the user specified value
if mixHighAlt == None:
if iCode in ['KA','OA']:
mixHighAlt = None
else:
mixHighAlt = 2150
if naive_select:
#Only select all island data from specified date
return temp_day.dropna()
else:
return select_stations(temp_day,varname,iCode,stn_date,min_stn=min_stn,mixHighAlt=mixHighAlt)
def get_predictors(pred_df,param_list):
"""
Description: Updated version of get_Predictors
Development notes:
--Removed redundancy in ISLAND_code call
--Now only outputs predictors which will actually be used in curve fitting
Future patches:
--Eventually will need to consider where to handle predictors from multiple sources
"""
pred_df = pred_df.set_index(STN_IDX_NAME)
return pred_df[param_list]
# In[ ]:
def myModel(inversion=2150):
'''
This wrapper function constructs another function called "MODEL"
according to the provided inversion elevation
'''
def MODEL(X, *theta):
_, n_params = X.shape
y = theta[0] + theta[1] * X[:, 0]
for t in range(1, n_params):
y += theta[t+2] * X[:, t]
ind, = np.where(X[:, 0] > inversion)
y[ind] += theta[2] * (X[:, 0][ind] - inversion)
return y
return MODEL
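# --- Illustrative sketch (editor-added) -------------------------------------------------
# Quick demonstration of the piecewise-linear form produced by myModel(): below the
# inversion the prediction is theta0 + theta1*elev (+ contributions from any extra
# predictors); above it an additional slope theta2*(elev - inversion) kicks in. The
# elevations and coefficients below are invented purely for illustration.
def _demo_mymodel_shape():
    import numpy as np
    model = myModel(inversion=2150)
    X = np.column_stack([np.array([0.0, 1000.0, 2150.0, 3000.0]),   # elevation (m)
                         np.ones(4)])                               # one dummy extra predictor
    theta = (28.0, -0.006, 0.004, 0.0)   # intercept, lapse rate, above-inversion slope, dummy coefficient
    return model(X, *theta)              # only the 3000 m point receives the +0.004*(3000-2150) adjustment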
# In[ ]:
def makeModel(predictand,params,model,threshold=2.5):
"""
Description: Updated version of makeModel.
Development notes:
    --Predictand replaces df for clarity. Only the available and relevant stations of the general predictand should be passed in via this variable
--Params replaces parameter list. Parameter list should be filtered for selected predictors before being passed in
--Data preparation encapsulated in different function. This function now exclusively takes input and fits curve.
"""
n_data, n_params = params.shape
y = predictand.values
X = params.values
if len(y) > 1:
indx = removeOutlier(X,y,threshold=threshold)
X = X[indx]
y = y[indx]
fit, cov = curve_fit(model,X,y,p0=[30, -0.002] + (n_params) * [0])
return fit, cov, X, y
else:
return None, None, None, None
def get_std_error(X,y,pcov,param_grid,inversion):
"""
Description: Based on estimated parameter variance-covariance matrix
computes the standard error of the model predicted values.
Patch notes: Version 1.0
"""
se_fit = []
X_island = param_grid.copy()
if np.isinf(pcov).any():
#Remove outliers linear----------------------------------------
threshold = 2.5
Xvals = X.values.flatten()
yvals = y.values
fit, cov = curve_fit(linear, Xvals, yvals, sigma=yvals * 0 + 1)
model = linear(Xvals, fit[0], fit[1])
stdev = np.std(model - yvals) # 1-sigma scatter of residuals
indx, = np.where(np.abs(model - yvals) < threshold * stdev)
fit, cov = curve_fit(
linear, Xvals[indx], yvals[indx], sigma=yvals[indx] * 0 + 1)
model = linear(Xvals, fit[0], fit[1])
stdev = np.std(model - yvals)
indx, = np.where(np.abs(model - yvals) < threshold * stdev)
#Remove outliers end-------------------------------------------
#indx = removeOutlier(X.values,y.values,threshold=2.5)
X = X.iloc[indx]
y = y.iloc[indx]
se_model = sm.OLS(y,sm.add_constant(X))
se_res = se_model.fit()
pcov = se_res.cov_params().values
X_island = sm.add_constant(X_island.values)
for i in range(X_island.shape[0]):
xi = X_island[i].reshape(-1,1)
se = np.dot(np.dot(xi.T,pcov),xi)[0][0]
se_fit.append(se)
se_fit = np.array(se_fit)
else:
#X_island = sm.add_constant(param_grid.values)
X_above = X_island['dem_250'].copy() - inversion
X_above[X_above<=0] = 0
X_above.rename('above')
X_island.insert(1,'above',X_above)
X_island = sm.add_constant(X_island.values)
for i in range(X_island.shape[0]):
xi = X_island[i].reshape(-1,1)
se = np.dot(np.dot(xi.T,pcov),xi)[0][0]
se_fit.append(se)
se_fit = np.array(se_fit)
return se_fit
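# --- Illustrative sketch (editor-added) -------------------------------------------------
# The loops in get_std_error evaluate the quadratic form x_i' * pcov * x_i for each grid
# point, i.e. the parameter-covariance contribution to the spread of the predicted value.
# The toy design matrix and covariance below are invented; the sketch only shows that the
# loop is equivalent to the vectorized np.einsum('ij,jk,ik->i', X, pcov, X).
def _demo_quadratic_form():
    import numpy as np
    rng = np.random.default_rng(0)
    X = np.column_stack([np.ones(5), rng.uniform(0.0, 3000.0, 5)])  # [constant, elevation]
    A = rng.normal(size=(2, 2))
    pcov = A @ A.T                                                  # symmetric positive semi-definite
    looped = np.array([float(x.reshape(-1, 1).T @ pcov @ x.reshape(-1, 1)) for x in X])
    vectorized = np.einsum('ij,jk,ik->i', X, pcov, X)
    assert np.allclose(looped, vectorized)
    return vectorized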
# In[ ]:
def cross_validation(predictor, response, iCode, varname, MODEL, metadata, threshold=2.5,inversion=2150):
if iCode == 'MN':
isl_list = ['MA','KO','MO','LA']
else:
isl_list = [iCode]
#Only select test values from target island
meta_stn = metadata.set_index('SKN')
targ_skns = []
predicted_y = []
validate_y = []
target_isl = response[response['Island'].isin(isl_list)].index.values
non_target_stn = response[~response['Island'].isin(isl_list)]
non_target_isl = response[~response['Island'].isin(isl_list)]['Island'].unique()
high_elev_stn = non_target_stn[non_target_stn['ELEV.m.'] > inversion]
high_elev_isl = high_elev_stn['Island'].unique()
nstn = response.shape[0]
nstn_ext = len(non_target_stn)
nstn_elev = len(high_elev_stn)
for target in list(target_isl):
train_inds = np.setdiff1d(predictor.index.values,[target])
X_train = predictor.loc[train_inds]
X_test = predictor.loc[target].values.reshape(-1,1)
y_train = response.loc[train_inds,varname]
y_test = response.loc[target,varname]
theta,pcov,X,y = makeModel(y_train,X_train,MODEL,threshold)
y_loo = MODEL(X_test,*theta)
targ_skns.append(target)
predicted_y.append(y_loo)
validate_y.append(y_test)
targ_skns = np.array(targ_skns).reshape(-1,1)
predicted_y = np.array(predicted_y).reshape(-1,1)
validate_y = np.array(validate_y).reshape(-1,1)
validate_flag = np.ones(validate_y.shape,dtype=bool)
anoms = validate_y - predicted_y
cv_data = np.concatenate([targ_skns,validate_y,predicted_y,anoms,validate_flag],axis=1)
n_params = X_train.shape[1]
u,v = sigma_Clip(predicted_y.flatten(),validate_y.flatten())
if ((len(u) - n_params -1) < 3) | ((len(v) - n_params - 1) < 3):
mae = np.nan
rmse = np.nan
r2 = np.nan
aic = np.nan
aicc = np.nan
bic = np.nan
obs_mean = np.nan
pred_mean = np.nan
bias = np.nan
r2_code = 1 #Not enough data to produce metric
else:
mae,rmse,r2,aic,aicc,bic = metrics(u,v,False,n_params)
obs_mean = np.mean(v)
pred_mean = np.mean(u)
bias = obs_mean - pred_mean
if r2 >= 0:
r2_code = 0
else:
r2_code = 2 #Negative R2
#Convert the arrays to dataframe (add the other columns as we figure out what they are)
cv_df = pd.DataFrame(cv_data,columns=[STN_IDX_NAME,'ObservedTemp','PredictedTemp','Obs-Pred','ValidatedStation'])
cv_meta = meta_stn.loc[cv_df[STN_IDX_NAME].values]
cv_meta = cv_meta.reset_index()
cv_df = pd.concat([cv_df[STN_IDX_NAME],cv_meta,cv_df[cv_df.columns[1:]]],axis=1)
cv_df = cv_df.loc[:,~cv_df.columns.duplicated()]
#Tack on the values for the training-only values from off-island if applicable
train_only_inds = np.setdiff1d(predictor.index.values,target_isl)
train_meta = meta_stn.loc[train_only_inds]
train_meta = train_meta.reset_index()
train_only_validate = response.loc[train_only_inds,varname].values
train_only_predicted = np.array([np.nan for i in range(train_only_validate.shape[0])])
training_flag = np.zeros(train_only_predicted.shape,dtype=bool)
train_only_data = np.concatenate([train_only_inds.reshape(-1,1),train_only_validate.reshape(-1,1),train_only_predicted.reshape(-1,1),train_only_predicted.reshape(-1,1),training_flag.reshape(-1,1)],axis=1)
train_only_df = pd.DataFrame(train_only_data,columns=['SKN','ObservedTemp','PredictedTemp','Obs-Pred','ValidatedStation'])
train_only_df = | pd.concat([train_only_df[STN_IDX_NAME],train_meta,train_only_df[train_only_df.columns[1:]]],axis=1) | pandas.concat |
# Training code for D4D Boston Crash Model project
# Developed by: bpben
import numpy as np
import pandas as pd
import scipy.stats as ss
from sklearn.metrics import roc_auc_score
import os
import json
import argparse
import yaml
from .model_utils import format_crash_data
from .model_classes import Indata, Tuner, Tester
from data.util import get_feature_list
# import sklearn.linear_model as skl
# all model outputs must be stored in the "data/processed/" directory
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))))
def predict_forward(trained_model, best_model_features, perf_cutoff,
split_week, split_year, seg_data, crash_data):
"""simple function to predict crashes for specific week/year"""
test_crash = format_crash_data(crash_data, 'crash', split_week, split_year)
test_crash_segs = test_crash.merge(
seg_data, left_on='segment_id', right_on='segment_id')
preds = trained_model.predict_proba(
test_crash_segs[best_model_features])[::, 1]
try:
perf = roc_auc_score(test_crash_segs['target'], preds)
except ValueError:
print('Only one class present, likely no crashes in the week')
perf = 0
print(('Week {0}, year {1}, perf {2}'.format(split_week, split_year, perf)))
if perf <= perf_cutoff:
print(('Model performs below AUC %s, may not be usable' % perf_cutoff))
return(preds)
def output_importance(trained_model, features, datadir):
# output feature importances or coefficients
if hasattr(trained_model, 'feature_importances_'):
feature_imp_dict = dict(zip(features, trained_model.feature_importances_.astype(float)))
elif hasattr(trained_model, 'coefficients'):
feature_imp_dict = dict(zip(features, trained_model.coefficients.astype(float)))
else:
return("No feature importances/coefficients detected")
# conversion to json
with open(os.path.join(datadir, 'feature_importances.json'), 'w') as f:
json.dump(feature_imp_dict, f)
def set_params():
#cv parameters
cvp = dict()
cvp['pmetric'] = 'roc_auc'
cvp['iter'] = 5 #number of iterations
cvp['folds'] = 5 #folds for cv (default)
cvp['shuffle'] = True
#LR parameters
mp = dict()
mp['LogisticRegression'] = dict()
mp['LogisticRegression']['penalty'] = ['l1','l2']
mp['LogisticRegression']['C'] = ss.beta(a=5,b=2) #beta distribution for selecting reg strength
mp['LogisticRegression']['class_weight'] = ['balanced']
mp['LogisticRegression']['solver'] = ['liblinear']
#xgBoost model parameters
mp['XGBClassifier'] = dict()
mp['XGBClassifier']['max_depth'] = list(range(3, 7))
mp['XGBClassifier']['min_child_weight'] = list(range(1, 5))
mp['XGBClassifier']['learning_rate'] = ss.beta(a=2,b=15)
# cut-off for model performance
# generally, if the model isn't better than chance, it's not worth reporting
perf_cutoff = 0.5
return cvp, mp, perf_cutoff
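# --- Illustrative sketch (editor-added) -------------------------------------------------
# The hyperparameter grids above mix plain lists with scipy.stats frozen distributions
# (e.g. ss.beta(a=5, b=2) for the LogisticRegression C); anything exposing .rvs() can be
# sampled during a randomized search. How Tuner/Indata consume cvp and mp is defined in
# model_classes, so this helper only shows what draws from that prior look like.
def _demo_sample_c_prior(n_draws=5, seed=0):
    import scipy.stats as ss
    prior = ss.beta(a=5, b=2)                        # mass concentrated in the upper part of (0, 1)
    return prior.rvs(size=n_draws, random_state=seed)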
def set_defaults(config={}):
"""
Sets defaults if not given in the config file.
Default is just to use the open street map features and crash file
args:
config - dict
"""
if 'seg_data' not in list(config.keys()):
config['seg_data'] = 'vz_predict_dataset.csv.gz'
if 'concern' not in list(config.keys()):
config['concern'] = ''
if 'atr' not in list(config.keys()):
config['atr'] = ''
if 'tmc' not in list(config.keys()):
config['tmc'] = ''
if 'f_cont' not in list(config.keys()):
config['f_cont'] = ['width']
if 'process' not in list(config.keys()):
config['process'] = True
if 'time_target' not in list(config.keys()):
config['time_target'] = [15, 2017]
if 'weeks_back' not in list(config.keys()):
config['weeks_back'] = 1
if 'name' not in list(config.keys()):
config['name'] = 'boston'
if 'level' not in list(config.keys()):
config['level'] = 'week'
def get_features(config, data):
"""
Get features from the feature list created during data generation
"""
features = get_feature_list(config)
# segment chars
# Dropping continuous features that don't exist
new_feats_cont = []
new_feats_cat = []
for f in features['f_cont']:
if f not in data.columns.values:
print("Feature " + f + " not found, skipping")
else:
new_feats_cont.append(f)
f_cont = new_feats_cont
for f in features['f_cat']:
if f not in data.columns.values:
print("Feature " + f + " not found, skipping")
else:
new_feats_cat.append(f)
f_cat = new_feats_cat
# create featureset holder
features = f_cont + f_cat
print(('Segment features included: {}'.format(features)))
if config['concern'] != '':
features.append(config['concern'])
if config['atr'] != '':
features += config['atr_cols']
if config['tmc'] != '':
features += config['tmc_cols']
return f_cat, f_cont, features
def predict(trained_model, data_model, best_model_features,
features, perf_cutoff, config_level, datadir):
"""
    Generates segment-level crash predictions and writes seg_with_predicted.csv/.json to datadir.
    Args:
        config_level - either week or segment
    Returns:
        nothing, writes prediction segments to file
    Note: the week-level branch also references `data`, `config` and `data_segs`, which are
        not passed as arguments and must be available in the enclosing scope.
"""
if config_level == 'week':
# predict back number of weeks according to config
all_weeks = data[['year','week']].drop_duplicates().sort_values(['year','week']).values
back_weeks = all_weeks[-config['weeks_back']:]
pred_weeks = np.zeros([back_weeks.shape[0], data_segs.shape[0]])
for i, yw in enumerate(back_weeks):
preds = predict_forward(trained_model, best_model_features, perf_cutoff,
yw[1], yw[0], data_segs, data)
pred_weeks[i] = preds
# create dataframe with segment-year-week index
df_pred = pd.DataFrame(pred_weeks.T,
index=data_segs.segment_id.values,
columns=pd.MultiIndex.from_tuples([tuple(w) for w in back_weeks]))
# has year-week column index, need to stack for year-week index
df_pred = df_pred.stack(level=[0,1])
df_pred = df_pred.reset_index()
df_pred.columns = ['segment_id', 'year', 'week', 'prediction']
df_pred.to_csv(os.path.join(datadir, 'seg_with_predicted.csv'), index=False)
data_plus_pred = df_pred.merge(data_model, on=['segment_id'])
data_plus_pred.to_json(os.path.join(datadir, 'seg_with_predicted.json'), orient='index')
else:
preds = trained_model.predict_proba(data_model[features])[::, 1]
df_pred = data_model.copy(deep=True)
df_pred['prediction'] = preds
df_pred.to_csv(os.path.join(datadir, 'seg_with_predicted.csv'), index=False)
df_pred.to_json(os.path.join(datadir, 'seg_with_predicted.json'), orient='index')
def add_extra_features(data, data_segs, config, datadir):
"""
Add concerns, atrs and tmcs
Args:
data
data_segs
config
Returns:
updated data_segs
"""
# add concern
if config['concern'] != '':
print('Adding concerns')
concern_observed = data[data.year == 2016].groupby(
'segment_id')[config['concern']].max()
data_segs = data_segs.merge(
concern_observed.reset_index(), on='segment_id')
# add in tmcs if filepath present
if config['tmc'] != '':
print('Adding tmcs')
tmcs = pd.read_json(datadir+config['tmc'], dtype={'near_id': str})[
['near_id'] + config['tmc_cols']]
data_segs = data_segs.merge(
tmcs, left_on='segment_id', right_on='near_id', how='left')
data_segs[config['tmc_cols']] = data_segs[config['tmc_cols']].fillna(0)
return data_segs
def process_features(features, config, f_cat, f_cont, data_segs):
# features for linear model
lm_features = features
if config['process']:
print(('Processing categorical: {}'.format(f_cat)))
for f in f_cat:
t = | pd.get_dummies(data_segs[f]) | pandas.get_dummies |
import os
from random import uniform
import matplotlib
import pandas as pd
from geopy import Point
import uuid as IdGenerator
from geopy import distance
import multiprocessing as mp
from math import sin, cos, atan2, floor, sqrt, radians
def histogram(path, layers, show=True, max_x=None, save_log=True, **kwargs):
if isinstance(layers, str):
layers = [layers]
for layer in layers:
if 'data' in kwargs.keys():
frame = kwargs['data']
else:
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(load_csv, [(path, file, layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_csv((path, file, layer)))
if len(result) == 0:
print('Layer %s empty!' % layer)
continue
frame = pd.concat(list(result))
frame = frame[frame['layer'] == layer]
if len(frame) == 0:
print('Layer %s empty!' % layer)
continue
frame = frame.groupby(['lat_bucket','lon_bucket','timestamp_bucket']).size()
# maximum = frame.max()
# frame = frame.map(lambda a: a / maximum)
if pd.__version__ >= '0.17.0':
frame.sort_values(ascending=False, inplace=True)
else:
frame.sort(ascending=False, inplace=True)
if len(frame) == 0:
print('Layer %s empty!' % layer)
continue
if save_log:
frame.to_csv('data/results/%s-bucket-histogram.log' % layer)
frame = frame.reset_index()
frame.columns = ['lat_bucket', 'lon_bucket', 'timestamp_bucket', 'relevance']
frame.index = [i for i in range(0, len(frame))]
plot = frame['relevance'].plot(kind='line', label=layer)
# if max_x:
# plot.axis([0,max_x,0,maximum+1])
# plot.set_yscale('log', nonposy='clip')
plot.legend()
if show:
matplotlib.pyplot.show(block=True)
else:
fig = plot.get_figure()
fig.savefig('data/results/bucket-histogram.png')
return frame
# df1 = pd.read_csv(
# 'data/twitter-bucket-histogram.log',
# header=None,
# skiprows=1,
# low_memory=False,
# memory_map=True,
# names=['i','j','k','c']
# )['c']
# paddf = pd.DataFrame([0 for i in range(200610, 2000000)])
# df1 = pd.concat([df1, paddf])
# df1.index = [i for i in range(0,len(df1))]
# plot = df1.plot(kind='line', label='twitter: (200610 used buckets)', color='r')
# # maximum = df1.max()
# # print(maximum)
# plot.axis([0,500000,0,1200])
# # matplotlib.pyplot.yscale('log')
# plot.text(0, 420, '(0, 420)', color='r')
# # plot.plot(0, 420, 'ro')
# # plot.text(350000, 800, "twitter: \nyellow_taxis (1996165 used buckets)")
# # plot.plot(187, 10, 'ro')
# plot.legend()
# df2 = pd.read_csv(
# 'data/yellow_taxis-bucket-histogram.log',
# header=None,
# skiprows=1,
# low_memory=False,
# memory_map=True,
# names=['i','j','k','c']
# )['c']
# df2.index = [i for i in range(0,len(df2))]
# plot = df2.plot(kind='line', label='yellow_taxis: (1996165 used buckets)', color='b')
# # maximum = df2.max()
# # print(maximum)
# plot.axis([0,500000,0,1200])
# # matplotlib.pyplot.yscale('log')
# plot.text(0, 1130, '(0, 1130)', color='b')
# # plot.plot(0, 1130, 'bo')
# # plot.text(500686, 10, '10', color='b')
# # plot.plot(500686, 10, 'bo')
# plot.legend()
# matplotlib.pyplot.show(block=True)
# # main('data/buckets/', 'twitter', 16, False, 4)
# # main('data/buckets/', 'yellow_taxis', 16, False, 4)
def index(path, distance_precision, temporal_precision, layer, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
result = pool.map(load_csv, [(path, file, layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_csv((path, file, layer)))
frame = pd.concat(list(result))
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Data loaded...', flush=True)
# split layers
layer1 = frame[frame.layer == layer].groupby(['lat_bucket','lon_bucket','timestamp_bucket'])
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Layers splited...', flush=True)
buckets_location = 'data/buckets/index/'
if not os.path.exists(buckets_location):
os.makedirs(buckets_location)
for name, g in layer1:
g.to_csv('data/buckets/index/%s-%d-%d-%d.csv' % (layer.replace('-', '_'), name[0], name[1], name[2]))
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Buckets indexed...', flush=True)
def load_csv(args):
path, file, layer = args
df = pd.read_csv(
os.path.join(path, file),
header=0,
low_memory=False,
memory_map=True,
index_col='id'
)
return df[(df.layer == layer)]
def bucketize(path, origin, distance_precision, time_precision, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
filelist = os.listdir(path)
for file in filelist:
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
if multiprocess:
pool.apply_async(bucketize_file, args=(path, file, origin, distance_precision, time_precision))
else:
bucketize_file(path, file, origin, distance_precision, time_precision)
if multiprocess:
pool.close()
pool.join()
def bucketize_file(path, file, origin, distance_precision, time_precision):
filename = os.path.join(path, file)
df = | pd.read_csv(filename, header=0) | pandas.read_csv |
import numpy as np
from tspdb.src.database_module.sql_imp import SqlImplementation
from tspdb.src.pindex.predict import get_prediction_range, get_prediction
from tspdb.src.pindex.pindex_managment import TSPI, load_pindex
from tspdb.src.pindex.pindex_utils import index_ts_mapper
import time
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
import timeit
import pandas as pd
from tspdb.src.hdf_util import read_data
from tspdb.src.tsUtils import randomlyHideValues
from scipy.stats import norm
from sklearn.metrics import r2_score
def r2_var(y,y_h,X):
average = np.mean(X**2) - np.mean(X)**2
return 1 - sum((y-y_h)**2)/sum((y-average)**2)
def create_table_data():
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
obs = np.arange(10**5).astype('float')
means = obs
var = np.zeros(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
print(obs_9)
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/ts_basic_5.csv',index_label = 'time')
timestamps = pd.date_range('2012-10-01 00:00:00', periods = 10**5, freq='5s')
df.index = timestamps
df.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS2.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7 ,'var': var })
df.index_label = 'time'
df.to_csv('testdata/tables/MixtureTS2.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/MixtureTS.csv', index_label = 'time')
# real time series varaince harmonics
data = read_data('testdata/MixtureTS_var_test.h5')
obs = data['obs'][:]
means = data['means'][:]
var = data['var'][:]
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7, 'var': var })
df.to_csv('testdata/tables/MixtureTS_var.csv', index_label = 'time')
def create_tables():
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="postgres",password="<PASSWORD>")
for table in ['ts_basic_5','ts_basic_ts_5_5','mixturets2','mixturets_var']:
df = | pd.read_csv('tspdb/tests/testdata/tables/%s.csv'%table) | pandas.read_csv |
import logging
import random
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
import torchvision.transforms.functional as FT
log = logging.getLogger(__name__)
INPUT_DIM = 224
MAX_PIXEL_VAL = 255
MEAN = 58.09
STDDEV = 49.73
def augmentation(img, flip, angle, shift):
img = FT.to_pil_image(img)
if flip:
img = FT.hflip(img)
img = FT.affine(
img, angle=angle, translate=(shift, 0), scale=1.0, shear=0.0
)
return FT.to_tensor(img).numpy()
def load_volume(paths, augment=False):
axial = _load_volume(paths[0], augment)
coronal = _load_volume(paths[1], augment)
sagittal = _load_volume(paths[2], augment)
return axial, coronal, sagittal
def _load_volume(path, augment=False):
vol = np.load(path)
# crop middle
pad = int((vol.shape[2] - INPUT_DIM) / 2)
vol = vol[:, pad:-pad, pad:-pad]
# standardize
vol = (vol - np.min(vol)) / (np.max(vol) - np.min(vol)) * MAX_PIXEL_VAL
# normalize
vol = (vol - MEAN) / STDDEV
shape_in = vol.shape
if augment:
# ensure we flip, rotate and shift all images in the volume
# by the same amount
flip = random.random() > 0.5
angle = (random.random() * 50.0) - 25.0
shift = random.randint(-25, 25)
vol = np.vstack(
[
augmentation(v, flip, angle, shift)
for v in vol.astype(np.float32)
]
)
shape_out = vol.shape
if shape_out != shape_in:
msg = (
f'Shape changed! Shape in: {shape_in}, Shape out: {shape_out}'
)
log.error(msg)
raise Exception(msg)
# convert to RGB
vol = np.stack((vol,) * 3, axis=1)
return torch.FloatTensor(vol)
class Dataset(data.Dataset):
def __init__(self,
# series,
path_df,
diagnosis=None,
label_df=None,
use_gpu=True,
augment=False):
super().__init__()
self.use_gpu = use_gpu
# self.series = series
self.augment = augment
self.paths = {
idx: (row.axial, row.coronal, row.sagittal)
for idx, row in path_df.iterrows()
}
self.labels = None
if label_df is not None:
# we only need to keep paths for the relevant labels
self.paths = {idx: self.paths[idx] for idx in label_df.index}
if diagnosis is not None:
self.diagnosis = diagnosis
self.labels = {
idx: row[self.diagnosis] for idx, row in label_df.iterrows()
}
neg_weight = np.mean(list(self.labels.values()))
self.weights = [neg_weight, 1 - neg_weight]
self.cases = sorted(list(self.paths.keys()))
def weighted_loss(self, prediction, target):
weights_npy = np.array([self.weights[int(t[0])] for t in target.data])
weights_tensor = torch.FloatTensor(weights_npy)
if self.use_gpu:
weights_tensor = weights_tensor.cuda()
loss = F.binary_cross_entropy_with_logits(
prediction, target, weight=Variable(weights_tensor)
)
return loss
def __getitem__(self, index):
case = self.cases[index]
vol_tensor = load_volume(self.paths[case], self.augment)
label_tensor = (
None if self.labels is None else
torch.FloatTensor([self.labels[case]])
)
return vol_tensor, label_tensor, case
def __len__(self):
return len(self.cases)
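# --- Illustrative sketch (editor-added) -------------------------------------------------
# Dataset.weighted_loss balances binary cross-entropy using label prevalence: with
# self.weights = [neg_weight, 1 - neg_weight] and neg_weight = mean(labels), the rarer
# class ends up with the larger per-sample weight. The toy labels below are invented to
# show the effect and are not taken from any real MRNet split.
def _demo_class_weights(labels=(0, 0, 0, 1)):
    import numpy as np
    neg_weight = float(np.mean(labels))              # 0.25 -> applied to targets equal to 0
    weights = [neg_weight, 1 - neg_weight]           # positives get 0.75, i.e. three times the negatives
    per_sample = [weights[int(t)] for t in labels]
    return weights, per_sample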
def paths_to_df(paths):
df = pd.DataFrame()
for path in paths:
series, case = path.split('/')[-2:]
df.loc[case, series] = path
assert(np.all( | pd.notnull(df) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
Functions that implement the ensemble of models
"""
import sys
sys.path.insert(0,'../') # including the path to deep-tasks folder
sys.path.insert(0,'./utils') # including the path to the utils folder
from constants import TOPSIS_PATH
sys.path.insert(0,TOPSIS_PATH)
import os
import pandas as pd
import pickle
import numpy as np
from scipy.stats import mode
from tqdm import tqdm
from TOPSIS import TOPSIS
from scores import compute_estimations, get_scores
def get_scores_test (data_val, data_test, labels, metric_name, test_metrics_path=None, save=True):
test_scores = None
if test_metrics_path is not None:
if os.path.isfile(test_metrics_path):
with open(test_metrics_path, 'rb') as f:
test_scores = pickle.load(f)
if test_scores is None:
estimations = compute_estimations(data_val, labels)
test_scores = get_scores(data_test, labels, estimations, metric_name=metric_name, scores_per_label=False)
if save:
with open(test_metrics_path, 'wb') as f:
pickle.dump(test_scores, f)
return test_scores
def _dynamic_weigths_dir (hits, misses):
h = np.array(hits)
m = np.array(misses)
s = m/h
return s / s.sum()
def _get_LewDir (ensemble, idx):
hits, misses = list(), list()
for net in ensemble.keys():
hit = ensemble[net]['metrics'][idx]['hit']
miss = ensemble[net]['metrics'][idx]['miss']
hits.append(hit)
misses.append(miss)
return _dynamic_weigths_dir (hits, misses)
def _get_NP_weights (predictions, type, sig=1):
n_models, n_labels = predictions.shape
weights = np.zeros([n_labels, n_models])
for i in range(n_labels):
preds = predictions[:,i]
if type == "NP-AVG":
f_ = preds.mean()
elif type == "NP-MAX":
f_ = preds.max()
        weights[i,:] = (1/(sig*np.sqrt(2*np.pi))) * np.exp(-((preds-f_)**2)/(2*sig*sig))  # Gaussian kernel centred on f_; assumes this standard form was intended
weights = weights / weights.sum(axis=0)
return weights
def _get_agg_predictions (preds, method, weights=None, top_k=None):
if top_k is not None and weights is not None:
if top_k > len(weights):
raise Exception("Top {} is greater than the number of classifiers".format(top_k))
k_best = weights.argsort()[::-1][:top_k]
weights = weights[k_best]
weights = weights / weights.sum()
preds = preds[k_best, :]
if method == 'avg':
if weights is not None:
if len(weights) != preds.shape[0] and weights.shape != preds.T.shape:
raise Exception ("The number of weights must be the same of classifiers")
agg_preds = (preds.T * weights).mean(axis=1)
else:
agg_preds = preds.mean(axis=0)
elif method == 'max':
agg_preds = preds.max(axis=0)
agg_preds = agg_preds / agg_preds.sum()
elif method == 'majority':
labels = preds.argmax(axis=1)
agg_preds = np.zeros(preds.shape[1])
for l in labels:
agg_preds[l] += 1
agg_preds = agg_preds / agg_preds.sum()
elif method == 'product':
agg_preds = preds.prod(axis=0)
agg_preds = agg_preds / agg_preds.sum()
elif method == 'topsis':
preds = preds.T
cb = [0] * preds.shape[1]
t = TOPSIS(preds, weights, cb)
t.normalizeMatrix()
if weights is not None:
t.introWeights()
t.getIdealSolutions()
t.distanceToIdeal()
t.relativeCloseness()
agg_preds = t.rCloseness
agg_preds = agg_preds / agg_preds.sum()
elif method == 'geo_avg':
agg_preds = np.sqrt(preds.prod(axis=0))
agg_preds = agg_preds / agg_preds.sum()
elif method == 'dynamic':
agg_preds = (preds.T * weights).sum(axis=1)
else:
raise Exception ("There is no aggregation method called {}".format(method))
return agg_preds
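# --- Illustrative sketch (editor-added) -------------------------------------------------
# Minimal usage example for _get_agg_predictions: three hypothetical classifiers scoring one
# sample over three labels. The probabilities are invented; 'avg' returns the per-label mean
# while 'majority' counts argmax votes and normalizes the counts.
def _demo_aggregation():
    preds = np.array([[0.7, 0.2, 0.1],
                      [0.6, 0.3, 0.1],
                      [0.2, 0.5, 0.3]])
    by_avg = _get_agg_predictions(preds, 'avg')        # [0.5, 0.333..., 0.166...]
    by_vote = _get_agg_predictions(preds, 'majority')  # [0.666..., 0.333..., 0.]
    return by_avg, by_vote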
def agg_ensemble (ensemble, labels, agg_method, weights=None, top_k=None, img_name_col='image',
true_lab_col='REAL'):
nets = list(ensemble.keys())
num_samples = len(ensemble[nets[0]]['data'])
# sanity check
for net in nets[1:]:
if num_samples != len(ensemble[net]['data']):
print ("The network {} has more samples than the remaining ones".format(net))
raise Exception
try:
aux = ensemble[nets[0]]['data'][true_lab_col]
df_cols = ['image', true_lab_col] + labels
except KeyError:
df_cols = ['image'] + labels
df_values = list()
print ("\n- Starting the ensemble aggregation...")
print ("-- {} models: {}".format(len(nets), nets))
print ("-- Method: {}\n-- Weights: {}\n-- Top k: {}\n".format(agg_method, weights, top_k))
# Iterating in each row
with tqdm(total=num_samples, ascii=True, ncols=100) as t:
for row_i in range(num_samples):
# Getting the values for row_i to each model in the ensemble
predictions = list()
previous_img_name, previous_img_lab = None, None
for net in nets:
pred_net = list(ensemble[net]['data'].iloc[row_i][labels].values)
try:
img_name = ensemble[net]['data'].iloc[row_i][img_name_col]
except:
img_name = None
try:
img_lab = ensemble[net]['data'].iloc[row_i][true_lab_col]
except KeyError:
img_lab = None
# More sanities checking
if previous_img_lab is None and img_lab is not None:
previous_img_lab = img_lab
else:
if previous_img_lab != img_lab:
print ("Houston, we have a problem! We are comparing images with different labels: {} and {}".format(
previous_img_lab, img_lab))
raise Exception
if previous_img_name is None:
previous_img_name = img_name
elif img_name is not None:
if previous_img_name != img_name:
print ("Houston, we have a problem! We are comparing images with different names: {} and {}".format(
previous_img_name, img_name))
raise Exception
# Stacking predictions from all models
predictions.append(pred_net)
predictions = np.asarray(predictions)
weights_value = None
if weights == 'LewDir':
weights_value = _get_LewDir (ensemble, row_i)
elif weights == 'NP-AVG' or weights == 'NP-MAX':
weights_value = _get_NP_weights(predictions, weights)
elif isinstance(weights, list):
weights_value = weights
# Getting the aggregated predictions
agg_preds = _get_agg_predictions (predictions, agg_method, weights_value, top_k)
# Saving the values to compose the DataFrame
if img_lab is None:
aux = [img_name]
else:
aux = [img_name, img_lab]
aux.extend(agg_preds)
df_values.append(aux)
t.update()
df = | pd.DataFrame(df_values, columns=df_cols) | pandas.DataFrame |
# %% [markdown]
# # FOI-based hospital/ICU beds data analysis
import pandas
import altair
altair.data_transformers.disable_max_rows()
# %% [markdown]
# ## BHSCT FOI data
#
# * weekly totals, beds data is summed (i.e. bed days)
bhsct_beds = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 2010 - 2020.xlsx', engine='openpyxl', header=[9,10,11], index_col=0, sheet_name='BEDS')
bhsct_beds = bhsct_beds.stack([0,2]).reset_index()
bhsct_beds.rename(columns={'level_0':'Dates','level_1':'Hospital','Dates':'Care'},inplace=True)
bhsct_beds['start'] = pandas.to_datetime(bhsct_beds['Dates'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
bhsct_beds = bhsct_beds.groupby(['start','Care','Hospital'])['Available', 'Occupied'].sum().reset_index()
bhsct_beds = bhsct_beds.melt(id_vars=['start','Care','Hospital'])
bhsct_beds['col'] = bhsct_beds['Care'] + '-' + bhsct_beds['variable']
bhsct_beds = bhsct_beds.pivot(index=['start','Hospital'], columns='col', values='value')
bhsct_beds.rename(columns={'ICU/Critical Care-Available': 'Critical Care Available', 'NON ICU/Critical Care-Available': 'General Available', 'ICU/Critical Care-Occupied': 'Critical Care Occupied', 'NON ICU/Critical Care-Occupied': 'General Occupied'}, inplace=True)
bhsct_ae = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 2010 - 2020.xlsx', engine='openpyxl', header=6, sheet_name='AE')
bhsct_ae['start'] = pandas.to_datetime(bhsct_ae['Dates'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
bhsct_ae.drop(columns=['Dates'],inplace=True)
bhsct_ae = bhsct_ae.melt(id_vars=['start']).groupby(['start','variable'])['value'].sum().reset_index()
bhsct_ae.rename(columns={'variable': 'Hospital', 'value': 'ED Attendances'}, inplace=True)
bhsct_ae.set_index(['start', 'Hospital'], inplace=True)
bhsct_weekly = bhsct_beds.merge(bhsct_ae, how='left', left_index=True, right_index=True)
bhsct_weekly.fillna(0, inplace=True)
bhsct_weekly = bhsct_weekly.astype(int)
bhsct_weekly = bhsct_weekly.reset_index().replace({
'MIH': 'Mater Infirmorum Hospital',
'RBHSC': 'Royal Belfast Hospital for Sick Children',
'RVH': 'Royal Victoria Hospital',
'BCH': 'Belfast City Hospital',
'MPH': 'Musgrave Park Hospital'
}).set_index(['start','Hospital'])
# %% [markdown]
# ## NHSCT FOI data
#
# * daily data
nhsct_ae = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='ED Attendances')
nhsct_ae.dropna(axis='columns', how='all', inplace=True)
nhsct_ae.dropna(axis='index', subset=['Arrival Date'], inplace=True)
nhsct_ae['date'] = pandas.to_datetime(nhsct_ae['Arrival Date'], format='%Y-%m-%d')
nhsct_ae.rename(columns={'Site': 'Hospital'}, inplace=True)
nhsct_ae_daily = nhsct_ae.groupby(['date','Hospital'])['Attendances'].sum()
nhsct_ae_daily.name = 'ED Attendances'
nhsct_icu = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=5, sheet_name='ICU Wards')
nhsct_icu['date'] = pandas.to_datetime(nhsct_icu['DATE'], format='%Y-%m-%d')
nhsct_icu.rename(columns={'HOSPITAL': 'Hospital'}, inplace=True)
nhsct_icu_daily = nhsct_icu.groupby(['date','Hospital'])['AVAILABLE BEDS','OCCUPIED BEDS'].sum()
nhsct_icu_daily.rename(columns={'AVAILABLE BEDS': 'Critical Care Available', 'OCCUPIED BEDS': 'Critical Care Occupied'}, inplace=True)
nhsct_daily = nhsct_icu_daily.merge(nhsct_ae_daily, how='left', left_index=True, right_index=True)
nhsct_nonicu = | pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='Non ICU Wards') | pandas.read_excel |
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import DateFlagsTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = classic_df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
exog = exog.pivot(index="timestamp", columns="segment")
exog = exog.reorder_levels([1, 0], axis=1)
exog = exog.sort_index(axis=1)
exog.columns.names = ["segment", "feature"]
exog.columns = pd.MultiIndex.from_arrays([["Moscow", "Omsk"], ["exog", "exog"]])
ts = TSDataset(df=df, df_exog=exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog
@pytest.fixture()
def ts_future(example_reg_tsds):
future = example_reg_tsds.make_future(10)
return future
def test_check_endings_error_raise():
"""Check that _check_endings method raises exception if some segments end with nan."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[:-5], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
with pytest.raises(ValueError):
ts._check_endings()
def test_check_endings_error_pass():
"""Check that _check_endings method passes if there is no nans at the end of all segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
ts._check_endings()
def test_categorical_after_call_to_pandas():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
ts = TSDataset(df, "D", exog)
flatten_df = ts.to_pandas(flatten=True)
assert flatten_df["categorical_column"].dtype == "category"
@pytest.mark.parametrize(
"borders, true_borders",
(
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
),
(
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
),
((None, "2021-06-20", "2021-06-23", "2021-06-28"), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-06-28")),
(("2021-02-03", "2021-06-20", "2021-06-23", None), ("2021-02-03", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", "2021-06-23", None), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", None, None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
((None, None, "2021-06-21", None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
),
)
def test_train_test_split(borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, true_borders",
(
(11, ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
(9, ("2021-02-01", "2021-06-22", "2021-06-23", "2021-07-01")),
(1, ("2021-02-01", "2021-06-30", "2021-07-01", "2021-07-01")),
),
)
def test_train_test_split_with_test_size(test_size, true_borders, tsdf_with_exog):
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(test_size=test_size)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, borders, true_borders",
(
(
10,
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
15,
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(11, ("2021-02-02", None, None, "2021-06-28"), ("2021-02-02", "2021-06-17", "2021-06-18", "2021-06-28")),
(
4,
("2021-02-03", "2021-06-20", None, "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-28", "2021-07-01"),
),
(
4,
("2021-02-03", "2021-06-20", None, None),
("2021-02-03", "2021-06-20", "2021-06-21", "2021-06-24"),
),
),
)
def test_train_test_split_both(test_size, borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"borders, match",
(
(("2021-01-01", "2021-06-20", "2021-06-21", "2021-07-01"), "Min timestamp in df is"),
(("2021-02-01", "2021-06-20", "2021-06-21", "2021-08-01"), "Max timestamp in df is"),
),
)
def test_train_test_split_warning(borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
10,
("2021-02-01", None, "2021-06-21", "2021-07-01"),
"test_size, test_start and test_end cannot be applied at the same time. test_size will be ignored",
),
),
)
def test_train_test_split_warning2(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
None,
("2021-02-03", None, None, "2021-07-01"),
"At least one of train_end, test_start or test_size should be defined",
),
(
17,
("2021-02-01", "2021-06-20", None, "2021-07-01"),
"The beginning of the test goes before the end of the train",
),
(
17,
("2021-02-01", "2021-06-20", "2021-06-26", None),
"test_size is 17, but only 6 available with your test_start",
),
),
)
def test_train_test_split_failed(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.raises(ValueError, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
def test_dataset_datetime_conversion():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["timestamp"] = classic_df["timestamp"].astype(str)
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
# todo: deal with pandas datetime format
assert df.index.dtype == "datetime64[ns]"
def test_dataset_datetime_conversion_during_init():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
df.index = df.index.astype(str)
exog.index = df.index.astype(str)
ts = TSDataset(df, "D", exog)
assert ts.df.index.dtype == "datetime64[ns]"
def test_make_future_raise_error_on_diff_endings(ts_diff_endings):
with pytest.raises(ValueError, match="All segments should end at the same timestamp"):
ts_diff_endings.make_future(10)
def test_make_future_with_imputer(ts_diff_endings, ts_future):
imputer = TimeSeriesImputerTransform(in_column="target")
ts_diff_endings.fit_transform([imputer])
future = ts_diff_endings.make_future(10)
assert_frame_equal(future.df, ts_future.df)
def test_make_future():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
ts = TSDataset(TSDataset.to_dataset(df), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target"}
def test_make_future_small_horizon():
timestamp = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-01"))
target1 = [np.sin(i) for i in range(len(timestamp))]
target2 = [np.cos(i) for i in range(len(timestamp))]
df1 = pd.DataFrame({"timestamp": timestamp, "target": target1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": target2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df, freq="D")
train = TSDataset(ts[: ts.index[10], :, :], freq="D")
with pytest.warns(UserWarning, match="TSDataset freq can't be inferred"):
assert len(train.make_future(1).df) == 1
def test_make_future_with_exog():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = | pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
    Pandas seems to stop evaluating the groupby expression if the dataframe after the first column split
    has length 1. This seems to be an optimization which should, however, still raise a KeyError.
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
    Test that writing an empty dataframe (zero rows) succeeds.
    In particular, this may fail due to overly strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
    # First partition is empty, test this edge case
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
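# Illustrative sketch of the helper above (argument values are made up for illustration):
#   _exception_str(ValueError("Long error message", "schema mismatch"))  -> "schema mismatch"
#   _exception_str(ValueError("schema mismatch"))                        -> "schema mismatch"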
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.uint64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
"Y": pd.Series([2], dtype=np.int64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1, 2], dtype=np.int64),
"X": | pd.Series([1, 2], dtype=np.int64) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
        tm.assert_frame_equal(result, expected)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 17:13:29 2018
@author: pamelaanderson
"""
from difflib import SequenceMatcher
import json
import numpy as np
import os
import operator
import pandas as pd
def load_adverse_events(path, year, q):
""" Loading adverse drug events while performing basic pre-processing"""
path_w_year = path + year + '/' + q + '/'
json_files = os.listdir(path_w_year)
df_adverse_ev = pd.DataFrame()
file_tot = [file for file in json_files if file not in ['.DS_Store']]
ind = 0
for file in file_tot:
print(file)
adverse_ev_data = json.load(open(path_w_year + file))
df_adverse_ev_json = pd.DataFrame(adverse_ev_data['results'])
        df_adverse_ev = pd.concat([df_adverse_ev, df_adverse_ev_json])
#Soccer Dataset Analysis_______________________________________________________
#Import libraries
import sqlite3
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
pd.set_option('display.precision', 3)
"""
# Extracting twitter data
Uses package tweepy (v4.5.0).
Note that Twitter API was recently updated, and articles like
[this one](https://realpython.com/twitter-bot-python-tweepy/)
are now probably out of date?
References:
- https://dev.to/twitterdev/a-comprehensive-guide-for-using-the-twitter-api-v2-using-tweepy-in-python-15d9 # noqa
"""
import tweepy
from utils import read_yaml
import pandas as pd
from pathlib import Path
f = Path(__file__).parents[1].joinpath("config.yaml")
config = read_yaml(f)
bearer_token = config["tokens"]["bearer_token"]
client = tweepy.Client(bearer_token=bearer_token)
query = "from:baddatasciencer"
tweets = client.search_recent_tweets(
query=query,
tweet_fields=["context_annotations", "created_at"],
max_results=100, # noqa
)
for tweet in tweets.data:
print(tweet.text)
if len(tweet.context_annotations) > 0:
print(tweet.context_annotations)
df_tweets = pd.DataFrame({"tweet_text": tweets.data})
# split into words
import os
import sys
import pandas as pd
import numpy as np
from ast import literal_eval
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import string
from collections import Counter
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import src.utils.utils as ut
#nltk.download('all')
def save_df(data, safe_to):
data.to_csv(safe_to, sep=';')
del data
def read_df(load_from):
    df = pd.read_csv(load_from, sep=';', header=0)
    return df
# Created by <NAME>
# email : <EMAIL>
import json
import os
import time
from concurrent import futures
from copy import deepcopy
from pathlib import Path
from typing import IO, Union, List
from collections import defaultdict
import re
from itertools import tee
import logging
# Non standard libraries
import pandas as pd
from urllib import parse
from aanalytics2 import config, connector, token_provider
from .projects import *
from .requestCreator import RequestCreator
JsonOrDataFrameType = Union[pd.DataFrame, dict]
JsonListOrDataFrameType = Union[pd.DataFrame, List[dict]]
def retrieveToken(verbose: bool = False, save: bool = False, **kwargs)->str:
"""
LEGACY retrieve token directly following the importConfigFile or Configure method.
"""
token_with_expiry = token_provider.get_token_and_expiry_for_config(config.config_object,**kwargs)
token = token_with_expiry['token']
config.config_object['token'] = token
config.config_object['date_limit'] = time.time() + token_with_expiry['expiry'] / 1000 - 500
config.header.update({'Authorization': f'Bearer {token}'})
if verbose:
print(f"token valid till : {time.ctime(time.time() + token_with_expiry['expiry'] / 1000)}")
return token
class Login:
"""
    Class to connect to the login company.
"""
loggingEnabled = False
logger = None
def __init__(self, config: dict = config.config_object, header: dict = config.header, retry: int = 0,loggingObject:dict=None) -> None:
"""
        Instantiate the Login class.
Arguments:
config : REQUIRED : dictionary with your configuration information.
header : REQUIRED : dictionary of your header.
            retry : OPTIONAL : the number of times to retry failed calls
loggingObject : OPTIONAL : If you want to set logging capability for your actions.
"""
if loggingObject is not None and sorted(["level","stream","format","filename","file"]) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}.login")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(
config_object=config, header=header, retry=retry,loggingEnabled=self.loggingEnabled,logger=self.logger)
self.header = self.connector.header
self.COMPANY_IDS = {}
self.retry = retry
def getCompanyId(self,verbose:bool=False) -> dict:
"""
        Retrieve the company ids for later use when querying the company's properties.
"""
if self.loggingEnabled:
self.logger.debug("getCompanyId start")
res = self.connector.getData(
"https://analytics.adobe.io/discovery/me", headers=self.header)
json_res = res
if self.loggingEnabled:
self.logger.debug(f"getCompanyId reponse: {json_res}")
try:
companies = json_res['imsOrgs'][0]['companies']
self.COMPANY_IDS = json_res['imsOrgs'][0]['companies']
return companies
except:
if verbose:
print("exception when trying to get companies with parameter 'all'")
print(json_res)
if self.loggingEnabled:
self.logger.error(f"Error trying to get companyId: {json_res}")
return None
def createAnalyticsConnection(self, companyId: str = None,loggingObject:dict=None) -> object:
"""
Returns an instance of the Analytics class so you can query the different elements from that instance.
Arguments:
companyId: REQUIRED : The globalCompanyId that you want to use in your connection
loggingObject : OPTIONAL : If you want to set logging capability for your actions.
the retry parameter set in the previous class instantiation will be used here.
"""
analytics = Analytics(company_id=companyId,
config_object=self.connector.config, header=self.header, retry=self.retry,loggingObject=loggingObject)
return analytics
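# Hypothetical usage sketch for the Login class above (the 'globalCompanyId' key is an
# assumption about the discovery payload; adjust it to the actual response structure):
#   login = Login()
#   companies = login.getCompanyId()
#   ana = login.createAnalyticsConnection(companies[0]['globalCompanyId'])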
class Analytics:
"""
    Class that instantiates a connection to a single login company.
"""
# Endpoints
header = {"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer ",
"X-Api-Key": ""
}
_endpoint = 'https://analytics.adobe.io/api'
_getRS = '/collections/suites'
_getDimensions = '/dimensions'
_getMetrics = '/metrics'
_getSegments = '/segments'
_getCalcMetrics = '/calculatedmetrics'
_getUsers = '/users'
_getDateRanges = '/dateranges'
_getReport = '/reports'
loggingEnabled = False
logger = None
def __init__(self, company_id: str = None, config_object: dict = config.config_object, header: dict = config.header,
retry: int = 0,loggingObject:dict=None):
"""
Instantiate the Analytics class.
The Analytics class will be automatically connected to the API 2.0.
        You have the possibility to review the connection details by looking into the connector instance.
        "header", "company_id" and "endpoint_company" are attributes accessible for debugging.
        Arguments:
            company_id : REQUIRED : company ID retrieved by the getCompanyId
            retry : OPTIONAL : Number of times you want to retry failed calls
loggingObject : OPTIONAL : logging object to log actions during runtime.
config_object : OPTIONAL : config object to be used for setting token (do not update if you do not know)
header : OPTIONAL : template header used for all requests (do not update if you do not know!)
"""
if company_id is None:
raise AttributeError(
'Expected "company_id" to be referenced.\nPlease ensure you pass the globalCompanyId when instantiating this class.')
if loggingObject is not None and sorted(["level","stream","format","filename","file"]) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}.analytics")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(
config_object=config_object, header=header, retry=retry,loggingEnabled=self.loggingEnabled,logger=self.logger)
self.header = self.connector.header
self.connector.header['x-proxy-global-company-id'] = company_id
self.header['x-proxy-global-company-id'] = company_id
self.endpoint_company = f"{self._endpoint}/{company_id}"
self.company_id = company_id
self.listProjectIds = []
self.projectsDetails = {}
self.segments = []
self.calculatedMetrics = []
try:
import importlib.resources as pkg_resources
pathLOGS = pkg_resources.path(
"aanalytics2", "eventType_usageLogs.pickle")
except ImportError:
try:
# Try backported to PY<37 `importlib_resources`.
import pkg_resources
pathLOGS = pkg_resources.resource_filename(
"aanalytics2", "eventType_usageLogs.pickle")
except:
print('Empty LOGS_EVENT_TYPE attribute')
try:
with pathLOGS as f:
self.LOGS_EVENT_TYPE = pd.read_pickle(f)
except:
self.LOGS_EVENT_TYPE = "no data"
def __str__(self)->str:
obj = {
"endpoint" : self.endpoint_company,
"companyId" : self.company_id,
"header" : self.header,
"token" : self.connector.config['token']
}
return json.dumps(obj,indent=4)
def __repr__(self)->str:
obj = {
"endpoint" : self.endpoint_company,
"companyId" : self.company_id,
"header" : self.header,
"token" : self.connector.config['token']
}
return json.dumps(obj,indent=4)
def refreshToken(self, token: str = None):
if token is None:
raise AttributeError(
'Expected "token" to be referenced.\nPlease ensure you pass the token.')
self.header['Authorization'] = "Bearer " + token
def decodeAArequests(self,file:IO=None,urls:Union[list,str]=None,save:bool=False,**kwargs)->pd.DataFrame:
"""
        Takes either of the parameters to load Adobe Analytics request URLs and decompose them into a dataframe, which you can save if you want.
Arguments:
file : OPTIONAL : file referencing the different requests saved (excel, or txt)
urls : OPTIONAL : list of requests (or a single request) that you want to decode.
            save : OPTIONAL : parameter to save your decoded list into a csv file.
Returns a dataframe.
possible kwargs:
encoding : the type of encoding to decode the file
"""
if self.loggingEnabled:
self.logger.debug(f"Starting decodeAArequests")
if file is None and urls is None:
raise ValueError("Require at least file or urls to contains data")
if file is not None:
if '.txt' in file:
with open(file,'r',encoding=kwargs.get('encoding','utf-8')) as f:
urls = f.readlines() ## passing decoding to urls
elif '.xlsx' in file:
temp_df = pd.read_excel(file,header=None)
urls = list(temp_df[0]) ## passing decoding to urls
if urls is not None:
if type(urls) == str:
data = parse.parse_qsl(urls)
df = pd.DataFrame(data)
df.columns = ['index','request']
df.set_index('index',inplace=True)
if save:
df.to_csv(f'request_{int(time.time())}.csv')
return df
elif type(urls) == list: ## decoding list of strings
tmp_list = [parse.parse_qsl(data) for data in urls]
tmp_dfs = [pd.DataFrame(data) for data in tmp_list]
tmp_dfs2 = []
for df, index in zip(tmp_dfs,range(len(tmp_dfs))):
df.columns = ['index',f"request {index+1}"]
## cleanup timestamp from request url
string = df.iloc[0,0]
df.iloc[0,0] = re.search('http.*://(.+?)/s[0-9]+.*',string).group(1) # tracking server
df.set_index('index',inplace=True)
new_df = df
tmp_dfs2.append(new_df)
df_full = pd.concat(tmp_dfs2,axis=1)
if save:
df_full.to_csv(f'requests_{int(time.time())}.csv')
return df_full
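    # Hypothetical usage sketch for decodeAArequests (the instance name and file name
    # are assumptions, not part of this module):
    #   ana = Login().createAnalyticsConnection("globalCompanyId")
    #   df_requests = ana.decodeAArequests(file="aa_requests.txt", save=False)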
def getReportSuites(self, txt: str = None, rsid_list: str = None, limit: int = 100, extended_info: bool = False,
save: bool = False) -> list:
"""
Get the reportSuite IDs data. Returns a dataframe of reportSuite name and report suite id.
Arguments:
            txt : OPTIONAL : returns the reportSuites that match a specific text field
            rsid_list : OPTIONAL : returns the reportSuites that match the list of rsids set
            limit : OPTIONAL : how many reportSuites to retrieve per server call
save : OPTIONAL : if set to True, it will save the list in a file. (Default False)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getReportSuite")
nb_error, nb_empty = 0, 0 # use for multi-thread loop
params = {}
params.update({'limit': str(limit)})
params.update({'page': '0'})
if txt is not None:
params.update({'rsidContains': str(txt)})
if rsid_list is not None:
params.update({'rsids': str(rsid_list)})
params.update(
{"expansion": "name,parentRsid,currency,calendarType,timezoneZoneinfo"})
if self.loggingEnabled:
self.logger.debug(f"parameters : {params}")
rsids = self.connector.getData(self.endpoint_company + self._getRS,
params=params, headers=self.header)
content = rsids['content']
if not extended_info:
list_content = [{'name': item['name'], 'rsid': item['rsid']}
for item in content]
df_rsids = pd.DataFrame(list_content)
else:
df_rsids = pd.DataFrame(content)
total_page = rsids['totalPages']
last_page = rsids['lastPage']
if not last_page: # if last_page =False
callsToMake = total_page
list_params = [{**params, 'page': page}
for page in range(1, callsToMake)]
list_urls = [self.endpoint_company +
self._getRS for x in range(1, callsToMake)]
listheaders = [self.header for x in range(1, callsToMake)]
workers = min(10, total_page)
with futures.ThreadPoolExecutor(workers) as executor:
res = executor.map(lambda x, y, z: self.connector.getData(
x, y, headers=z), list_urls, list_params, listheaders)
res = list(res)
list_data = [val for sublist in [r['content']
for r in res if 'content' in r.keys()] for val in sublist]
nb_error = sum(1 for elem in res if 'error_code' in elem.keys())
nb_empty = sum(1 for elem in res if 'content' in elem.keys() and len(
elem['content']) == 0)
if not extended_info:
list_append = [{'name': item['name'], 'rsid': item['rsid']}
for item in list_data]
df_append = pd.DataFrame(list_append)
else:
df_append = pd.DataFrame(list_data)
df_rsids = df_rsids.append(df_append, ignore_index=True)
if save:
if self.loggingEnabled:
self.logger.debug(f"saving rsids : {params}")
df_rsids.to_csv('RSIDS.csv', sep='\t')
if nb_error > 0 or nb_empty > 0:
message = f'WARNING : Retrieved data are partial.\n{nb_error}/{len(list_urls) + 1} requests returned an error.\n{nb_empty}/{len(list_urls)} requests returned an empty response. \nTry to use filter to retrieve reportSuite or increase limit per request'
print(message)
if self.loggingEnabled:
self.logger.warning(message)
return df_rsids
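    # Hypothetical usage sketch for getReportSuites (the filter value is chosen purely
    # for illustration):
    #   df_rsids = ana.getReportSuites(txt="prod", extended_info=False)
    #   df_rsids.head()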
def getVirtualReportSuites(self, extended_info: bool = False, limit: int = 100, filterIds: str = None,
idContains: str = None, segmentIds: str = None, save: bool = False) -> list:
"""
        return a list of virtual reportSuites and their ids. It can contain more information if expansion is selected.
        Arguments:
            extended_info : OPTIONAL : boolean to retrieve the maximum amount of information.
            limit : OPTIONAL : how many reportSuites to retrieve per server call
filterIds : OPTIONAL : comma delimited list of virtual reportSuite ID to be retrieved.
idContains : OPTIONAL : element that should be contained in the Virtual ReportSuite Id
segmentIds : OPTIONAL : comma delimited list of segmentId contained in the VRSID
save : OPTIONAL : if set to True, it will save the list in a file. (Default False)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getVirtualReportSuites")
expansion_values = "globalCompanyKey,parentRsid,parentRsidName,timezone,timezoneZoneinfo,currentTimezoneOffset,segmentList,description,modified,isDeleted,dataCurrentAsOf,compatibility,dataSchema,sessionDefinition,curatedComponents,type"
params = {"limit": limit}
nb_error = 0
nb_empty = 0
list_urls = []
if extended_info:
params['expansion'] = expansion_values
if filterIds is not None:
params['filterByIds'] = filterIds
if idContains is not None:
params['idContains'] = idContains
if segmentIds is not None:
params['segmentIds'] = segmentIds
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites"
if self.loggingEnabled:
self.logger.debug(f"params: {params}")
vrsid = self.connector.getData(
path, params=params, headers=self.header)
content = vrsid['content']
if not extended_info:
list_content = [{'name': item['name'], 'vrsid': item['id']}
for item in content]
df_vrsids = pd.DataFrame(list_content)
else:
df_vrsids = pd.DataFrame(content)
total_page = vrsid['totalPages']
last_page = vrsid['lastPage']
if not last_page: # if last_page =False
callsToMake = total_page
list_params = [{**params, 'page': page}
for page in range(1, callsToMake)]
list_urls = [path for x in range(1, callsToMake)]
listheaders = [self.header for x in range(1, callsToMake)]
workers = min(10, total_page)
with futures.ThreadPoolExecutor(workers) as executor:
res = executor.map(lambda x, y, z: self.connector.getData(
x, y, headers=z), list_urls, list_params, listheaders)
res = list(res)
list_data = [val for sublist in [r['content']
for r in res if 'content' in r.keys()] for val in sublist]
nb_error = sum(1 for elem in res if 'error_code' in elem.keys())
nb_empty = sum(1 for elem in res if 'content' in elem.keys() and len(
elem['content']) == 0)
if not extended_info:
list_append = [{'name': item['name'], 'vrsid': item['id']}
for item in list_data]
df_append = pd.DataFrame(list_append)
else:
df_append = pd.DataFrame(list_data)
df_vrsids = df_vrsids.append(df_append, ignore_index=True)
if save:
df_vrsids.to_csv('VRSIDS.csv', sep='\t')
if nb_error > 0 or nb_empty > 0:
message = f'WARNING : Retrieved data are partial.\n{nb_error}/{len(list_urls) + 1} requests returned an error.\n{nb_empty}/{len(list_urls)} requests returned an empty response. \nTry to use filter to retrieve reportSuite or increase limit per request'
print(message)
if self.loggingEnabled:
self.logger.warning(message)
return df_vrsids
def getVirtualReportSuite(self, vrsid: str = None, extended_info: bool = False,
format: str = 'df') -> JsonOrDataFrameType:
"""
return a single virtual report suite ID information as dataframe.
Arguments:
vrsid : REQUIRED : The virtual reportSuite to be retrieved
extended_info : OPTIONAL : boolean to add more information
format : OPTIONAL : format of the output. 2 values "df" for dataframe and "raw" for raw json.
"""
if vrsid is None:
raise Exception("require a Virtual ReportSuite ID")
if self.loggingEnabled:
self.logger.debug(f"Starting getVirtualReportSuite for {vrsid}")
expansion_values = "globalCompanyKey,parentRsid,parentRsidName,timezone,timezoneZoneinfo,currentTimezoneOffset,segmentList,description,modified,isDeleted,dataCurrentAsOf,compatibility,dataSchema,sessionDefinition,curatedComponents,type"
params = {}
if extended_info:
params['expansion'] = expansion_values
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites/{vrsid}"
data = self.connector.getData(path, params=params, headers=self.header)
if format == "df":
data = pd.DataFrame({vrsid: data})
return data
def getVirtualReportSuiteComponents(self, vrsid: str = None, nan_value=""):
"""
        Uses the getVirtualReportSuite function to get a VRS and returns
        its curated components as a dataframe. The VRS must have Component Curation enabled.
Arguments:
vrsid : REQUIRED : Virtual Report Suite ID
nan_value : OPTIONAL : how to handle empty cells, default = ""
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getVirtualReportSuiteComponents")
vrs_data = self.getVirtualReportSuite(extended_info=True, vrsid=vrsid)
if "curatedComponents" not in vrs_data.index:
            return pd.DataFrame()
import os
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.preprocessing import PowerTransformer
from src.utils.utils_s3 import read_s3_graphml, write_s3_graphml
class PanelDataETL:
def __init__(self,input_filepath, output_filepath):
self.input_filepath = input_filepath
self.output_filepath = output_filepath
self.centralities = ['hubs', 'authorities', 'pagerank', 'gfi', 'bridging', 'favor']
def networks_etl(self):
all_years = []
for year in range(1990, 2020):
year = str(year)
# Capital network --------------------------------------------
network_path = os.path.join(self.output_filepath, year, 'A_country.graphml')
G = read_s3_graphml(network_path)
df = pd.DataFrame(index=G.nodes)
for c in self.centralities:
df['financial_'+c] = df.index.map(nx.get_node_attributes(G,c))
df['financial_hhi'] = df.index.map(nx.get_node_attributes(G,'hhi_index'))
# Goods network --------------------------------------------
network_path = os.path.join(self.output_filepath, year, 'B_country.graphml')
G = read_s3_graphml(network_path)
for c in self.centralities:
df['goods_'+c] = df.index.map(nx.get_node_attributes(G,c))
df['goods_hhi'] = df.index.map(nx.get_node_attributes(G,'hhi_index'))
# Migration network ---------------------------------------------
network_path = os.path.join(self.output_filepath, year, 'migration_network.graphml')
G = read_s3_graphml(network_path)
for c in self.centralities:
df['human_'+c] = df.index.map(nx.get_node_attributes(G,c))
df['human_hhi'] = df.index.map(nx.get_node_attributes(G,'hhi_index'))
'''
# Estimated Migration network ---------------------------------------------
network_path = os.path.join(self.output_filepath, year, 'estimated_migration_network.graphml')
G = read_s3_graphml(network_path)
for c in self.centralities:
df['estimated_human_'+c] = df.index.map(nx.get_node_attributes(G,c))
df['estimated_human_hhi'] = df.index.map(nx.get_node_attributes(G,'hhi_index'))
'''
# Compile ---------------------------
out_path = os.path.join(self.output_filepath, year, 'industry_output.parquet')
df_out=pd.read_parquet(out_path)
df_year = df.merge(df_out, left_index=True, right_index=True)
gdp_path = os.path.join(self.output_filepath, year, 'gdp.parquet')
df_gdp=pd.read_parquet(gdp_path)
df_year = df_year.merge(df_gdp, left_index=True, right_index=True)
df_year['year'] = year
all_years.append(df_year)
        df = pd.concat(all_years)
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
# check whether the string can be convertable to single color
maybe_single_color = _maybe_valid_colors([colors])
# check whether each character can be convertable to colors
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
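# Example usage of the module-level option store defined above; 'x_compat' is an
# alias for 'xaxis.compat', so both keys refer to the same stored option:
#   plot_params['x_compat'] = True
#   with plot_params.use('x_compat', False):
#       pass  # the option is temporarily False inside the block and restored on exit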
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
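    Examples:
    --------
    A minimal sketch (the CSV path is a placeholder; any frame with a class column works):
    >>> from pandas import read_csv
    >>> from pandas.tools.plotting import radviz
    >>> df = read_csv('iris.csv')
    >>> radviz(df, 'Name')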
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has right_ax property, ``ax`` must be left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has left_ax property, ``ax`` must be right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
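# _get_numeric_data() keeps only columns coercible to numeric; everything else is silently dropped from the plot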
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise | AbstractMethodError(self) | pandas.core.common.AbstractMethodError |
"""Tests for the cost bounds."""
import pytest
import uclasm
from uclasm import Graph, MatchingProblem
from uclasm.matching import *
import numpy as np
from scipy.sparse import csr_matrix
import pandas as pd
@pytest.fixture
def smp():
"""Create a subgraph matching problem."""
adj0 = csr_matrix([[0, 0, 0],
[1, 0, 0],
[0, 0, 0]])
adj1 = csr_matrix([[0, 0, 0],
[0, 0, 0],
[0, 1, 0]])
nodelist = pd.DataFrame(['a', 'b', 'c'], columns=[Graph.node_col])
edgelist = pd.DataFrame([['b', 'a', 'c1'],
['c', 'b', 'c2']], columns=[Graph.source_col,
Graph.target_col,
Graph.channel_col])
tmplt = Graph([adj0, adj1], ['c1', 'c2'], nodelist, edgelist)
world = Graph([adj0, adj1], ['c1', 'c2'], nodelist, edgelist)
smp = MatchingProblem(tmplt, world)
return smp
@pytest.fixture
def smp_noisy():
"""Create a noisy subgraph matching problem."""
adj0 = csr_matrix([[0, 0, 0],
[1, 0, 0],
[0, 0, 0]])
adj1 = csr_matrix([[0, 0, 0],
[0, 0, 0],
[0, 1, 0]])
nodelist = | pd.DataFrame(['a', 'b', 'c'], columns=[Graph.node_col]) | pandas.DataFrame |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class TestRemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ed Ed', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed']
class TestNormalizeExpeditionSectionCols:
def test_dataframe_does_not_change_if_expection_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Sample_exist(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Label_exist(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expection_section_using_Label(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expection_section_using_Sample(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_missing_aw_col(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3", "10-U2H-20T-3"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_no_data(self):
data = {
"Col": [0],
"Sample": ["No data this hole"],
}
df = pd.DataFrame(data)
data = {
"Col": [0],
"Sample": ["No data this hole"],
"Exp": [None],
"Site": [None],
"Hole": [None],
"Core": [None],
"Type": [None],
"Section": [None],
"A/W": [None],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_otherwise_raise_error(self):
df = pd.DataFrame({"foo": [1]})
message = "File does not have the expected columns."
with pytest.raises(ValueError, match=message):
normalize_expedition_section_cols(df)
class TestRemoveBracketText:
def test_removes_text_within_brackets_at_end_of_cell(self):
df = pd.DataFrame(['aa [A]', 'bb [BB]', 'cc [C] ', 'dd [dd] '])
expected = pd.DataFrame(['aa', 'bb', 'cc', 'dd'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_at_start_of_cell(self):
df = pd.DataFrame(['[A] aa', '[BB] bb', '[C] cc ', ' [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_in_middle_of_cell(self):
df = pd.DataFrame(['aa [A] aa', 'bb [BB] bb', ' cc [C] cc ', ' dd [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_removes_letters_numbers_punctuation_within_brackets(self):
df = pd.DataFrame(['aa [A A]', 'bb [BB 123]', 'cc [123-456.] '])
expected = pd.DataFrame(['aa', 'bb', 'cc'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
class TestRemoveWhitespaceFromDataframe:
def test_remove_leading_and_trailing_spaces_from_dataframe(self):
data = {
'A': ['A', 'B ', ' C', 'D ', ' Ed ', ' 1 '],
'B': ['Aa', 'Bb ', ' Cc', 'Dd ', ' Ed Ed ', ' 11 '],
}
df = pd.DataFrame(data)
data2 = {
'A': ['A', 'B', 'C', 'D', 'Ed', '1'],
'B': ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed', '11'],
}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_ignores_numeric_columns(self):
data = {
'A': ['A', 'B ', ' C'],
'B': [1, 2, 3],
'C': [1.1, 2.2, 3.3],
}
df = pd.DataFrame(data)
data2 = {
'A': ['A', 'B', 'C'],
'B': [1, 2, 3],
'C': [1.1, 2.2, 3.3],
}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_handles_empty_strings(self):
data = {'A': ['A', 'B ', ' C', ' ']}
df = pd.DataFrame(data)
data2 = {'A': ['A', 'B', 'C', '']}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_converts_nan_to_empty_strings(self):
data = {'A': ['A', 'B ', ' C', np.nan]}
df = pd.DataFrame(data)
data2 = {'A': ['A', 'B', 'C', '']}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
class Testddm2dec:
def test_returns_decimal_degree_for_degree_decimal_minute(self):
string = '25 51.498 N'
assert ddm2dec(string) == 25.8583
def test_works_with_decimal(self):
string = '25 .498 N'
assert ddm2dec(string) == 25.0083
def test_works_with_integer(self):
string = '25 20 N'
assert ddm2dec(string) == 25.333333333333332
def test_works_with_direction_first(self):
string = 'N 25 51.498'
assert ddm2dec(string) == 25.8583
@pytest.mark.parametrize("string,result", [("25° 51.498'N", 25.8583), ("25°51.498'N", 25.8583)])
def test_works_with_degree_minute_notation(self, string, result):
assert ddm2dec(string) == result
@pytest.mark.parametrize("string,result", [('25 51.498 e', 25.8583), ('25 51.498 w', -25.8583), ('25 51.498 S', -25.8583), ('25 51.498 n', 25.8583)])
def test_adds_correct_sign_for_direction(self, string, result):
assert ddm2dec(string) == result
class TestRemoveEmptyUnnamedColumns:
def test_remove_unnamed_columns_with_no_content(self):
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'Unnamed: 12': [None, None, None]}
df = pd.DataFrame(data)
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c']}
expected = pd.DataFrame(data)
remove_empty_unnamed_columns(df)
assert_frame_equal(df, expected)
def test_does_not_change_named_columns_without_content(self):
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'C': [None, None, None]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
remove_empty_unnamed_columns(df)
assert_frame_equal(df, expected)
def test_does_not_change_unnamed_columns_with_content(self):
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'Unnamed: 12': ['a', None, None]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
remove_empty_unnamed_columns(df)
assert_frame_equal(df, expected)
class TestNormalizeColumns:
def test_replace_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A"}
data = {"aa": [1]}
df = pd.DataFrame(data)
data = {"A": [1]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_replace_multiple_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2]}
expected = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import cv2
import sys
import os
from keras.models import Sequential
from keras.callbacks import Callback, ModelCheckpoint
from keras.layers import (Flatten, Dense, Convolution2D, MaxPool2D,
BatchNormalization, Dropout, Activation, Cropping2D, Lambda)
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
from keras.backend import tf as ktf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from scipy.misc import imread
import scipy
import matplotlib
import matplotlib.pyplot as plt
import argparse
import json
import random
matplotlib.style.use('ggplot')
########################### Utilities #########################################
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
###
############################# VISUALIZATION ####################################
def show_data_distribution(df):
binwidth = 0.025
# histogram before image augmentation
plt.hist(df.steering_angle, bins=np.arange(min(df.steering_angle), max(df.steering_angle) + binwidth, binwidth))
plt.title('Number of images per steering angle')
plt.xlabel('Steering Angle')
plt.ylabel('# Frames')
plt.show()
############################### NETWORK ########################################
def nvidia_end_to_end(shape, l2_regularization_scale):
print("Training Nvidia End To End of input shape %s" % str(shape))
height = shape[0]
crop_factor = 0.2 # Top 20% to be removed
crop_size = (int)(crop_factor * height)
model = Sequential()
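# Crop the top 20% of each frame, then rescale pixel values to [-0.5, 0.5] before the convolutional stack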
model.add(Cropping2D(cropping=((crop_size, 0), (0, 0)), input_shape=shape))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
model.add(BatchNormalization(axis=1, input_shape=shape))
model.add(Convolution2D(16, (3, 3), padding='valid', strides=(2, 2), activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(24, (3, 3), padding='valid', strides=(1, 2), activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(36, (3, 3), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(48, (2, 2), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(48, (2, 2), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Flatten())
model.add(Dense(512,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Dropout(.5))
model.add(Activation('elu'))
model.add(Dense(10,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Activation('elu'))
model.add(Dense(1,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.summary()
adam = Adam(lr=0.0001)
model.compile(loss='mse', optimizer=adam)
return model
################################# Dataset Manipulation Functions ##############################
def flip_image(img):
fimg = np.fliplr(img)
return fimg
def read_image(filename):
img = imread(filename).astype(np.float32)
img = scipy.misc.imresize(img, 50)
return img
def change_brightness(img):
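# Add a random brightness offset, saturating any pixel that would exceed 255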
change_pct = int(random.uniform(0, 100))
mask = (255 - img) < change_pct
img = np.where((255 - img) < change_pct, 255, img + change_pct)
return img
def read_csv(filename, cols):
print("Reading Training file: %s" % filename)
return pd.read_csv(filename, names=cols)
def drop_zero_value_steering_angle_rows(df, drop_to):
"""
df: The dataframe to drop rows from
col_name: The column to check from for steering_angle
drop_to: How many rows to drop to
"""
# print("Total rows: %s" % len(df))
# indices = df[df[col_name] == 0.0].index
# total_existing = indices.shape[0]
# print("Total Zero Value rows: %s" % total_existing)
# print("Dropping %s rows from df" % (total_existing - drop_to))
# remove_indices = np.random.choice(indices, size=total_existing - drop_to)
# new_df = df.drop(remove_indices)
# indices = new_df[new_df[col_name] == 0.0].index
# print("Remaining zero value %s" % len(indices))
#
# print("Total rows: %s" % len(new_df))
# print("Dropped %s rows" % (total_existing - drop_to))
# assert(len(df) - len(new_df) == (total_existing - drop_to))
# return new_df
df_with_zero = df[df.steering_angle == 0]
df_without_zero = df[df.steering_angle != 0]
df_with_zero = df_with_zero.sample(n=drop_to)
new_df = | pd.concat([df_with_zero, df_without_zero]) | pandas.concat |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree():
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
m = Node('m', children=[p])
p = m['p']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 1
assert 'p' in m.children
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.ix[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.ix[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.ix[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.ix[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100) | pandas.DataFrame |
from pso.APSO_01 import APSO
import numpy as np
import time
import pandas as pd
np.random.seed(42)
def Sphere(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return np.sum(x ** 2, axis=1)
def Schwefel_P222(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return np.sum(np.abs(x), axis=1) + np.prod(np.abs(x), axis=1)
def Quadric(x):
if x.ndim == 1:
x = x.reshape(1, -1)
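# Schwefel's Problem 1.2 (rotated hyper-ellipsoid): sum of squared partial sums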
outer = 0
for i in range(x.shape[1]):
inner = np.sum(x[:, :i + 1], axis=1) ** 2
outer = outer + inner
return outer
def Rosenbrock(x):
if x.ndim == 1:
x = x.reshape(1, -1)
left = x[:, :-1].copy()
right = x[:, 1:].copy()
return np.sum(100 * (right - left ** 2) ** 2 + (left - 1) ** 2, axis=1)
def Step(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return np.sum(np.round((x + 0.5), 0) ** 2, axis=1)
def Quadric_Noise(x):
if x.ndim == 1:
x = x.reshape(1, -1)
matrix = np.arange(x.shape[1]) + 1
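# Quartic function with noise: sum_i i * x_i**4 plus uniform noise in [0, 1)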
return np.sum((x ** 4) * matrix, axis=1) + np.random.rand(x.shape[0])
def Schwefel(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return -1 * np.sum(x * np.sin(np.abs(x) ** .5), axis=1)
def Rastrigin(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x) + 10, axis=1)
def Noncontinuous_Rastrigin(x):
if x.ndim == 1:
x = x.reshape(1, -1)
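# Non-continuous variant: coordinates with |x| >= 0.5 are rounded to the nearest multiple of 0.5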
outlier = np.abs(x) >= 0.5
x[outlier] = np.round(2 * x[outlier]) / 2
return np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x) + 10, axis=1)
def Ackley(x):
if x.ndim == 1:
x = x.reshape(1, -1)
left = 20 * np.exp(-0.2 * (np.sum(x ** 2, axis=1) / x.shape[1]) ** .5)
right = np.exp(np.sum(np.cos(2 * np.pi * x), axis=1) / x.shape[1])
return -left - right + 20 + np.e
def Griewank(x):
if x.ndim == 1:
x = x.reshape(1, -1)
left = np.sum(x ** 2, axis=1) / 4000
right = np.prod(np.cos(x / ((np.arange(x.shape[1]) + 1) ** .5)), axis=1)
return left - right + 1
d = 30
g = 10000
p = 20
times = 30
table = np.zeros((2, 11))
for i in range(times):
x_max = 100 * np.ones(d)
x_min = -100 * np.ones(d)
optimizer = APSO(fit_func=Sphere, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 0] += optimizer.gBest_score
table[1, 0] += end - start
x_max = 10 * np.ones(d)
x_min = -10 * np.ones(d)
optimizer = APSO(fit_func=Schwefel_P222, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 1] += optimizer.gBest_score
table[1, 1] += end - start
x_max = 100 * np.ones(d)
x_min = -100 * np.ones(d)
optimizer = APSO(fit_func=Quadric, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 2] += optimizer.gBest_score
table[1, 2] += end - start
x_max = 10 * np.ones(d)
x_min = -10 * np.ones(d)
optimizer = APSO(fit_func=Rosenbrock, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 3] += optimizer.gBest_score
table[1, 3] += end - start
x_max = 100 * np.ones(d)
x_min = -100 * np.ones(d)
optimizer = APSO(fit_func=Step, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 4] += optimizer.gBest_score
table[1, 4] += end - start
x_max = 1.28 * np.ones(d)
x_min = -1.28 * np.ones(d)
optimizer = APSO(fit_func=Quadric_Noise, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 5] += optimizer.gBest_score
table[1, 5] += end - start
x_max = 500 * np.ones(d)
x_min = -500 * np.ones(d)
optimizer = APSO(fit_func=Schwefel, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 6] += optimizer.gBest_score
table[1, 6] += end - start
x_max = 5.12 * np.ones(d)
x_min = -5.12 * np.ones(d)
optimizer = APSO(fit_func=Rastrigin, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 7] += optimizer.gBest_score
table[1, 7] += end - start
x_max = 5.12 * np.ones(d)
x_min = -5.12 * np.ones(d)
optimizer = APSO(fit_func=Noncontinuous_Rastrigin, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 8] += optimizer.gBest_score
table[1, 8] += end - start
x_max = 32 * np.ones(d)
x_min = -32 * np.ones(d)
optimizer = APSO(fit_func=Ackley, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 9] += optimizer.gBest_score
table[1, 9] += end - start
x_max = 600 * np.ones(d)
x_min = -600 * np.ones(d)
optimizer = APSO(fit_func=Griewank, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
start = time.time()
optimizer.opt()
end = time.time()
table[0, 10] += optimizer.gBest_score
table[1, 10] += end - start
table = table / times
print(table)
table = | pd.DataFrame(table) | pandas.DataFrame |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.setting import setting
from src.cpePaser import day_extract
from src.cpePaser import week_extract
import pandas as pd
from src.timeOperator import timeOpt
import numpy as np
import math
import os
from src.postEva.exper_paser import exper_paser
from src.postEva.capacity_paser import capacity_paser
from src.postEva.capacity_paser import cell
from src.logger_setting.my_logger import get_logger
logger = get_logger()
engineer_map = {}
threshold_map = {}
def get_post_df():
min_month = week_extract.get_min_month()
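# Day-level files are taken from the third month after the earliest month (min_month + 2 to min_month + 3)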
all_file = week_extract.get_file_by_range(timeOpt.add_months(min_month, 2), timeOpt.add_months(min_month, 3))
df = pd.DataFrame(columns=setting.parameter_json["post_eva_from_day_column_name"])
for f in all_file:
file_df = | pd.read_csv(f, error_bad_lines=False, index_col=False) | pandas.read_csv |
import os
import numpy as np
import holoviews as hv
hv.extension('bokeh')
from collections import defaultdict
from fcsy.fcs import write_fcs
from sklearn.preprocessing import MinMaxScaler
from sklearn import cluster
from sklearn import mixture
from scipy.stats import gaussian_kde
from ssc.cluster import selfrepresentation as sr
import pandas as pd
import random
import sys
def nested_dict():
"""
A nested dictionary for hierarchical storage of thresholds.
"""
return defaultdict(nested_dict)
class Normalization:
"""
Automated intensity normalization framework for cycIF imaging datasets.
Parameters
----------
data: pandas DataFrame
Contains batch ids, scene ids, cell ids, and marker intensities.
marker_pairs: list of lists
Each sub-list contains a marker and its exclusive counterpart marker.
save_dir: string
Path to directory for saving results
manual_thresh: nested dict, optional
Manual thresholds for adding to figures
Attributes
----------
threshs: pandas Dataframe
"""
def __init__(self, data, marker_status, marker_pairs, save_dir, save_figs=False, manual_threshs=None):
self.data = data
self.marker_status = marker_status
self.marker_pairs = marker_pairs
self.threshs = pd.DataFrame({'batch': [], 'scene': [], 'model': []})
for marker_pair in self.marker_pairs:
self.threshs[marker_pair[0]] = np.nan
self.save_dir = save_dir
self.save_figs = save_figs
self.manual_threshs = manual_threshs
self._counter = 1
def get_thresh_curve(self, data, marker_pair, thresh, color, batch):
"""
Get holoview curve from thresh
Parameters
----------
data: pandas Dataframe
Contains batch ids, scene ids, cell ids, and marker intensities.
marker_pair: list
A two-element list of marker names
thresh: int (?)
"""
p = int(round(data[:, 1].max()))
x = np.array([thresh] * p)[:, np.newaxis]
y = np.arange(0, p)[:, np.newaxis]
xlabel = marker_pair[0]
ylabel = marker_pair[1]
xmax = data[:, 0].max()
xlim = (0, max(thresh * 1.1, xmax))
ymax = data[:, 1].max()
ylim = (0, ymax)
if batch == 'global':
line_dash = 'solid'
else:
line_dash = 'dotted'
curve = hv.Curve(np.hstack((x, y))).opts(xlabel=xlabel,
ylabel=ylabel,
xrotation=45,
xlim=xlim,
ylim=ylim,
tools=['hover'],
show_legend=False,
line_width=2,
color=color,
line_dash=line_dash)
return curve
def get_GMM_thresh(self, data, marker_pair, model, sigma_weight, color, batch):
"""
sigma_weight: int, Weighting factor for sigma, where higher value == fewer cells classified as positive.
"""
model.fit(data)
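# Take the mixture component with the larger y-axis variance as the negative population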
neg_idx = np.argmax([np.diagonal(i)[1] for i in model.covariances_]) # Diagonal of covariance matrix = variance
mu = model.means_[neg_idx, 0]
# Extract sigma from covariance matrix
sigma = np.sqrt(np.diagonal(model.covariances_[neg_idx])[0])
thresh = mu + sigma_weight * sigma
curve = self.get_thresh_curve(data, marker_pair, thresh, color, batch)
return thresh, curve
def get_clustering_thresh(self, data, marker_pair, model, sigma_weight, color, batch):
"""
sigma_weight: int, Weighting factor for sigma, where higher value == fewer cells classified as positive.
"""
model.fit(data)
clusters = [
data[model.labels_.astype('bool')],
data[~model.labels_.astype('bool')]
]
# Identify negative cluster based on maximum std on y-axis
neg_cluster = clusters[np.argmax([i[:, 1].std() for i in clusters])]
mu = neg_cluster[:, 0].mean()
sigma = neg_cluster[:, 0].std()
thresh = mu + sigma_weight * sigma
curve = self.get_thresh_curve(data, marker_pair, thresh, color, batch)
return thresh, curve
def get_marker_pair_thresh(self, data, scene, marker_pair, batch):
marker_pair_data = data[marker_pair].to_numpy()
xlabel = marker_pair[0]
ylabel = marker_pair[1]
curves = []
if batch != 'global' and marker_pair[0].split('_')[0] in self.marker_status[(self.marker_status.batch == str(batch)) & (self.marker_status.scene == scene)]:
m_st = self.marker_status[(self.marker_status.batch == batch) & (self.marker_status.scene == scene)][marker_pair[0].split('_')[0]].values
else:
m_st = 1
if m_st == 0:
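# Marker flagged as absent for this batch/scene: push the threshold to the data maximum so no cell is called positive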
thresh = marker_pair_data[:, 0].max()
self.threshs.loc[(self.threshs.scene == scene) & (self.threshs.batch == batch) & (
self.threshs.model == 'GMM'), xlabel] = thresh
self.threshs.loc[(self.threshs.scene == scene) & (self.threshs.batch == batch) & (
self.threshs.model == 'KMeans'), xlabel] = thresh
self.threshs.loc[(self.threshs.scene == scene) & (self.threshs.batch == batch) & (
self.threshs.model == 'SSC'), xlabel] = thresh
curve = self.get_thresh_curve(marker_pair_data, marker_pair, thresh, 'black', batch)
curves.append(curve)
else:
models = (
('KMeans', 'magenta', cluster.KMeans(n_clusters=2)),
('GMM', 'blue', mixture.GaussianMixture(n_components=2, n_init=10)),
('SSC', 'green', sr.SparseSubspaceClusteringOMP(n_clusters=2))
)
#models = (
# ('SSC', 'green', sr.SparseSubspaceClusteringOMP(n_clusters=2))
#)
sigma_weight = 0 # TODO: parameterize
for name, color, model in models:
if name == 'GMM':
thresh, curve = self.get_GMM_thresh(marker_pair_data,
marker_pair,
model,
sigma_weight,
color,
batch)
else:
thresh, curve = self.get_clustering_thresh(marker_pair_data,
marker_pair,
model,
sigma_weight,
color,
batch)
curves.append(curve)
self.threshs.loc[(self.threshs.scene == scene) & (self.threshs.batch == batch) & (
self.threshs.model == name), xlabel] = thresh
if batch == 'global':
scatter = hv.Scatter(marker_pair_data).opts(xlabel=xlabel.split('_')[0],
ylabel=ylabel.split('_')[0],
xrotation=45,
show_legend=False,
alpha=0.2,
color='gray')
else:
if m_st == 1:
# Get kde for plotting density of local scatter plot
marker_pair_data_T = marker_pair_data.T
z = gaussian_kde(marker_pair_data_T)(marker_pair_data_T)
marker_pair_data = np.vstack((marker_pair_data_T, z)).T
scatter = hv.Scatter(marker_pair_data, vdims=['y', 'z']).opts(title=str(batch),
xlabel=xlabel.split('_')[0],
ylabel=ylabel.split('_')[0],
xrotation=45,
show_legend=False,
color='z')
else:
scatter = hv.Scatter(marker_pair_data).opts(title=str(batch),
xlabel=str(batch) + '_' + xlabel.split('_')[0],
ylabel=ylabel.split('_')[0],
xrotation=45,
show_legend=False,
color='red')
return scatter, curves
def save_thresh_figs(self, scene_scatters, scene, save_dir, feat):
"""
Save hv figures for all marker pairs for a given scene as an hv.Layout()
if 'To' in feat: #split html files to prevent memory error
for i in range(len(scene_scatters)):
final_fig = hv.Layout(scene_scatters[i: (i+1)]).opts(title=scene, shared_axes=False).cols(3)
hv.save(final_fig, f'{save_dir}/{scene}_{feat}_gate_dist_plot_{i}.html')
else:
final_fig = hv.Layout(scene_scatters).opts(title=scene, shared_axes=False).cols(3)
hv.save(final_fig, f'{save_dir}/{scene}_{feat}_gate_dist.html')
"""
def visualize_scene(self, scene):
"""
Threshold prediction and figure generation
"""
scene_data = self.data[self.data.scene == scene]
model_names = ['KMeans', 'GMM', 'SSC']
batches = set(scene_data.batch)
batches.add('global')
for batch in batches:
for m in model_names:
self.threshs = self.threshs.append({'batch': batch, 'scene': scene, 'model': m},
ignore_index=True)
scene_scatters = []
feature = (self.marker_pairs[0][0].split('_')[1] + '_' + self.marker_pairs[0][0].split('_')[2])
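# Marker columns are assumed to follow a <marker>_<feature> naming scheme; a new figure group starts whenever the feature part changes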
for marker_pair in self.marker_pairs:
if (marker_pair[0].split('_')[1] + '_' + marker_pair[0].split('_')[2]) != feature:
if self.save_figs:
self.save_thresh_figs(scene_scatters, str(scene), self.save_dir, feature)
scene_scatters = []
print(str(self._counter) + '/' + str(437*9) + ': Processing scene = ' + str(scene) + ', batch = ' +
'global' + ' and marker_pair = ' + str(marker_pair))
global_scatter, global_curves = self.get_marker_pair_thresh(scene_data,
scene,
marker_pair,
'global')
for batch in set(scene_data.batch):
print(str(self._counter)
+ '/' + str(437*9) + ': Processing scene = ' + str(scene) + ', batch = ' +
str(batch) + ' and marker_pair = ' + str(marker_pair))
self._counter += 1
batch_scene_data = scene_data[scene_data.batch == batch]
local_scatter, local_curves = self.get_marker_pair_thresh(batch_scene_data,
scene,
marker_pair,
batch)
scene_scatters.append(global_scatter * local_scatter * hv.Overlay(local_curves) * hv.Overlay(global_curves))
feature = (marker_pair[0].split('_')[1] + '_' + marker_pair[0].split('_')[2])
def predict_thresh(self):
os.makedirs(self.save_dir, exist_ok=True)
scenes = set(self.data.scene)
for s in scenes:
self.visualize_scene(s)
self.threshs.to_csv(self.save_dir + '/threshs.csv', sep=';', index=False, decimal=',')
def normalize_scene(self):
'''
normalize scene with predicted thresholds - all background values become < 1 and all foreground values > 1
'''
normalized_data = self.data.copy()
scenes = set(self.data.scene)
for s in scenes:
scene_data = self.data[self.data.scene == s]
for marker_pair in self.marker_pairs:
for batch in set(scene_data.batch):
threshold = self.threshs[(self.threshs.batch == str(batch)) & (self.threshs.scene == s) & (self.threshs.model =='SSC')][marker_pair[0]]
threshold = threshold.values[0]
data = scene_data[scene_data.batch == batch][marker_pair[0]]
normalized_values = data/threshold
normalized_data.loc[(normalized_data.scene == s) & (normalized_data.batch == batch), marker_pair[0]] = normalized_values
return normalized_data
def scale_data(self, features_df: pd.DataFrame):
'''
All predicted background values are set to random values in the interval [0, 0.02] and foreground values are set to [0.02,1].
Morphological features are scaled between 0 and 1.
'''
def scale(arr, new_min, new_max, lower_perc, upper_perc):
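# Percentile-clipped min-max scaling: values between the lower and upper percentiles are mapped onto [new_min, new_max], the rest are clipped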
arr_min = np.percentile(arr, lower_perc)
arr_max = np.percentile(arr, upper_perc)
if arr.size == 1:
new_arr = arr/arr_max
else:
arr_delta = arr_max - arr_min
new_delta = new_max - new_min
tmp = (arr - arr_min)/arr_delta
tmp[tmp < 0] = 0
tmp[tmp > 1] = 1
new_arr = tmp * new_delta + new_min
return new_arr
scenes = list(set(features_df.scene))
list_df = []
for s in scenes:
df = features_df[features_df.scene == s].copy()
df_short = df.drop(['batch', 'y_mean', 'x_mean', 'scene', 'cell', 'FoV_counter'], axis=1)
df_ar = df_short.values
for i in range(0, df_ar.shape[1]):
if 'Propidium' not in df.columns[i]:
BG_co = np.where(df_ar[:, i] <= 1)
FG_co = np.where(df_ar[:, i] > 1)
df_ar[BG_co, i] = [random.uniform(0, 0.02) for i in range(len(BG_co[0]))]
if len(FG_co[0]) != 0:
df_ar[FG_co, i] = scale(df_ar[FG_co, i], 0.02, 1, 0, 100)
else:
df_ar[:, i] = MinMaxScaler().fit_transform(df_ar[:, i].reshape(-1, 1))[:, 0]
df_scaled = pd.DataFrame(df_ar, columns=[col for col in df_short.columns])
df_scaled.index = df.index
df_scaled[['batch', 'y_mean', 'x_mean', 'scene', 'cell', 'FoV_counter']] = df[
['batch', 'y_mean', 'x_mean', 'scene', 'cell', 'FoV_counter']]
list_df.append(df_scaled)
df_scaled_all = | pd.concat(list_df) | pandas.concat |
import pandas as pd
snhp = pd.read_csv("raw.csv")
ref = pd.read_csv("./persistent_data/snhp2014.csv")
lookup = | pd.read_csv("./persistent_data/sc/COUNCIL AREA 2011 LOOKUP.csv") | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
| assert_series_equal(result, s1) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
import os
import re
import sys
from datetime import datetime
from random import randint
from time import sleep
import numpy as np
import pandas.util.testing as tm
import pytest
import pytz
from pandas import DataFrame, NaT, compat
from pandas.compat import range, u
from pandas.compat.numpy import np_datetime64_compat
from pandas_gbq import gbq
try:
import mock
except ImportError:
from unittest import mock
TABLE_ID = 'new_test'
def _skip_local_auth_if_in_travis_env():
if _in_travis_environment():
pytest.skip("Cannot run local auth in travis environment")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise pytest.skip("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_dataset_prefix_random():
return ''.join(['pandas_gbq_', str(randint(1, 100000))])
def _get_project_id():
project = os.environ.get('GBQ_PROJECT_ID')
if not project:
pytest.skip(
"Cannot run integration tests without a project id")
return project
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
def _get_private_key_contents():
key_path = _get_private_key_path()
if key_path is None:
return None
with open(key_path) as f:
return f.read()
@pytest.fixture(autouse=True, scope='module')
def _test_imports():
try:
import pkg_resources # noqa
except ImportError:
raise ImportError('Could not import pkg_resources (setuptools).')
gbq._test_google_api_imports()
@pytest.fixture
def project():
return _get_project_id()
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See https://github.com/pandas-dev/pandas/issues/13577
import google.auth
from google.auth.exceptions import DefaultCredentialsError
try:
credentials, _ = google.auth.default(scopes=[gbq.GbqConnector.scope])
except (DefaultCredentialsError, IOError):
return False
return gbq._try_credentials(_get_project_id(), credentials) is not None
def clean_gbq_environment(dataset_prefix, private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
all_datasets = dataset.datasets()
retry = 3
while retry > 0:
try:
retry = retry - 1
for i in range(1, 10):
dataset_id = dataset_prefix + str(i)
if dataset_id in all_datasets:
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
# Table listing is eventually consistent, so loop until
# all tables no longer appear (max 30 seconds).
table_retry = 30
all_tables = dataset.tables(dataset_id)
while all_tables and table_retry > 0:
for table_id in all_tables:
try:
table.delete(table_id)
except gbq.NotFoundException:
pass
sleep(1)
table_retry = table_retry - 1
all_tables = dataset.tables(dataset_id)
dataset.delete(dataset_id)
retry = 0
except gbq.GenericGBQException as ex:
# Build in retry logic to work around the following errors :
# An internal error occurred and the request could not be...
# Dataset ... is still in use
error_message = str(ex).lower()
if ('an internal error occurred' in error_message or
'still in use' in error_message) and retry > 0:
sleep(30)
else:
raise ex
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with pytest.warns(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
@pytest.fixture(params=['local', 'service_path', 'service_creds'])
def auth_type(request):
auth = request.param
if auth == 'local':
if _in_travis_environment():
pytest.skip("Cannot run local auth in travis environment")
elif auth == 'service_path':
if _in_travis_environment():
pytest.skip("Only run one auth type in Travis to save time")
_skip_if_no_private_key_path()
elif auth == 'service_creds':
_skip_if_no_private_key_contents()
else:
raise ValueError
return auth
@pytest.fixture()
def credentials(auth_type):
if auth_type == 'local':
return None
elif auth_type == 'service_path':
return _get_private_key_path()
elif auth_type == 'service_creds':
return _get_private_key_contents()
else:
raise ValueError
@pytest.fixture()
def gbq_connector(project, credentials):
return gbq.GbqConnector(project, private_key=credentials)
class TestGBQConnectorIntegration(object):
def test_should_be_able_to_make_a_connector(self, gbq_connector):
assert gbq_connector is not None, 'Could not create a GbqConnector'
def test_should_be_able_to_get_valid_credentials(self, gbq_connector):
credentials = gbq_connector.get_credentials()
assert credentials.valid
def test_should_be_able_to_get_a_bigquery_client(self, gbq_connector):
bigquery_client = gbq_connector.get_client()
assert bigquery_client is not None
def test_should_be_able_to_get_schema_from_query(self, gbq_connector):
schema, pages = gbq_connector.run_query('SELECT 1')
assert schema is not None
def test_should_be_able_to_get_results_from_query(self, gbq_connector):
schema, pages = gbq_connector.run_query('SELECT 1')
assert pages is not None
class TestGBQConnectorIntegrationWithLocalUserAccountAuth(object):
@pytest.fixture(autouse=True)
def setup(self, project):
_skip_local_auth_if_in_travis_env()
self.sut = gbq.GbqConnector(project, auth_local_webserver=True)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
# Can get real credentials, so mock it out to fail.
from google.auth.exceptions import DefaultCredentialsError
with mock.patch('google.auth.default',
side_effect=DefaultCredentialsError()):
credentials = self.sut.get_application_default_credentials()
else:
credentials = self.sut.get_application_default_credentials()
assert credentials is None
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
pytest.skip("Cannot get default_credentials "
"from the environment!")
from google.auth.credentials import Credentials
credentials = self.sut.get_application_default_credentials()
assert isinstance(credentials, Credentials)
def test_get_user_account_credentials_bad_file_returns_credentials(self):
from google.auth.credentials import Credentials
with mock.patch('__main__.open', side_effect=IOError()):
credentials = self.sut.get_user_account_credentials()
assert isinstance(credentials, Credentials)
def test_get_user_account_credentials_returns_credentials(self):
from google.auth.credentials import Credentials
credentials = self.sut.get_user_account_credentials()
assert isinstance(credentials, Credentials)
class TestGBQUnit(object):
def test_should_return_credentials_path_set_by_env_var(self):
env = {'PANDAS_GBQ_CREDENTIALS_FILE': '/tmp/dummy.dat'}
with mock.patch.dict('os.environ', env):
assert gbq._get_credentials_file() == '/tmp/dummy.dat'
@pytest.mark.parametrize(
('input', 'type_', 'expected'), [
(1, 'INTEGER', int(1)),
(1, 'FLOAT', float(1)),
pytest.param('false', 'BOOLEAN', False, marks=pytest.mark.xfail),
pytest.param(
'0e9', 'TIMESTAMP',
np_datetime64_compat('1970-01-01T00:00:00Z'),
marks=pytest.mark.xfail),
('STRING', 'STRING', 'STRING'),
])
def test_should_return_bigquery_correctly_typed(
self, input, type_, expected):
result = gbq._parse_data(
dict(fields=[dict(name='x', type=type_, mode='NULLABLE')]),
rows=[[input]]).iloc[0, 0]
assert result == expected
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with pytest.raises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with pytest.raises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with pytest.raises(TypeError):
gbq.read_gbq('SELECT 1')
def test_that_parse_data_works_properly(self):
from google.cloud.bigquery.table import Row
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'column_x', 'type': 'STRING'}]}
field_to_index = {'column_x': 0}
values = ('row_value',)
test_page = [Row(values, field_to_index)]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'column_x': ['row_value']})
tm.assert_frame_equal(test_output, correct_output)
def test_read_gbq_with_invalid_private_key_json_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='y')
def test_read_gbq_with_empty_private_key_json_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='{}')
def test_read_gbq_with_private_key_json_wrong_types_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key='{ "client_email" : 1, "private_key" : True }')
def test_read_gbq_with_empty_private_key_file_should_fail(self):
with tm.ensure_clean() as empty_file_path:
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x',
private_key=empty_file_path)
def test_read_gbq_with_corrupted_private_key_json_should_fail(self):
_skip_if_no_private_key_contents()
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key=re.sub('[a-z]', '9', _get_private_key_contents()))
def test_should_read(project, credentials):
query = 'SELECT "PI" AS valid_string'
df = gbq.read_gbq(query, project_id=project, private_key=credentials)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
class TestReadGBQIntegration(object):
@pytest.fixture(autouse=True)
def setup(self, project, credentials):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is
# executed.
self.gbq_connector = gbq.GbqConnector(
project, private_key=credentials)
self.credentials = credentials
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" AS valid_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" AS empty_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'empty_string': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) AS null_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_string': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) AS valid_integer'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'valid_integer': [3]}))
def test_should_properly_handle_nullable_integers(self):
query = '''SELECT * FROM
(SELECT 1 AS nullable_integer),
(SELECT NULL AS nullable_integer)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_integer': [1, None]}).astype(object))
def test_should_properly_handle_valid_longs(self):
query = 'SELECT 1 << 62 AS valid_long'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'valid_long': [1 << 62]}))
def test_should_properly_handle_nullable_longs(self):
query = '''SELECT * FROM
(SELECT 1 << 62 AS nullable_long),
(SELECT NULL AS nullable_long)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_long': [1 << 62, None]}).astype(object))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) AS null_integer'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_integer': [None]}))
def test_should_properly_handle_valid_floats(self):
from math import pi
query = 'SELECT PI() AS valid_float'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'valid_float': [pi]}))
def test_should_properly_handle_nullable_floats(self):
from math import pi
query = '''SELECT * FROM
(SELECT PI() AS nullable_float),
(SELECT NULL AS nullable_float)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_float': [pi, None]}))
def test_should_properly_handle_valid_doubles(self):
from math import pi
query = 'SELECT PI() * POW(10, 307) AS valid_double'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'valid_double': [pi * 10 ** 307]}))
def test_should_properly_handle_nullable_doubles(self):
from math import pi
query = '''SELECT * FROM
(SELECT PI() * POW(10, 307) AS nullable_double),
(SELECT NULL AS nullable_double)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_double': [pi * 10 ** 307, None]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) AS null_float'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_float': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") AS unix_epoch'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'unix_epoch': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") AS valid_timestamp'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({
'valid_timestamp': [np.datetime64('2004-09-15T05:00:00.000000Z')]
}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) AS null_timestamp'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_timestamp': [NaT]}))
def test_should_properly_handle_true_boolean(self):
query = 'SELECT BOOLEAN(TRUE) AS true_boolean'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'true_boolean': [True]}))
def test_should_properly_handle_false_boolean(self):
query = 'SELECT BOOLEAN(FALSE) AS false_boolean'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'false_boolean': [False]}))
def test_should_properly_handle_null_boolean(self):
query = 'SELECT BOOLEAN(NULL) AS null_boolean'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_boolean': [None]}))
def test_should_properly_handle_nullable_booleans(self):
query = '''SELECT * FROM
(SELECT BOOLEAN(TRUE) AS nullable_boolean),
(SELECT NULL AS nullable_boolean)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_boolean': [True, None]}).astype(object))
def test_unicode_string_conversion_and_normalization(self):
correct_test_datatype = DataFrame(
{'unicode_string': [u("\xe9\xfc")]}
)
unicode_string = "\xc3\xa9\xc3\xbc"
if compat.PY3:
unicode_string = unicode_string.encode('latin-1').decode('utf8')
query = 'SELECT "{0}" AS unicode_string'.format(unicode_string)
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, correct_test_datatype)
def test_index_column(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2"
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col="string_1",
private_key=self.credentials)
correct_frame = DataFrame(
{'string_1': ['a'], 'string_2': ['b']}).set_index("string_1")
assert result_frame.index.name == correct_frame.index.name
def test_column_order(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_3', 'string_1', 'string_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
col_order=col_order,
private_key=self.credentials)
correct_frame = DataFrame({'string_1': ['a'], 'string_2': [
'b'], 'string_3': ['c']})[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_read_gbq_raises_invalid_column_order(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_aaa', 'string_1', 'string_2']
# Column string_aaa does not exist. Should raise InvalidColumnOrder
with pytest.raises(gbq.InvalidColumnOrder):
gbq.read_gbq(query, project_id=_get_project_id(),
col_order=col_order,
private_key=self.credentials)
def test_column_order_plus_index(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_3', 'string_2']
result_frame = gbq.read_gbq(query, project_id=_get_project_id(),
index_col='string_1', col_order=col_order,
private_key=self.credentials)
correct_frame = DataFrame(
{'string_1': ['a'], 'string_2': ['b'], 'string_3': ['c']})
correct_frame.set_index('string_1', inplace=True)
correct_frame = correct_frame[col_order]
tm.assert_frame_equal(result_frame, correct_frame)
def test_read_gbq_raises_invalid_index_column(self):
query = "SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3"
col_order = ['string_3', 'string_2']
# Column string_bbb does not exist. Should raise InvalidIndexColumn
with pytest.raises(gbq.InvalidIndexColumn):
gbq.read_gbq(query, project_id=_get_project_id(),
index_col='string_bbb', col_order=col_order,
private_key=self.credentials)
def test_malformed_query(self):
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]",
project_id=_get_project_id(),
private_key=self.credentials)
def test_bad_project_id(self):
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq("SELECT 1", project_id='001',
private_key=self.credentials)
def test_bad_table_name(self):
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]",
project_id=_get_project_id(),
private_key=self.credentials)
def test_download_dataset_larger_than_200k_rows(self):
test_size = 200005
# Test for known BigQuery bug in datasets larger than 100k rows
# http://stackoverflow.com/questions/19145587/bq-py-not-paging-results
df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] "
"GROUP EACH BY id ORDER BY id ASC LIMIT {0}"
.format(test_size),
project_id=_get_project_id(),
private_key=self.credentials)
assert len(df.drop_duplicates()) == test_size
def test_zero_rows(self):
# Bug fix for https://github.com/pandas-dev/pandas/issues/10273
df = gbq.read_gbq("SELECT title, id, is_bot, "
"SEC_TO_TIMESTAMP(timestamp) ts "
"FROM [publicdata:samples.wikipedia] "
"WHERE timestamp=-9999999",
project_id=_get_project_id(),
private_key=self.credentials)
page_array = np.zeros(
(0,), dtype=[('title', object), ('id', np.dtype(int)),
('is_bot', np.dtype(bool)), ('ts', 'M8[ns]')])
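        # The empty structured array pins down the expected column dtypes
        # (object, int, bool, datetime64[ns]) even though zero rows come back.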
expected_result = DataFrame(
page_array, columns=['title', 'id', 'is_bot', 'ts'])
tm.assert_frame_equal(df, expected_result)
def test_legacy_sql(self):
legacy_sql = "SELECT id FROM [publicdata.samples.wikipedia] LIMIT 10"
# Test that a legacy sql statement fails when
# setting dialect='standard'
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq(legacy_sql, project_id=_get_project_id(),
dialect='standard',
private_key=self.credentials)
# Test that a legacy sql statement succeeds when
# setting dialect='legacy'
df = gbq.read_gbq(legacy_sql, project_id=_get_project_id(),
dialect='legacy',
private_key=self.credentials)
assert len(df.drop_duplicates()) == 10
def test_standard_sql(self):
standard_sql = "SELECT DISTINCT id FROM " \
"`publicdata.samples.wikipedia` LIMIT 10"
# Test that a standard sql statement fails when using
# the legacy SQL dialect (default value)
with pytest.raises(gbq.GenericGBQException):
gbq.read_gbq(standard_sql, project_id=_get_project_id(),
private_key=self.credentials)
# Test that a standard sql statement succeeds when
# setting dialect='standard'
df = gbq.read_gbq(standard_sql, project_id=_get_project_id(),
dialect='standard',
private_key=self.credentials)
assert len(df.drop_duplicates()) == 10
def test_invalid_option_for_sql_dialect(self):
sql_statement = "SELECT DISTINCT id FROM " \
"`publicdata.samples.wikipedia` LIMIT 10"
# Test that an invalid option for `dialect` raises ValueError
with pytest.raises(ValueError):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
dialect='invalid',
private_key=self.credentials)
# Test that a correct option for dialect succeeds
# to make sure ValueError was due to invalid dialect
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
dialect='standard', private_key=self.credentials)
def test_query_with_parameters(self):
sql_statement = "SELECT @param1 + @param2 AS valid_result"
config = {
'query': {
"useLegacySql": False,
"parameterMode": "named",
"queryParameters": [
{
"name": "param1",
"parameterType": {
"type": "INTEGER"
},
"parameterValue": {
"value": 1
}
},
{
"name": "param2",
"parameterType": {
"type": "INTEGER"
},
"parameterValue": {
"value": 2
}
}
]
}
}
# Test that a query that relies on parameters fails
# when parameters are not supplied via configuration
with pytest.raises(ValueError):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials)
# Test that the query is successful because we have supplied
# the correct query parameters via the 'config' option
df = gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
tm.assert_frame_equal(df, DataFrame({'valid_result': [3]}))
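    # Note: BigQuery also offers a positional parameter mode ("parameterMode":
    # "POSITIONAL" with "?" placeholders in the query); it is mentioned here
    # only as context and is not exercised by this test suite.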
def test_query_inside_configuration(self):
query_no_use = 'SELECT "PI_WRONG" AS valid_string'
query = 'SELECT "PI" AS valid_string'
config = {
'query': {
"query": query,
"useQueryCache": False,
}
}
        # Test that the query cannot be passed both
        # inside the config and as a parameter
with pytest.raises(ValueError):
gbq.read_gbq(query_no_use, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
df = gbq.read_gbq(None, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
def test_configuration_without_query(self):
sql_statement = 'SELECT 1'
config = {
'copy': {
"sourceTable": {
"projectId": _get_project_id(),
"datasetId": "publicdata:samples",
"tableId": "wikipedia"
},
"destinationTable": {
"projectId": _get_project_id(),
"datasetId": "publicdata:samples",
"tableId": "wikipedia_copied"
},
}
}
        # Test that only 'query' configurations are supported,
        # not 'copy', 'load' or 'extract'
with pytest.raises(ValueError):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
def test_configuration_raises_value_error_with_multiple_config(self):
sql_statement = 'SELECT 1'
config = {
'query': {
"query": sql_statement,
"useQueryCache": False,
},
'load': {
"query": sql_statement,
"useQueryCache": False,
}
}
        # Test that a ValueError is raised when multiple configurations
        # are supplied
with pytest.raises(ValueError):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
def test_timeout_configuration(self):
sql_statement = 'SELECT 1'
config = {
'query': {
"timeoutMs": 1
}
}
        # Test that a QueryTimeout error is raised
with pytest.raises(gbq.QueryTimeout):
gbq.read_gbq(sql_statement, project_id=_get_project_id(),
private_key=self.credentials,
configuration=config)
def test_query_response_bytes(self):
assert self.gbq_connector.sizeof_fmt(999) == "999.0 B"
assert self.gbq_connector.sizeof_fmt(1024) == "1.0 KB"
assert self.gbq_connector.sizeof_fmt(1099) == "1.1 KB"
assert self.gbq_connector.sizeof_fmt(1044480) == "1020.0 KB"
assert self.gbq_connector.sizeof_fmt(1048576) == "1.0 MB"
assert self.gbq_connector.sizeof_fmt(1048576000) == "1000.0 MB"
assert self.gbq_connector.sizeof_fmt(1073741824) == "1.0 GB"
assert self.gbq_connector.sizeof_fmt(1.099512E12) == "1.0 TB"
assert self.gbq_connector.sizeof_fmt(1.125900E15) == "1.0 PB"
assert self.gbq_connector.sizeof_fmt(1.152922E18) == "1.0 EB"
assert self.gbq_connector.sizeof_fmt(1.180592E21) == "1.0 ZB"
assert self.gbq_connector.sizeof_fmt(1.208926E24) == "1.0 YB"
assert self.gbq_connector.sizeof_fmt(1.208926E28) == "10000.0 YB"
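    # The expected strings above imply a 1024-based ("KB", "MB", ...)
    # formatter with one decimal place. The helper below is an illustrative
    # sketch only (its name and placement here are assumptions, not the real
    # GbqConnector implementation), kept to document the convention being
    # asserted.
    @staticmethod
    def _sizeof_fmt_sketch(num, suffix='B'):
        fmt = "%3.1f %s%s"
        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, 'Y', suffix)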
def test_struct(self):
query = """SELECT 1 int_field,
STRUCT("a" as letter, 1 as num) struct_field"""
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials,
dialect='standard')
expected = DataFrame([[1, {"letter": "a", "num": 1}]],
columns=["int_field", "struct_field"])
tm.assert_frame_equal(df, expected)
def test_array(self):
query = """select ["a","x","b","y","c","z"] as letters"""
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials,
dialect='standard')
tm.assert_frame_equal(df, DataFrame([[["a", "x", "b", "y", "c", "z"]]],
columns=["letters"]))
def test_array_length_zero(self):
query = """WITH t as (
SELECT "a" letter, [""] as array_field
UNION ALL
SELECT "b" letter, [] as array_field)
select letter, array_field, array_length(array_field) len
from t
order by letter ASC"""
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials,
dialect='standard')
expected = DataFrame([["a", [""], 1], ["b", [], 0]],
columns=["letter", "array_field", "len"])
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
import pytest
import numpy as np
import pandas as pd
EXP_IDX = pd.MultiIndex(levels=[['model_a'], ['scen_a', 'scen_b']],
codes=[[0, 0], [0, 1]], names=['model', 'scenario'])
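# EXP_IDX spells out the two expected (model, scenario) rows:
# ('model_a', 'scen_a') and ('model_a', 'scen_b').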
def test_set_meta_no_name(test_df):
idx = pd.MultiIndex(levels=[['a_scenario'], ['a_model'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)
pytest.raises(ValueError, test_df.set_meta, s)
def test_set_meta_as_named_series(test_df):
idx = pd.MultiIndex(levels=[['scen_a'], ['model_a'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx, name='meta_values')
test_df.set_meta(s)
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(test_df['meta_values'], exp)
def test_set_meta_as_unnamed_series(test_df):
idx = pd.MultiIndex(levels=[['scen_a'], ['model_a'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)
test_df.set_meta(s, name='meta_values')
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(test_df['meta_values'], exp)
def test_set_meta_non_unique_index_fail(test_df):
idx = pd.MultiIndex(levels=[['model_a'], ['scen_a'], ['reg_a', 'reg_b']],
codes=[[0, 0], [0, 0], [0, 1]],
names=['model', 'scenario', 'region'])
s = pd.Series([0.4, 0.5], idx)
pytest.raises(ValueError, test_df.set_meta, s)
def test_set_meta_non_existing_index_fail(test_df):
idx = pd.MultiIndex(levels=[['model_a', 'fail_model'],
['scen_a', 'fail_scenario']],
codes=[[0, 1], [0, 1]], names=['model', 'scenario'])
s = pd.Series([0.4, 0.5], idx)
pytest.raises(ValueError, test_df.set_meta, s)
def test_set_meta_by_df(test_df):
df = pd.DataFrame([
['model_a', 'scen_a', 'some_region', 1],
], columns=['model', 'scenario', 'region', 'col'])
test_df.set_meta(meta=0.3, name='meta_values', index=df)
exp = | pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values') | pandas.Series |
import unittest
import pandas as pd
from enda.backtesting import BackTesting
class TestBackTesting(unittest.TestCase):
def test_yield_train_test_1(self):
df = pd.date_range(
start=pd.to_datetime('2015-01-01 00:00:00+01:00').tz_convert('Europe/Paris'),
end=pd.to_datetime('2021-01-01 00:00:00+01:00').tz_convert('Europe/Paris'),
freq='D',
tz='Europe/Paris',
name='time'
).to_frame()
df = df.set_index('time')
df["value"] = 1
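        # df is a daily, Europe/Paris-localised frame over 2015-2021 with a
        # single constant column; the loop below iterates over the train/test
        # splits yielded from the chosen evaluation start date onwards.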
count_iterations = 0
for train_set, test_set in BackTesting.yield_train_test(
df,
start_eval_datetime= | pd.to_datetime('2019-01-01 00:00:00+01:00') | pandas.to_datetime |